# /Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/gui/stdio.py
from decimal import Decimal
import getpass
import datetime
import logging
from electrum import WalletStorage, Wallet
from electrum.util import format_satoshis
from electrum.bitcoin import is_address, COIN, TYPE_ADDRESS
from electrum.transaction import TxOutput
from electrum.network import TxBroadcastError, BestEffortRequestFailed
from electrum.logging import console_stderr_handler
_ = lambda x:x # i18n
# minimal fdisk like gui for console usage
# written by rofl0r, with some bits stolen from the text gui (ncurses)
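# Illustrative note (assumption, not from the original source): this class is
# picked up by Electrum's GUI loader; the stdio interface is typically selected
# with a command-line option such as "electrum-chi -g stdio" -- the exact flag
# depends on the packaged CLI.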
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists():
print("Wallet not found. Try 'electrum-chi create'")
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
self.done = 0
self.last_balance = ""
console_stderr_handler.setLevel(logging.CRITICAL)
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.wallet = Wallet(storage)
self.wallet.start_network(self.network)
self.contacts = self.wallet.contacts
self.network.register_callback(self.on_network, ['wallet_updated', 'network_updated', 'banner'])
self.commands = [_("[h] - displays this help text"), \
_("[i] - display transaction history"), \
_("[o] - enter payment order"), \
_("[p] - print stored payment order"), \
_("[s] - send stored payment order"), \
_("[r] - show own receipt addresses"), \
_("[c] - display contacts"), \
_("[b] - print server banner"), \
_("[q] - quit") ]
self.num_commands = len(self.commands)
def on_network(self, event, *args):
if event in ['wallet_updated', 'network_updated']:
self.updated()
elif event == 'banner':
self.print_banner()
def main_command(self):
self.print_balance()
c = input("enter command: ")
if c == "h" : self.print_commands()
elif c == "i" : self.print_history()
elif c == "o" : self.enter_order()
elif c == "p" : self.print_order()
elif c == "s" : self.send_order()
elif c == "r" : self.print_addresses()
elif c == "c" : self.print_contacts()
elif c == "b" : self.print_banner()
elif c == "n" : self.network_dialog()
elif c == "e" : self.settings_dialog()
elif c == "q" : self.done = 1
else: self.print_commands()
def updated(self):
s = self.get_balance()
if s != self.last_balance:
print(s)
self.last_balance = s
return True
def print_commands(self):
self.print_list(self.commands, "Available commands")
def print_history(self):
width = [20, 40, 14, 14]
delta = (80 - sum(width) - 4) // 3  # integer padding per column
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%" \
+ "%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
messages = []
for tx_hash, tx_mined_status, delta, balance in reversed(self.wallet.get_history()):
if tx_mined_status.conf:
timestamp = tx_mined_status.timestamp
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "unknown"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(tx_hash)
messages.append( format_str%( time_str, label, format_satoshis(delta, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
self.print_list(messages[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def print_balance(self):
print(self.get_balance())
def get_balance(self):
if self.wallet.network.is_connected():
if not self.wallet.up_to_date:
msg = _( "Synchronizing..." )
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _( "Not connected" )
return msg
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%20s %45s "%("Key", "Value"))
def print_addresses(self):
messages = map(lambda addr: "%30s %30s "%(addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
self.print_list(messages, "%30s %30s "%("Address", "Label"))
def print_order(self):
print("send order to " + self.str_recipient + ", amount: " + self.str_amount \
+ "\nfee: " + self.str_fee + ", desc: " + self.str_description)
def enter_order(self):
self.str_recipient = input("Pay to: ")
self.str_description = input("Description : ")
self.str_amount = input("Amount: ")
self.str_fee = input("Fee: ")
def send_order(self):
self.do_send()
def print_banner(self):
for line in self.wallet.network.banner.split('\n'):
print(line)
def print_list(self, lst, firstline):
lst = list(lst)
self.maxpos = len(lst)
if not self.maxpos: return
print(firstline)
for msg in lst:
print(msg)
def main(self):
while self.done == 0: self.main_command()
def do_send(self):
if not is_address(self.str_recipient):
print(_('Invalid address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
print(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
print(_('Invalid Fee'))
return
if self.wallet.has_password():
password = self.password_dialog()
if not password:
return
else:
password = None
c = ""
while c != "y":
c = input("ok to send (y/n)?")
if c == "n": return
try:
tx = self.wallet.mktx([TxOutput(TYPE_ADDRESS, self.str_recipient, amount)],
password, self.config, fee)
except Exception as e:
print(repr(e))
return
if self.str_description:
self.wallet.labels[tx.txid()] = self.str_description
print(_("Please wait..."))
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
print(msg)
except BestEffortRequestFailed as e:
msg = repr(e)
print(msg)
else:
print(_('Payment sent.'))
#self.do_clear()
#self.update_contacts_tab()
def network_dialog(self):
print("use 'electrum-chi setconfig server/proxy' to change your network settings")
return True
def settings_dialog(self):
print("use 'electrum-chi setconfig' to change your settings")
return True
def password_dialog(self):
return getpass.getpass()
# XXX unused
def run_receive_tab(self, c):
#if c == 10:
# out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
return
def run_contacts_tab(self, c):
pass
# /IdracRedfishSupportTest-0.0.7.tar.gz/IdracRedfishSupportTest-0.0.7/GetSystemHWInventoryREDFISH.py
import argparse
import getpass
import json
import logging
import os
import re
import requests
import subprocess
import sys
import time
import warnings
from datetime import datetime
from pprint import pprint
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description="Python script using Redfish API to get system hardware inventory (output will be printed to the screen and also copied to a text file). This includes information for storage controllers, memory, network devices, general system details, power supplies, hard drives, fans, backplanes, processors")
parser.add_argument('-ip',help='iDRAC IP address', required=False)
parser.add_argument('-u', help='iDRAC username', required=False)
parser.add_argument('-p', help='iDRAC password. If you do not pass in argument -p, script will prompt to enter user password which will not be echoed to the screen.', required=False)
parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False)
parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False)
parser.add_argument('--script-examples', action="store_true", help='Prints script examples')
parser.add_argument('--system', help='Get system information', action="store_true", required=False)
parser.add_argument('--memory', help='Get memory information', action="store_true", required=False)
parser.add_argument('--processor', help='Get processor information', action="store_true", required=False)
parser.add_argument('--fan', help='Get fan information', action="store_true", required=False)
parser.add_argument('--powersupply', help='Get power supply information', action="store_true", required=False)
parser.add_argument('--storage', help='Get storage information', action="store_true", required=False)
parser.add_argument('--network', help='Get network device information', action="store_true", required=False)
parser.add_argument('--all', help='Get all system/device information', action="store_true", required=False)
args = vars(parser.parse_args())
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
def script_examples():
print("""\n- GetSystemHWInventoryREDFISH.py -ip 192.168.0.120 -u root -p calvin --memory, this example will get only memory information.
\n- GetSystemHWInventoryREDFISH.py -ip 192.168.0.120 -u root -p calvin --processor --memory, this example will get only processor and memory information.
\n- GetSystemHWInventoryREDFISH.py -ip 192.168.0.120 -u root -p calvin --all, this example will get all system information: general system information, processor, memory, fans, power supplies, hard drives, storage controllers, network devices""")
sys.exit(0)
def check_supported_idrac_version():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code == 401:
logging.warning("\n- WARNING, status code %s returned. Incorrect iDRAC username/password or invalid privilege detected." % response.status_code)
sys.exit(0)
elif response.status_code != 200:
logging.warning("\n- WARNING, iDRAC version installed does not support this feature using Redfish API")
sys.exit(0)
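# Illustrative sketch (not part of the original script): an X-Auth token for
# the -x argument can typically be created via the standard Redfish session
# service defined by the DMTF spec. Credentials and IP below are placeholders.
#
#   response = requests.post('https://%s/redfish/v1/SessionService/Sessions' % idrac_ip,
#                            json={'UserName': 'root', 'Password': 'calvin'}, verify=False)
#   token = response.headers['X-Auth-Token']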
def get_system_information():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
print("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
else:
message = "\n---- System Information ----\n"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data.items():
if i[0] == "Oem":
for ii in i[1]['Dell']['DellSystem'].items():
if ii[0] != '@odata.context' or ii[0] != '@odata.type':
message = "%s: %s" % (ii[0], ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
elif i[0] == "Model" or i[0] == "AssetTag" or i[0] == "BiosVersion" or i[0] == "HostName" or i[0] == "Manufacturer" or i[0] == "System" or i[0] == "SKU" or i[0] == "SerialNumber" or i[0] == "Status":
message = "%s: %s" % (i[0], i[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
def get_memory_information():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Memory' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Memory' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
else:
message = "\n---- Memory Information ----"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data['Members']:
dimm = i['@odata.id'].split("/")[-1]
try:
dimm_slot = re.search("DIMM.+",dimm).group()
except Exception:
logging.error("\n- FAIL, unable to get dimm slot info")
sys.exit(0)
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, i['@odata.id']), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, i['@odata.id']), verify=verify_cert, auth=(idrac_username, idrac_password))
sub_data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error: %s" % sub_data)
sys.exit(0)
else:
message = "\n- Memory details for %s -\n" % dimm_slot
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for ii in sub_data.items():
if ii[0] == 'Oem':
for iii in ii[1]['Dell']['DellMemory'].items():
if iii[0] != '@odata.context' or iii[0] != '@odata.type':
message = "%s: %s" % (iii[0], iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (ii[0], ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
def get_cpu_information():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Processors' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Processors' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
else:
message = "\n---- Processor Information ----"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data['Members']:
cpu = i['@odata.id'].split("/")[-1]
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, i['@odata.id']), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, i['@odata.id']), verify=verify_cert, auth=(idrac_username, idrac_password))
sub_data = response.json()
if response.status_code != 200:
print("\n- FAIL, get command failed, error: %s" % sub_data)
sys.exit(0)
else:
message = "\n- Processor details for %s -\n" % cpu
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for ii in sub_data.items():
if ii[0] == 'Oem':
for iii in ii[1]['Dell']['DellProcessor'].items():
if iii[0] != '@odata.context' or iii[0] != '@odata.type':
message = "%s: %s" % (iii[0], iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (ii[0], ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
def get_fan_information():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
print("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
else:
message = "\n---- Fan Information ----\n"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
fan_list = []
if data['Links']['CooledBy'] == []:
logging.warning("\n- WARNING, no fans detected for system")
else:
for i in data['Links']['CooledBy']:
for ii in i.items():
fan_list.append(ii[1])
for i in fan_list:
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, i), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, i), verify=verify_cert, auth=(idrac_username, idrac_password))
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
else:
data_get = response.json()
if "Fans" not in data_get.keys():
for ii in data_get.items():
message = "%s: %s" % (ii[0], ii[1])
open_file.writelines(message)
print(message)
message = "\n"
open_file.writelines(message)
message = "\n"
open_file.writelines(message)
print(message)
else:
count = 0
while True:
if count == len(fan_list):
return
for i in data_get["Fans"]:
message = "\n- Details for %s -\n" % i["FanName"]
count += 1
open_file.writelines(message)
print(message)
message = "\n"
open_file.writelines(message)
for ii in i.items():
message = "%s: %s" % (ii[0], ii[1])
open_file.writelines(message)
print(message)
message = "\n"
open_file.writelines(message)
def get_ps_information():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
else:
message = "\n---- Power Supply Information ----\n"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
if data['Links']['PoweredBy'] == []:
logging.error("- WARNING, no power supplies detected for system")
else:
for i in data['Links']['PoweredBy']:
for ii in i.items():
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, ii[1]), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, ii[1]), verify=verify_cert, auth=(idrac_username, idrac_password))
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
else:
data_get = response.json()
if "PowerSupplies" not in data_get.keys():
message = "\n- Details for %s -\n" % data_get["Name"]
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data_get.items():
if i[0] == "Oem":
try:
for ii in i[1]["Dell"]["DellPowerSupply"].items():
message = "%s: %s" % (ii[0],ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
except Exception:
logging.error("- FAIL, unable to find Dell PowerSupply OEM information")
sys.exit(0)
else:
message = "%s: %s" % (i[0],i[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
if len(data['Links']['PoweredBy']) == 1:
message = "\n- Details for %s -\n" % data_get["PowerSupplies"][0]["Name"]
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data_get.items():
if i[0] == "PowerSupplies":
for ii in i[1]:
for iii in ii.items():
if iii[0] == "Oem":
try:
for iiii in iii[1]["Dell"]["DellPowerSupply"].items():
message = "%s: %s" % (iiii[0],iiii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
except Exception:
logging.error("- FAIL, unable to find Dell PowerSupply OEM information")
sys.exit(0)
else:
message = "%s: %s" % (iii[0],iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
elif i[0] == "PowerControl" and i[0] != "Voltages":
for ii in i[1]:
for iii in ii.items():
message = "%s: %s" % (iii[0],iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (i[0],i[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
print("\n")
open_file.writelines("\n")
else:
for i in data_get.items():
if i[0] == "PowerSupplies":
psu_ids = i[1]
count = 0
while True:
if len(psu_ids) == count:
return
else:
for i in psu_ids:
message = "\n- Details for %s -\n" % i["Name"]
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for ii in i.items():
if ii[0] == "Oem":
try:
for iii in ii[1]["Dell"]["DellPowerSupply"].items():
message = "%s: %s" % (iii[0],iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
except Exception:
logging.error("- FAIL, unable to find Dell PowerSupply OEM information")
sys.exit(0)
else:
message = "%s: %s" % (ii[0],ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
print("\n")
count += 1
def get_storage_controller_information():
global controller_list
message = "\n---- Controller Information ----"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
controller_list = []
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error: %s" % data)
sys.exit(0)
for i in data["Members"]:
for ii in i.items():
controller_list.append(ii[1])
for i in controller_list:
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, i), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, i), verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
message = "\n - Detailed controller information for %s -\n" % i.split("/")[-1]
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data.items():
if i[0] == 'StorageControllers':
for ii in i[1]:
for iii in ii.items():
if iii[0] == 'Status':
for iiii in iii[1].items():
message = "%s: %s" % (iiii[0],iiii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (iii[0],iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
elif i[0] == 'Oem':
try:
for ii in i[1]['Dell']['DellController'].items():
message = "%s: %s" % (ii[0],ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
except Exception:
for ii in i[1]['Dell'].items():
message = "%s: %s" % (ii[0],ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (i[0], i[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
def get_storage_disks_information():
message = "\n---- Disk Information ----"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in controller_list:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, i.split("/")[-1]), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, i.split("/")[-1]), verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("- FAIL, GET command failed, detailed error information: %s" % data)
sys.exit(0)
if data['Drives'] == []:
message = "\n- WARNING, no drives detected for %s" % i.split("/")[-1]
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
for i in data['Drives']:
for ii in i.items():
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, ii[1]), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, ii[1]), verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
message = "\n - Detailed drive information for %s -\n" % ii[1].split("/")[-1]
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for ii in data.items():
if ii[0] == 'Oem':
for iii in ii[1]['Dell']['DellPhysicalDisk'].items():
message = "%s: %s" % (iii[0],iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
elif ii[0] == 'Status':
for iii in ii[1].items():
message = "%s: %s" % (iii[0],iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (ii[0],ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
def get_backplane_information():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Chassis' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Chassis' % idrac_ip, verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error is: %s" % data)
sys.exit(0)
message = "\n---- Backplane Information ----"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
backplane_URI_list = []
for i in data['Members']:
backplane = i['@odata.id']
if "Enclosure" in backplane:
backplane_URI_list.append(backplane)
if backplane_URI_list == []:
message = "- WARNING, no backplane information detected for system\n"
open_file.writelines(message)
open_file.writelines("\n")
print(message)
sys.exit()
for i in backplane_URI_list:
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, i), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, i), verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
message = "\n- Detailed backplane information for %s -\n" % i.split("/")[-1]
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for iii in data.items():
if iii[0] == "Oem":
for iiii in iii[1]['Dell']['DellEnclosure'].items():
message = "%s: %s" % (iiii[0],iiii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (iii[0], iii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
def get_network_information():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/NetworkAdapters' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/NetworkAdapters' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
network_device_list = []
for i in data['Members']:
for ii in i.items():
network_device = ii[1].split("/")[-1]
network_device_list.append(network_device)
for i in network_device_list:
port_list = []
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/NetworkAdapters/%s/NetworkDeviceFunctions' % (idrac_ip, i), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/NetworkAdapters/%s/NetworkDeviceFunctions' % (idrac_ip, i), verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
for i in data['Members']:
for ii in i.items():
port_list.append(ii[1].split("/")[-1])
for i in network_device_list:
device_id = re.search(r"\w+.\w+.\w", i).group()
if args["x"]:
response = requests.get('https://%s/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/%s' % (idrac_ip, i), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/%s' % (idrac_ip, i), verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error is: %s" % data)
sys.exit(0)
message = "\n---- Network Device Information for %s ----\n" % i
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data.items():
if i[0] == "Controllers":
for ii in i[1][0]["ControllerCapabilities"].items():
message = "%s: %s" % (ii[0], ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (i[0], i[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in port_list:
device_id = re.search(r"\w+.\w+.\w", i).group()
# redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Embedded.1/NetworkDeviceFunctions/NIC.Embedded.1-1-1
if args["x"]:
response = requests.get('https://%s/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/%s/NetworkDeviceFunctions/%s' % (idrac_ip, device_id, i), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/%s/NetworkDeviceFunctions/%s' % (idrac_ip, device_id, i), verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("\n- FAIL, get command failed, error is: %s" % data)
sys.exit(0)
message = "\n---- Network Port Information for %s ----\n" % i
open_file.writelines(message)
open_file.writelines("\n")
print(message)
for i in data.items():
if i[0] == "Oem":
for ii in i[1]['Dell']['DellNIC'].items():
message = "%s: %s" % (ii[0],ii[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
else:
message = "%s: %s" % (i[0], i[1])
open_file.writelines(message)
open_file.writelines("\n")
print(message)
if __name__ == "__main__":
if args["script_examples"]:
script_examples()
if args["ip"] or args["ssl"] or args["u"] or args["p"] or args["x"]:
idrac_ip = args["ip"]
idrac_username = args["u"]
if args["p"]:
idrac_password = args["p"]
if not args["p"] and not args["x"] and args["u"]:
idrac_password = getpass.getpass("\n- Argument -p not detected, pass in iDRAC user %s password: " % args["u"])
if args["ssl"]:
if args["ssl"].lower() == "true":
verify_cert = True
elif args["ssl"].lower() == "false":
verify_cert = False
else:
verify_cert = False
else:
verify_cert = False
check_supported_idrac_version()
else:
logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
sys.exit(0)
try:
os.remove("hw_inventory.txt")
except OSError:
logging.debug("- INFO, file %s not detected, skipping step to delete file" % "hw_inventory.txt")
open_file = open("hw_inventory.txt","a")
date_timestamp = datetime.now()
current_date_time="- Data collection timestamp: %s-%s-%s %s:%s:%s\n" % (date_timestamp.month, date_timestamp.day, date_timestamp.year, date_timestamp.hour, date_timestamp.minute, date_timestamp.second)
open_file.writelines(current_date_time)
if args["system"]:
get_system_information()
if args["memory"]:
get_memory_information()
if args["processor"]:
get_cpu_information()
if args["fan"]:
get_fan_information()
if args["powersupply"]:
get_ps_information()
if args["storage"]:
get_storage_controller_information()
get_storage_disks_information()
get_backplane_information()
if args["network"]:
get_network_information()
if args["all"]:
get_system_information()
get_memory_information()
get_cpu_information()
get_fan_information()
get_ps_information()
get_storage_controller_information()
get_storage_disks_information()
get_backplane_information()
get_network_information()
open_file.close()
# /Boodler-2.0.3.tar.gz/Boodler-2.0.3/src/boopak/pinfo.py
import sys
import os.path
import types
import re
import sets
import codecs
import cStringIO
from boopak import version
class PackageInfo:
"""PackageInfo: represents a single package.
A PackageInfo is not the imported module object which a Boodler module
sees from the inside. It's a wrapper around that.
PackageInfo(loader, name, vers, dir, metadata, resources,
external) -- constructor
Publicly readable fields:
name -- the package name
version -- the package version number
key -- a tuple containing (name, version)
encoded_name -- the name of the package's module
metadata -- the Metadata for the package
resources -- the Resources for the package
loader -- the PackageLoader which loaded the package
Public methods:
load_dependencies() -- attempt to load everything this package depends on
get_content() -- return the module which is the content of this package
get_file() -- get a File object contained within this package
open_file() -- open a File object contained within this package
Internal methods:
validate_metadata() -- check the metadata, and load information from it
"""
def __init__(self, loader, name, vers, dir, metadata, resources, external):
self.loader = loader
self.name = name
self.version = vers
self.key = (name, vers)
self.encoded_name = encode_package_name(name, vers)
self.dir = dir
self.content = None
self.content_info = {}
self.import_in_progress = False
self.metadata = metadata
self.resources = resources
self.resource_tree = None
self.external = external
self.dependencies = sets.Set()
self.imported_pkg_specs = {}
def __repr__(self):
return '<PackageInfo \'' + self.name + ' ' + str(self.version) + '\'>'
def validate_metadata(self):
"""validate_metadata() -> None
Make sure that the metadata object attached to this package
correctly describes the package. Also loads up various fields
with information from the metadata object.
Also checks that the resource tree has a valid shape.
If anything is discovered to be wrong, this raises PackageLoadError.
This is called by the package loader (and nothing else should
call it).
"""
pkgname = self.name
metadata = self.metadata
val = metadata.get_one('boodler.package')
if (not val):
raise PackageLoadError(pkgname,
'no boodler.package metadata entry')
if (val != pkgname):
raise PackageLoadError(pkgname,
'boodler.package does not match package location: ' + val)
val = metadata.get_one('boodler.version')
if (not val):
val = '(missing, 1.0 assumed)'
vers = version.VersionNumber()
else:
vers = version.VersionNumber(val)
if (vers != self.version):
raise PackageLoadError(pkgname,
'boodler.version does not match package version: ' + val)
val = metadata.get_one('boodler.main')
if (not val):
pass
elif (val == '.'):
pass
elif (ident_name_regexp.match(val)):
pass
else:
raise PackageLoadError(pkgname,
'boodler.main is not a module name or ".": ' + val)
val = metadata.get_one('boodler.api_required')
if (val):
spec = version.VersionSpec(val)
if (self.loader.boodler_api_vers):
if (not spec.match(self.loader.boodler_api_vers)):
raise PackageLoadError(pkgname,
'boodler.api_required does not match Boodler version: '
+ val)
for val in metadata.get_all('boodler.requires'):
try:
pos = val.find(' ')
if (pos < 0):
deppkg = val
depspec = None
else:
deppkg = val[:pos].strip()
depspec = val[pos+1:].strip()
depspec = version.VersionSpec(depspec)
parse_package_name(deppkg)
deppkg = str(deppkg)
self.dependencies.add( (deppkg, depspec) )
except ValueError, ex:
raise PackageLoadError(pkgname,
'boodler.requires line invalid: ' + val)
for val in metadata.get_all('boodler.requires_exact'):
try:
pos = val.find(' ')
if (pos < 0):
raise ValueError('version number required')
else:
deppkg = val[:pos].strip()
depspec = val[pos+1:].strip()
depspec = version.VersionNumber(depspec)
parse_package_name(deppkg)
deppkg = str(deppkg)
self.dependencies.add( (deppkg, depspec) )
except ValueError, ex:
raise PackageLoadError(pkgname,
'boodler.requires_exact line invalid: ' + val)
try:
self.resource_tree = self.resources.build_tree()
except ValueError, ex:
raise PackageLoadError(pkgname,
'unable to map resources: ' + str(ex))
def load_dependencies(self):
"""load_dependencies() -> (set, dict, int)
Attempt to load all the packages which this package depends on.
This returns a triple (good, bad, count):
- good is a set containing (packagename, version) pairs for every
package that was loaded successfully. (This will include the original
package.)
- bad is a dict. The keys are packagenames which did not load
successfully. Each maps to a (nonempty) list of version requests
for that package, which could not be fulfilled. (The list contains
None, VersionSpecs, and VersionNumbers. Values may occur more than
once.)
- count is an int, representing how many actual errors occurred.
This describes package format problems and read errors. It does not
include packages that were simply not available. (The bad dict
includes both errors and not-availables; so len(bad) >= count.)
If bad is empty, then all dependencies are available.
Note that the good list may include more than one version of a
package.
"""
return self.loader.load_package_dependencies(self)
def get_content(self):
"""get_content() -> module
Return the module which is the content of this package.
Warning: this method imports Python source code from the package
directory, which means it *executes* Python source code from the
package directory. Do not call this on untrusted packages.
A sound-player will have to call this, but a package manager
should not. (The package creation tool does, though.)
"""
if (self.content is not None):
return self.content
if (self.import_in_progress):
# Annoying intermediate case; the module has been added to
# sys.modules, but not yet to pkg.content.
return sys.modules.get(self.encoded_name)
self.loader.import_package_content(self)
return self.content
def get_file(self, filename):
"""get_file(filename) -> File
Get a File object representing a file contained in this package.
The filename must be in universal format: relative to the package
root, and written with forward slashes, not backslashes.
(If the filename is invalid or unsafe, ValueError is raised.
However, this does not check whether the file exists.)
"""
pathname = build_safe_pathname(self.dir, filename)
return File(self, pathname, filename)
def open_file(self, filename, binary=False):
"""open_file(filename, binary=False) -> file
Open a file contained within this package. The filename must be
in universal format: relative to the package root, and written
with forward slashes, not backslashes.
This is equivalent to get_file(filename).open(binary).
(If the filename is invalid or unsafe, ValueError is raised.
If the file does not exist, IOError is raised.)
"""
tmpfile = self.get_file(filename)
return tmpfile.open(binary)
class PackageGroup:
"""PackageGroup: represents all the versions of a particular package
that are currently available.
PackageGroup(loader, pkgname) -- constructor
Create a PackageGroup attached to the given PackageLoader, with the
given package name.
Publicly readable fields:
name -- the package name
loader -- the PackageLoader which loaded the package
Public methods:
get_num_versions() -- return the number of versions available
get_versions() -- return the versions available for this package
has_version() -- return whether the package has the given version number
find_version_match() -- find the most recent version matching the spec
Internal methods:
discover_versions() -- determine what versions are available
"""
def __init__(self, loader, pkgname, dirname):
self.loader = loader
self.name = pkgname
self.dir = dirname # May be None
self.versions = []
def __repr__(self):
return '<PackageGroup \'' + self.name + '\'>'
def discover_versions(self, fl, external_versions=None):
"""discover_versions(file, external_versions=None) -> None
Determine what versions are available. We look both at the contents
of an open Versions file, and at a list of versions found in external
packages. Either of these may be None.
This is an internal method; it is called only by load_group, when
the PackageGroup is created.
"""
res = {}
if (fl):
while (True):
ln = fl.readline()
if (not ln):
break
ln = deunicode(ln)
ln = ln.strip()
if (not ln):
continue
if (ln.startswith('#')):
continue
vers = version.VersionNumber(ln)
res[vers] = False
if (external_versions):
for vers in external_versions:
res[vers] = True
self.versions = list(res.keys())
self.versions.sort()
self.versions.reverse()
def get_num_versions(self):
"""get_num_versions() -> int
Return the number of versions available for this package.
"""
return len(self.versions)
def get_versions(self):
"""get_versions() -> list of VersionNumbers
Return the versions available for this package.
"""
return list(self.versions)
def has_version(self, vers):
"""has_version(vers) -> bool
Return whether the package has the given version number available.
The argument must be a VersionNumber.
"""
return (vers in self.versions)
def find_version_match(self, spec=None):
"""find_version_match(spec=None) -> VersionNumber
Find the most recent version matching the given VersionSpec.
If no spec is given, just return the most recent version. If
there are no versions that fit the requirement, returns None.
"""
for vers in self.versions:
if (spec is None or spec.match(vers)):
return vers
return None
class Metadata:
"""Metadata: represents the contents of a Metadata file.
Metadata(pkgname, file=None) -- constructor
Create a Metadata object by reading the given file. (This should
be a readable file object; typically, the result of opening a
Metadata file.) It is the caller's responsibility to close the
file afterwards. If no file is provided, the Metadata will be
empty.
(The file should be opened with mode 'rbU'. This is relaxed about
newlines, but careful about high-bit characters, so that UTF-8
decoding will work. Note that the file should offer bytes, not
Unicode characters.)
The first argument is a package name, but this is only used for
error messages. If the package name is not known when you read the
metadata, pass in something usable as a label.
Public methods:
get_all() -- get all metadata entries with the given key
get_one() -- get the metadata entry with the given key
keys() -- get the keys contained in this Metadata object
clone() -- create a Metadata object identical to this one
dump() -- write the contents of this Metadata object to a file
add() -- add a metadata entry with the given key and value
delete_all() -- delete all metadata entries with the given key
"""
def __init__(self, pkgname, fl=None):
self.pkgname = pkgname
# Map of unicode -> list of unicode.
self.map = {}
if (fl is None):
return
while True:
ln = fl.readline()
if (not ln):
break
ln = deunicode(ln)
ln = ln.strip()
# Ignore blank lines and comments.
if (not ln):
continue
if (ln.startswith('#')):
continue
pos = ln.find(':')
if (pos < 0):
raise PackageLoadError(pkgname,
'metadata file contains invalid line: ' + ln)
key = ln[:pos].strip()
val = ln[pos+1:].strip()
if (' ' in key):
raise PackageLoadError(pkgname,
'metadata file contains invalid line: ' + ln)
dict_accumulate(self.map, key, val)
def __repr__(self):
return '<Metadata \'' + self.pkgname + '\'>'
def __len__(self):
return len(self.map)
def get_all(self, key):
"""get_all(key) -> list of unicode
Returns all metadata entries with the given key. If there are none,
this returns an empty list.
This maintains the order of entries loaded (or added).
"""
return self.map.get(key, [])
def get_one(self, key, default=None):
"""get_one(key, default=None) -> unicode
Returns the metadata entry with the given key. If there are none,
this returns None (or the default argument, if supplied). If there
is more than one such entry, this returns the first.
"""
res = self.get_all(key)
if (not res):
return default
return res[0]
def keys(self):
"""keys() -> list of unicode
Get the keys contained in this Metadata object.
"""
return list(self.map.keys())
def clone(self):
"""clone() -> Metadata
Create a Metadata object identical to this one. (This is a deep
copy.)
"""
res = Metadata('<clone>')
for key in self.map.keys():
res.map[key] = list(self.map[key])
return res
def dump(self, fl, comment=None):
"""dump(file, comment=None) -> None
Write the contents of this Metadata object to a file. The file
must be able to accept unicode writes. (Preferably by encoding
them via UTF-8.) (Note: this doesn't match the constructor,
which does its own UTF-8 decoding.)
If comment is a string, or a list of strings, they will appear
at the top of the file. You need not include the '#' character in
the comment argument.
"""
if (type(comment) in [str, unicode]):
comment = [comment]
if (comment):
for val in comment:
fl.write('# ')
fl.write(val)
fl.write('\n')
fl.write('\n')
ls = self.keys()
ls.sort()
for key in ls:
for val in self.map[key]:
fl.write(key)
fl.write(': ')
fl.write(val)
fl.write('\n')
def add(self, key, val):
"""add(key, val) -> None
Add a metadata entry with the given key and value.
(This should only be called by a package management tool. A
package should not modify its own metadata.)
"""
dict_accumulate(self.map, key, val)
def delete_all(self, key):
"""delete_all(key) -> None
Delete all metadata entries with the given key. If there are none,
this does nothing.
(This should only be called by a package management tool. A
package should not modify its own metadata.)
"""
if (self.map.has_key(key)):
self.map.pop(key)
class Resources:
"""Resources: represents the contents of a Resources file.
Resources(pkgname, file=None) -- constructor
Create a Resources object by reading the given file. (This should
be a readable file object; typically, the result of opening a
Resources file.) It is the caller's responsibility to close the
file afterwards. If no file is provided, the Resources object will
be empty.
(The file should be opened with mode 'rbU'. This is relaxed about
newlines, but careful about high-bit characters, so that UTF-8
decoding will work. Note that the file should offer bytes, not
Unicode characters.)
The first argument is a package name, but this is only used for
error messages. If the package name is not known when you read the
resources, pass in something usable as a label.
This does not take any pains to verify that the resources exist. That
is the responsibility of whoever created the package.
Public methods:
get() -- get the Resource object with the given key
keys() -- get the keys contained in this Resources object
resources() -- get the Resources contained in this Resources object
build_tree() -- construct a nested dict representing the resources
dump() -- write the contents of this Resources object to a file
create() -- create a Resource object with the given key
"""
def __init__(self, pkgname, fl=None):
self.pkgname = pkgname
# Map of str -> Resource.
self.map = {}
# List of keys, in order added
self.keylist = []
if (fl is None):
return
curdata = None
while True:
ln = fl.readline()
if (not ln):
break
ln = deunicode(ln)
ln = ln.strip()
# Ignore blank lines and comments.
if (not ln):
continue
if (ln.startswith('#')):
continue
if (ln.startswith(':')):
# Beginning of a new section
key = ln[1:].strip()
try:
parse_resource_name(key)
key = str(key)
except ValueError:
raise PackageLoadError(pkgname,
'invalid resource: ' + key)
if (self.map.has_key(key)):
raise PackageLoadError(pkgname,
'duplicate resource: ' + key)
curdata = Resource(key)
self.map[key] = curdata
self.keylist.append(key)
continue
if (not curdata):
raise PackageLoadError(pkgname,
'resource file needs initial ":resource" line')
pos = ln.find(':')
if (pos < 0):
raise PackageLoadError(pkgname,
'resource file contains invalid line: ' + ln)
key = ln[:pos].strip()
val = ln[pos+1:].strip()
if (' ' in key):
raise PackageLoadError(pkgname,
'resource file contains invalid line: ' + ln)
dict_accumulate(curdata.map, key, val)
def __repr__(self):
return '<Resources \'' + self.pkgname + '\'>'
def __len__(self):
return len(self.map)
def get(self, key):
"""get(key) -> Resource
Get the Resource object with the given key. If not found, returns
None.
"""
return self.map.get(key)
def keys(self):
"""keys() -> list of str
Get the keys contained in this Resources object.
This maintains the order of resources loaded (or created).
"""
return list(self.keylist)
def resources(self):
"""resources() -> list of Resource
Get the Resources contained in this Resources object.
"""
return self.map.values()
def build_tree(self):
"""build_tree() -> dict
Construct a dict containing the namespaced groups and resources
in this Resources object. Individual resources are represented by
keys; groups are represented by dicts containing more groups
and resources.
Example: if the resource keys are
'one', 'two', 'grp.three', 'grp.four'
then build_tree() will return {
'one': 'one',
'two': 'two',
'grp': { 'three': 'grp.three', 'four': 'grp.four' }
}
A single entry cannot be both a group and a resource. (That is,
'one' and 'one.two' cannot both be resource keys.) If this
rule is violated, build_tree() will raise ValueError. Duplicate
keys also raise ValueError.
"""
res = {}
for key in self.keys():
ls = parse_resource_name(key)
resel = ls.pop()
grp = res
for el in ls:
subgrp = grp.get(el)
if (subgrp is None):
subgrp = {}
grp[el] = subgrp
if (type(subgrp) != types.DictType):
raise ValueError('resource cannot be an attr of another resource: ' + key)
grp = subgrp
if (grp.has_key(resel)):
raise ValueError('resource cannot contain an attr of another resource: ' + key)
grp[resel] = key
return res
def dump(self, fl, comment=None):
"""dump(file, comment=None) -> None
Write the contents of this Resources object to a file. The file
must be able to accept unicode writes. (Preferably by encoding
them via UTF-8.) (Note: this doesn't match the constructor,
which does its own UTF-8 decoding.)
If comment is a string, or a list of strings, they will appear
at the top of the file. You need not include the '#' character in
the comment argument.
"""
if (type(comment) in [str, unicode]):
comment = [comment]
if (comment):
for val in comment:
fl.write('# ')
fl.write(val)
fl.write('\n')
fl.write('\n')
ls = self.keys()
ls.sort()
for key in ls:
fl.write(':')
fl.write(key)
fl.write('\n')
res = self.map[key]
res.dump(fl)
fl.write('\n')
def create(self, key):
"""create(key) -> Resource
Create a Resource object with the given key. If the key is not
a valid resource key, or if it already exists, this raises ValueError.
(This should only be called by a package management tool. A
package should not modify its own metadata.)
"""
try:
parse_resource_name(key)
key = str(key)
except ValueError:
raise ValueError(self.pkgname + ': invalid resource name: ' + key)
if (self.map.has_key(key)):
raise ValueError(self.pkgname + ': resource already exists: ' + key)
res = Resource(key)
self.map[key] = res
self.keylist.append(key)
return res
class Resource:
"""Resource: represents one section in a Resources file.
Resource(key) -- constructor
Create a Resource with the given key. (The resource key is a
Python-style qualified identifier: "foo" or "Mod.Foo".)
Public methods:
get_all() -- get all metadata entries with the given key
get_one() -- get the metadata entry with the given key
keys() -- get the keys contained in this Resource object
dump() -- write the contents of this Resource object to a file
add() -- add a metadata entry with the given key and value
delete_all() -- delete all metadata entries with the given key
"""
def __init__(self, key):
# Map of unicode -> list of unicode.
self.key = key
self.map = {}
def __repr__(self):
return '<Resource \'' + self.key + '\'>'
def get_all(self, key):
"""get_all(key) -> list of unicode
Returns all metadata entries with the given key. If there are none,
this returns an empty list.
"""
return self.map.get(key, [])
def get_one(self, key, default=None):
"""get_one(key, default=None) -> unicode
Returns the metadata entry with the given key. If there are none,
this returns None (or the default argument, if supplied). If there
is more than one such entry, this returns the first.
"""
res = self.get_all(key)
if (not res):
return default
return res[0]
def keys(self):
"""keys() -> list of unicode
Get the keys contained in this Resource object.
"""
return list(self.map.keys())
def dump(self, fl):
"""dump(file) -> None
Write the contents of this Resource object to a file.
"""
ls = self.keys()
ls.sort()
for key in ls:
for val in self.map[key]:
fl.write(key)
fl.write(': ')
fl.write(val)
fl.write('\n')
def add(self, key, val):
"""add(key, val) -> None
Add a metadata entry with the given key and value.
(This should only be called by a package management tool. A
package should not modify its own metadata.)
"""
dict_accumulate(self.map, key, val)
def delete_all(self, key):
"""delete_all(key) -> None
Delete all metadata entries with the given key. If there are none,
this does nothing.
(This should only be called by a package management tool. A
package should not modify its own metadata.)
"""
if (self.map.has_key(key)):
self.map.pop(key)
class File:
"""File: represents a file in a package.
File(pkg, pathname, univname=None) -- constructor
Creates a file in the given package. (The package may be None if you
are creating a File object ad-hoc. Unless it's for a "mix-in" sound
file -- those need to know where they live.)
The pathname should be a valid, non-relative pathname in native
form. You can also supply the universal, relative-to-the-package
pathname as univname; this is used only when printing the filename
for human eyes.
The file need not exist. But since this class only handles reading
files, a File that refers to a nonexistent path can only generate
IOError when opened.
Public method:
open() -- open the file for reading
"""
def __init__(self, pkg, pathname, univname=None):
self.package = pkg
self.pathname = pathname
self.univname = univname
# If the file was pulled from Resource metadata, the metadata
# field will be set (by the caller). See attrify_filename().
self.metadata = None
def __repr__(self):
if (self.univname):
return '<File \'./' + self.univname + '\'>'
else:
return '<File \'' + self.pathname + '\'>'
def open(self, binary=False):
"""open(binary=False) -> file
Open the file for reading. Returns a Python file object.
If binary is False, the file is opened with newline translation
('rU'); otherwise, in binary mode ('rb').
"""
if (binary):
mode = 'rb'
else:
mode = 'rU'
return open(self.pathname, mode)
class MemFile(File):
"""MemFile: represents a file which exists only in memory.
This is a subclass of File.
MemFile(dat, suffix, label) -- constructor
Creates a file whose contents are the (byte) string dat. The label
is used for displaying the object. The suffix is available to any
user of the file who wants to know what type it is by filename.
(Bleah, Unix.) The suffix should begin with a dot (".aiff", etc.)
Publicly readable fields:
suffix -- the suffix passed in
Public method:
open() -- open the file for reading
"""
def __init__(self, dat, suffix, label):
File.__init__(self, None, '<'+label+'>')
self.data = dat
self.suffix = suffix
self.label = label
def __repr__(self):
return '<MemFile <' + self.label + '>>'
def open(self, binary=False):
return cStringIO.StringIO(self.data)
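# Illustrative usage (assumption, not from the original source):
#
#   fl = MemFile('...raw AIFF bytes...', '.aiff', 'generated-sound')
#   stream = fl.open()   # a cStringIO reader over the in-memory data
#   dat = stream.read()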
# Regular expression for valid Python identifiers: letters, digits, and
# underscores. (But not starting with a digit.)
ident_name_regexp = re.compile('\\A[a-zA-Z_][a-zA-Z_0-9]*\\Z')
# Regular expression for valid package names: one or more elements,
# separated by periods. Each element must contain only lower-case letters,
# digits, and underscores. An element may not start with a digit.
package_name_regexp = re.compile('\\A[a-z_][a-z_0-9]*(\.([a-z_][a-z_0-9]*))*\\Z')
# Regular expression for valid resource names: one or more elements,
# separated by periods. Each element must contain only letters, digits,
# and underscores. An element may not start with a digit.
resource_name_regexp = re.compile('\\A[a-zA-Z_][a-zA-Z_0-9]*(\.([a-zA-Z_][a-zA-Z_0-9]*))*\\Z')
# Regexp which matches one capital letter (as a group)
capital_letter_regexp = re.compile('([A-Z])')
# Regexp which matches a caret followed by one letter (as a group)
caret_letter_regexp = re.compile('\\^([A-Za-z])')
def parse_package_version_spec(val):
"""parse_package_version_spec(val) -> (pkgname, VersionNumber)
or (pkgname, VersionSpec) or (pkgname, None)
Parse a package identifier together with its version spec
(e.g., "org.boodler.sample:1.0") or exact version spec
(e.g., "org.boodler.sample::1.0"). If neither is present,
the second value of the return tuple will be None.
Raises a ValueError if the name was in any way invalid. (Thus,
this function can be used to ensure that a package name is valid.)
"""
vers = None
pos = val.find(':')
if (pos >= 0):
spec = val[ pos+1 : ]
val = val[ : pos ]
if (spec.startswith(':')):
vers = version.VersionNumber(spec[ 1 : ])
else:
vers = version.VersionSpec(spec)
parse_package_name(val)
return (val, vers)
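# Illustrative behavior (derived from the docstring above):
#
#   parse_package_version_spec('org.boodler.sample')
#       => ('org.boodler.sample', None)
#   parse_package_version_spec('org.boodler.sample:1.0')
#       => ('org.boodler.sample', VersionSpec('1.0'))
#   parse_package_version_spec('org.boodler.sample::1.0')
#       => ('org.boodler.sample', VersionNumber('1.0'))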
def parse_package_name(pkgname):
"""parse_package_name(pkgname) -> list of str
Parse a package name (e.g., "org.boodler.sample") into a list of
elements (["org", "boodler", "sample"]).
Raises a ValueError if the name was in any way invalid. (Thus,
this function can be used to ensure that a package name is valid.)
"""
res = package_name_regexp.match(pkgname)
if (not res):
raise ValueError('invalid package name: ' + pkgname)
# Now we know there are no Unicode-only characters
pkgname = str(pkgname)
ls = pkgname.split('.')
if ('' in ls):
raise ValueError('invalid package name: ' + pkgname)
return ls
def encode_package_name(pkgname, vers):
"""encode_package_name(pkgname, vers) -> str
Convert a Boodler package name and VersionNumber into a legal Python
identifier. (This is used as the "real" name for the package module --
the name Python knows about.)
The encoding guarantees that two modules with different (pkgname, vers)
keys will produce different identifiers.
"""
vers = str(vers)
vers = capital_letter_regexp.sub('C\\1', vers)
res = pkgname + 'V' + vers
res = res.replace('+', 'P')
res = res.replace('-', 'M')
res = res.replace('_', 'U')
res = res.replace('.', '_')
return '_BooPkg_'+res
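# Illustrative example (worked by hand from the substitutions above):
#
#   encode_package_name('org.boodler.sample', version.VersionNumber('1.0'))
#       => '_BooPkg_org_boodler_sampleV1_0'
#
# Dots become underscores, so the result is a legal Python identifier.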
def parse_resource_name(resname):
"""parse_resource_name(resname) -> list of str
Parse a resource name (e.g., "voice.Doh") into a list of elements
(["voice", "Doh"]).
Raises a ValueError if the name was in any way invalid. (Thus,
this function can be used to ensure that a resource name is valid.)
"""
res = resource_name_regexp.match(resname)
if (not res):
raise ValueError('invalid resource name: ' + resname)
# Now we know there are no Unicode-only characters
resname = str(resname)
ls = resname.split('.')
if ('' in ls):
raise ValueError('invalid resource name: ' + resname)
return ls
def build_safe_pathname(basedir, filename):
"""build_safe_pathname(basedir, filename) -> str
Take a relative filename and append it to a base pathname, checking
for dangerous practices.
The relative filename must be in universal format: slashes, no
backslashes. It must not begin with a slash, and it must not contain
'..' elements. (Single dots are okay.) If these rules are violated,
this function raises a ValueError.
The base pathname must be in platform format (ie, on Windows, it
should be backslash-delimited). The result will be in platform
format as well.
The result will be plain ascii characters, and will be stored as a
str (even if the arguments are unicode). This is mostly to work around
some annoyances with the aifc module, which refuses to accept unicode
pathnames.
"""
if ('\\' in filename):
raise ValueError('attempt to get filename with backslash: ' + filename)
if (filename.startswith('/')):
raise ValueError('attempt to get absolute filename: ' + filename)
els = filename.split('/')
if ('..' in els):
raise ValueError('attempt to get filename with ..: ' + filename)
# Normalize out double slashes and trailing slashes
els = [ el for el in els if el ]
# Normalize out single dots
els = [ el for el in els if (el != '.') ]
pathname = os.path.join(basedir, *els)
pathname = str(pathname)
return pathname
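# Illustrative example: build_safe_pathname(basedir, 'snd//./clip.aiff')
# drops the empty and '.' elements and joins basedir, 'snd', and 'clip.aiff'
# in platform format, while build_safe_pathname(basedir, '../clip.aiff')
# raises a ValueError.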
def dict_accumulate(dic, key, val):
"""dict_accumulate(dic, key, val) -> bool
Build a dict which maps keys to arrays. dic[key] is an array containing
all the values which have been added under the key. (If none have,
the dict will not contain that key.)
Returns whether this is the first time this key has been added.
"""
ls = dic.get(key)
if (ls is None):
dic[key] = [val]
return True
else:
ls.append(val)
return False
def dict_all_values(dic, ls=None):
"""dict_all_values(dic, ls=None) -> ls
Return a list of all the values in dic, walking recursively into
all dicts that are values. (If the argument is not a dict, this
just returns [dic].)
If the optional argument ls is provided, the values are appended
to it (and it is also returned).
"""
if (ls is None):
ls = []
if (type(dic) != dict):
ls.append(dic)
else:
for val in dic.values():
dict_all_values(val, ls)
return ls
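# Illustrative example (not part of the original module): starting from
# dic = {}, dict_accumulate(dic, 'a', 1) returns True and
# dict_accumulate(dic, 'a', 2) returns False, leaving dic == {'a': [1, 2]};
# dict_all_values({'x': {'y': 3}, 'z': 4}) returns [3, 4] (in some order).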
utf8_decoder = codecs.getdecoder('utf-8')
def deunicode(ln):
"""deunicode(ln) -> unicode
Decode a UTF-8 string into a unicode object. This also strips off the
BOM character (byte order mark, U+FEFF) which occurs at the start of
some UTF-8 files.
(The 'utf-8-sig' decoder would take care of the BOM for us, but it
doesn't exist in Python 2.3.5)
"""
(ln, dummy) = utf8_decoder(ln)
return ln.lstrip(u'\ufeff')
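# Illustrative example: deunicode('\xef\xbb\xbfhello') decodes the UTF-8 BOM
# bytes to u'\ufeff' and strips it, returning u'hello'.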
# late imports
from boopak.pload import PackageLoadError, PackageNotFoundError
/NeuroUnits-0.1.2.tar.gz/NeuroUnits-0.1.2/src/neurounits/visitors/bases/base_actioner_default.py
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------
from .base_actioner import ASTActionerDepthFirst
class ASTActionerDefault(ASTActionerDepthFirst):
def __init__(self, **kwargs):
ASTActionerDepthFirst.__init__(self, **kwargs)
def ActionNode(self, n, **kwargs):
assert False, 'Action node in %s %s' % (type(self), type(n))
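    # Subclasses normally override ActionNode() (or selected Action* methods)
    # rather than using this class as-is. A minimal illustrative sketch, not
    # part of this module, that counts every visited node:
    #
    #   class NodeCounter(ASTActionerDefault):
    #       def __init__(self, **kwargs):
    #           ASTActionerDefault.__init__(self, **kwargs)
    #           self.count = 0
    #       def ActionNode(self, n, **kwargs):
    #           self.count += 1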
def ActionLibrary(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionNineMLComponent(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionIfThenElse(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionInEquality(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionBoolAnd(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionBoolOr(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionBoolNot(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionFunctionDefUser(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionFunctionDefBuiltIn(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionFunctionDefParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionStateVariable(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionSymbolicConstant(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionConstant(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionConstantZero(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionAssignedVariable(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionSuppliedValue(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionTimeVariable(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionAnalogReducePort(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOnEvent(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOnEventStateAssignment(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionTimeDerivativeByRegime(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionRegimeDispatchMap(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionEqnAssignmentByRegime(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionAddOp(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionSubOp(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionMulOp(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionDivOp(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionExpOp(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionFunctionDefBuiltInInstantiation(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionFunctionDefUserInstantiation(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionFunctionDefInstantiationParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionRegime(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOnConditionTriggerTransition(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOnCrossesTriggerTransition(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOnTransitionEvent(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOnEventDefParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionEmitEvent(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionAnalogVisitor(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionEmitEventParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionInterface(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOutEventPort(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionInEventPort(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionOutEventPortParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionInEventPortParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionRTGraph(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionEventPortConnection(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionInterfaceWireContinuous(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionInterfaceWireEvent(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionCompoundPortConnector(self, o,**kwargs):
return self.ActionNode(o, **kwargs)
def ActionCompoundPortConnectorWireMapping(self, o,**kwargs):
return self.ActionNode(o, **kwargs)
def ActionLibraryManager(self, o,**kwargs):
return self.ActionNode(o, **kwargs)
def ActionRandomVariable(self,o,**kwargs):
return self.ActionNode(o, **kwargs)
def ActionRandomVariableParameter(self, o, **kwargs):
return self.ActionNode(o, **kwargs)
def ActionAutoRegressiveModel(self,o,**kwargs):
        return self.ActionNode(o, **kwargs)
/CRIkit2-0.4.4.tar.gz/crikit/preprocess/subtract_baseline.py
import copy as _copy
import numpy as _np
from crikit.preprocess.algorithms.als import (AlsCvxopt as _AlsCvxopt)
from crikit.utils.datacheck import _rng_is_pix_vec
class SubtractBaselineALS:
"""
Subtract baseline using asymmetric least squares algorithm
Parameters
----------
smoothness_param : float, optional (default=1.0)
Smoothness parameter aka 'lambda'
asym_param : float, optional (default=1e-2)
Asymmetry parameter aka 'p'
    redux : int, optional (default=10)
Down-sampling factor (more down-sampling leads to faster detrending,
but with more chance of non-optimal detrending)
rng : ndarray (1D), optional (default=None)
Range in pixels to perform action over
use_imag : bool, optional (default=True)
If spectrum(a) are complex-values, use the imaginary portion?
"""
def __init__(self, smoothness_param=1, asym_param=1e-2,
redux=10, order=2, rng=None, fix_end_points=False,
fix_rng=None, fix_const=1,
max_iter=100, min_diff=1e-5, use_imag=True,
**kwargs):
self.rng = _rng_is_pix_vec(rng)
self._k = kwargs
self._k.update({'smoothness_param' : smoothness_param,
'asym_param' : asym_param,
'redux' : redux,
'order' : order,
'rng' : rng,
'fix_end_points' : fix_end_points,
'fix_rng' : fix_rng,
'fix_const' : fix_const,
'max_iter' : max_iter,
'min_diff' : min_diff})
self.use_imag = use_imag
def _calc(self, data, ret_obj, **kwargs):
self._inst_als = _AlsCvxopt(**kwargs)
try:
# Get the subarray shape
shp = data.shape[0:-2]
total_num = _np.array(shp).prod()
# Iterate over the sub-array -- super slick way of doing it
for num, idx in enumerate(_np.ndindex(shp)):
                print('Detrending iteration {} / {}'.format(num+1, total_num))
# Imaginary portion set
if self.use_imag and _np.iscomplexobj(data):
# if self.rng is None:
# ret_obj[idx] -= 1j*self._inst_als.calculate(data[idx].imag)
# else:
ret_obj[idx] -= 1j*self._inst_als.calculate(data[idx].imag)
else: # Real portion set or real object
# if self.rng is None:
# ret_obj[idx] -= self._inst_als.calculate(data[idx].real)
# else:
ret_obj[idx] -= self._inst_als.calculate(data[idx].real)
except Exception:
return False
else:
# print(self._inst_als.__dict__)
return True
def transform(self, data, **kwargs):
if self.rng is None:
self.rng = _np.arange(data.shape[-1])
total_rng = _np.arange(data.shape[-1])
not_in_rng = list(set(total_rng).difference(self.rng))
not_in_rng.sort()
not_in_rng = _np.array(not_in_rng)
if not_in_rng.size != 0:
data[..., not_in_rng] *= 0
self._k.update(kwargs)
success = self._calc(data, ret_obj=data, **self._k)
return success
def calculate(self, data, **kwargs):
if self.rng is None:
self.rng = _np.arange(data.shape[-1])
data_copy = _np.zeros(data.shape, dtype=data.dtype)
data_copy[..., self.rng] = 1*data[..., self.rng]
self._k.update(kwargs)
success = self._calc(data, ret_obj=data_copy, **self._k)
if success:
return data_copy
else:
return None
if __name__ == '__main__': # pragma: no cover
x = _np.linspace(-100, 100, 1000)
y = 10*_np.exp(-(x**2/(2*20**2)))
rng = _np.arange(200,800)
als = SubtractBaselineALS(smoothness_param=1, asym_param=1e-3, rng=rng,
redux=1, fix_end_points=False, fix_rng=None,
verbose=True)
    y_als = als.calculate(y)
/Moose-0.9.9b3.tar.gz/Moose-0.9.9b3/docs/drafts/milestones/r1.0.md
# Moose Milestone (release/1.0)
- - - -
## Overall Requirements
* Schedule: the coding and testing work for version 1.0 must be completed before _October 8th_;
* Functionality:
    * Provide default implementations, or ones needing only minimal coding, for 95% of the code in **automate**;
    * Reserve interfaces and documentation for features that are important but not shipped in this release;
* Quality: raise the overall code coverage to 50%, and the coverage of frequently used features to 85% or above;
* Maintenance:
    * Provide documentation for **command**, **actions**, and **connection**;
    * Give modules that form a complete functional unit (especially single-file ones) a header comment explaining the module's main design purpose, and give classes comments on their functionality and parameters;
    * Provide three examples under examples/;
## Detailed Functional Requirements
The detailed functional requirements are described in three parts: **common behavior abstractions**, **low-level libraries and tools**, and **interaction**.
### Common Behavior Abstractions
Common behavior abstractions are the result of abstracting day-to-day applications and features. The goal is to reuse the code for a complete unit of business logic and to avoid reinventing the wheel whenever two workflows differ only in small details. At the common-behavior-abstraction layer, the following features are required:
1. Upload
    1. Upload all data of a given type to a single-phase task on the platform and generate an index;
    2. Upload to multi-phase tasks organized by folder and generate the corresponding indexes;
    3. Selectively upload several kinds of data, with the index generated from only one of them;
2. Export
    1. Custom export: given a task_id, customize the exported data format and any required processing;
    2. Standardized image export: export the original images, rendered images, mask images, and JSON files;
    3. Standardized video export: export per-frame images, rendered images, and CSV files (with a standard naming scheme);
    4. Standardized audio export: export single-sentence audio clips, the corresponding text, and metadata (using the multi-locale template as an example);
    5. Image JSON exports can be converted to PASCAL VOC format;
    6. Provide an entry point for exporting image metadata;
3. Collection-to-annotation (Migrate)
    1. Migrate an entire phase of collected data to annotation;
    2. Migrate to annotation given number ranges, phase numbers, and a batch mapping table (with a standard format specification);
4. Corpus assembly (Assemble)
    1. Provide a corpus-assembly interface for the standard format _(using the children collection-to-annotation case as an example)_;
5. Preprocessing
    1. Video frame extraction
    2. Audio truncation
6. Accountability (Blame)
    1. Export detailed information about the annotators of a given file name, along with the annotation links
7. Download
    1. Define a standard download interface that automatically downloads data using multiple threads
### Low-Level Libraries and Tools
Low-level libraries and tools support the implementation of the higher-level behavior abstractions, and also help developers implement things quickly when they hit a problem that the common behavior abstractions do not solve. The following features need to be implemented:
1. Azure cloud connection and common operations:
    1. Azure connection and reconnection;
    2. Batch/single upload
    3. Batch/single download
    4. List all files in a container
    5. Create a container
    6. Modify a container's permissions
2. MongoDB connection and common operations:
    1. MongoDB connection and reconnection;
    2. MongoDB (conditional) queries;
    3. MongoDB updates;
3. SQL Server connection and common operations:
    1. SQL Server connection and reconnection;
    2. SQL Server queries and batch queries;
    3. SQL Server inserts and batch inserts;
    4. Definitions of commonly used SQL Server query statements; _(to be specified in detail)_
4. Data extraction processes
    1. Standardized annotated-data extraction: freely combine SQL queries and MongoDB extraction steps to obtain structured raw and annotated data
    2. Standardized raw-data extraction: when no annotation exists, extract only the raw data;
5. Toolbox
    1. Video frame-extraction module
    2. Image drawing module
    3. TextGrid parsing module
    4. ffmpeg invocation module
    5. Download module
    6. Annotation-result checking module
6. Models
    1. Model the common format
    2. Model the GeoJSON format
    3. Model single-paragraph speech data collected via the app
    4. Model multi-paragraph speech data
    5. Model video annotation data
    6. Provide model-based asynchronous download
7. Utils
    1. Encoding conversion (GBK - Unicode - UTF-8)
    2. System path conversion (Unix - Windows)
    3. Shortcuts for commonly used functions _(to be specified in detail)_
        1. Path traversal
        2. Directory checking and creation
### Interaction
Interaction defines how Moose and the end user communicate; it is the specification of the input and output formats for apps built on Moose once they are complete, including:
1. Create a project as follows:
```shell
$ moose-admin startproject project_name
```
2. Create an app as follows:
```shell
$ cd project_name
$ python manage.py startapp app_name --template project_name/app_template
```
3. Create an order as follows:
```shell
$ python manage.py genconf app_name -c conf.cfg
```
4. Edit an order as follows:
```shell
$ python manage.py editconf app_name -c conf.cfg
```
5. Run actions with the specified orders as follows:
```shell
$ python manage.py run app_name -a act1 -a act2 -c conf1.cfg -c conf2.cfg -o opt1=val1
```
#moose
/EUKulele-2.0.0.tar.gz/EUKulele-2.0.0/code_of_conduct.md
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies within all project spaces, and it also applies when
an individual is representing the project or its community in public spaces.
Examples of representing a project or community include using an official
project e-mail address, posting via an official social media account, or acting
as an appointed representative at an online or offline event. Representation of
a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [[email protected]](mailto:[email protected]). All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
/MagicDrCOM-1.2.0-py3-none-any.whl/drcom/main/tool.py
import sys
import time
import struct
import socket
import random
import threading
from drcom.main.utils import mac
from drcom.main.utils import hostname
from drcom.main.utils import ipaddress
from drcom.main.utils import md5
from drcom.main.utils import checksum
from drcom.main.utils import int2hex_str
from drcom.main.utils import clean_socket_buffer
from drcom.main.logger import Log
from drcom.main.excepts import DrCOMException
from drcom.main.excepts import TimeoutException
from drcom.main.excepts import MagicDrCOMException
from drcom.configs.settings import *
class DrCOMClient(object):
def __init__(self, usr, pwd):
self.usr = usr
self.pwd = pwd
self.salt = b""
self.server_ip = ""
self.auth_info = b""
self._interrupt = False
self.login_flag = False
self.alive_flag = False
self.platform = sys.platform
self.__initialise__()
@property
def interrupt(self):
return self._interrupt
@interrupt.setter
def interrupt(self, value):
self._interrupt = value
def __initialise__(self):
"""
        Try to obtain the current host's hostname, MAC address, and network IP address
:return:
"""
self.host_name = hostname()
        if LOCAL_MAC:  # if no local MAC is specified, try to detect it automatically
self.mac = bytes().fromhex(LOCAL_MAC)
else:
self.mac = bytes().fromhex(mac())
        if LOCAL_IP:  # if no local IP is specified, try to detect it automatically
self.ip = LOCAL_IP
else:
self.ip = ipaddress()
if not self.host_name or not self.mac or not self.ip:
            Log(logging.ERROR, 10, "[DrCOM.__init__]:Unable to obtain the local NIC info; please report this on the project's issues page")
            raise DrCOMException("Unable to obtain the local NIC info")
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.settimeout(3)
try:
self.socket.bind(("", 61440))
except socket.error:
            Log(logging.ERROR, 10, "[DrCOM.__init__]:Unable to bind port 61440; check whether another process is occupying it")
            raise DrCOMException("Unable to bind local port 61440")
def _make_login_package(self):
"""
        Build the login packet
:return:
"""
# (0:3 4) Header = Code + Type + EOF + (UserName Length + 20)
data = b'\x03\x01\x00' + int2hex_str(len(self.usr) + 20)
# (4:19 16) MD5_A = MD5(Code + Type + Salt + Password)
data += md5(b'\x03\x01' + self.salt + self.pwd.encode('ascii'))
        # (20:55 36) username
        data += self.usr.encode('ascii').ljust(36, b'\x00')
        # (56:56 1) control check status
        data += CONTROL_CHECK_STATUS
        # (57:57 1) adapter number?
        data += ADAPTER_NUMBER
        # (58:63 6) (MD5_A xor MAC)
        data += int2hex_str(int.from_bytes(data[4:10], 'big') ^ int.from_bytes(self.mac, 'big')).rjust(6, b'\x00')
        # (64:79 16) MD5_B = MD5(0x01 + Password + Salt + 0x00 *4)
        data += md5(b'\x01' + self.pwd.encode('ascii') + self.salt + b'\x00' * 4)
        # (80:80 1) NIC Count
        data += b'\x01'
        # (81:84 4) local IP
        data += socket.inet_aton(self.ip)
        # (85:88 4) IP address 2
        data += b'\00' * 4
        # (89:92 4) IP address 3
        data += b'\00' * 4
        # (93:96 4) IP address 4
        data += b'\00' * 4
        # (97:104 8) checksum A
        data += md5(data + b'\x14\x00\x07\x0b')[:8]
        # (105:105 1) IP Dog
        data += IP_DOG
        # (106:109 4) unknown
        data += b'\x00' * 4
        # (110:141 32) hostname
        data += self.host_name.encode('ascii').ljust(32, b'\x00')
        # (142:145 4) primary DNS: 114.114.114.114
        data += b'\x72\x72\x72\x72'
        # (146:149 4) DHCP server IP
        data += socket.inet_aton(DHCP_SERVER_IP)
        # (150:153 4) secondary DNS: 8.8.8.8
        data += b'\x08\x08\x08\x08'
        # (154:161 8) unknown
        data += b'\x00' * 8
        data += b'\x94\x00\x00\x00'  # (162:165 4) unknown
        data += b'\x06\x00\x00\x00'  # (166:169 4) OS major; differs between clients
        data += b'\x01\x00\x00\x00'  # (170:173 4) OS minor; differs between clients
        data += b'\xb1\x1d\x00\x00'  # (174:177 4) OS build; differs between clients
        data += b'\x02\x00\x00\x00'  # (178:181 4) unknown, OS-related
        # (182:213 32) operating system name
        data += "WINDOWS".encode('ascii').ljust(32, b'\x00')
        # (214:309 96) unknown; differs between clients. The BISTU build puts an identifier here, but it does not affect login
        data += b'\x00' * 96
        # (310:311 2)
        data += AUTH_VERSION
        # (312:313 2) unknown
        data += b'\x02\x0c'
        # (314:317 4) checksum
        data += checksum(data + b'\x01\x26\x07\x11\x00\x00' + self.mac)
        # (318:319 2) unknown
        data += b'\x00\x00'
        # (320:325 6) local MAC
        data += self.mac
        # (326:326 1) auto logout / default: False
        data += b'\x00'
        # (327:327 1) broadcast mode / default : False
        data += b'\x00'
        # (328:329 2) unknown; differs between clients
        data += b'\x17\x77'
return data
def _make_alive_package(self, num, key, cls):
"""
        Build the keep-alive (heartbeat) packet
:param num:
:param key:
:param cls:
:return:
"""
        # (0:0 1) unknown
        data = b'\x07'
        # (1:1 1) sequence number
        data += int2hex_str(num % 256)
        # (2:4 3) unknown
        data += b'\x28\x00\x0b'
        # (5:5 1) type
        data += int2hex_str(cls)
        # (6:7 2) this field does not change in the BISTU build
        if num == 0:
            data += b'\xdc\x02'
        else:
            data += KEEP_ALIVE_VERSION
        # (8:9 2) unknown; changes with every packet
        data += b'\x2f\x79'
        # (10:15 6) unknown
        data += b'\x00' * 6
        # (16:19 4)
        data += key
        # (20:23 4) unknown
        data += b'\x00' * 4
        # data += struct.pack("!H",0xdc02) # unverified
        if cls == 1:
            # (24:39 16) unknown
data += b'\x00' * 16
if cls == 3:
# host_ip
foo = b''.join([int2hex_str(int(i)) for i in self.ip.split('.')])
# use double keep in main to keep online .Ice
crc = b'\x00' * 4
# data += struct.pack("!I",crc) + foo + b'\x00' * 8
data += crc + foo + b'\x00' * 8
return data
def _make_logout_package(self):
# (0:3 4) Header = Code + Type + EOF + (UserName Length + 20)
data = b'\x06\x01\x00' + int2hex_str(len(self.usr) + 20)
        # TODO: the algorithm for the MD5_A field in the BISTU build is unknown, but the following works
        # (4:19 16) MD5_A = MD5(Code + Type + Salt + Password)
        data += md5(b'\x06\x01' + self.salt + self.pwd.encode('ascii'))
        # (20:55 36) username
        data += self.usr.encode('ascii').ljust(36, b'\x00')
        # (56:56 1) control check status
        data += CONTROL_CHECK_STATUS
        # (57:57 1) adapter number?
data += ADAPTER_NUMBER
# (58:63 6) (MD5_A xor MAC)
data += int2hex_str(int.from_bytes(data[4:10], 'big') ^ int.from_bytes(self.mac, 'big')).rjust(6, b'\x00')
data += self.auth_info
return data
def _send_package(self, pkg, server):
"""
        Send a packet. Each send is attempted up to three times; if all attempts fail, a timeout exception is raised.
:param pkg:
:return:
"""
last_times = ReTryTimes
while last_times > 0 and not self.interrupt:
last_times = last_times - 1
clean_socket_buffer(self.socket)
self.socket.sendto(pkg, server)
try:
data, address = self.socket.recvfrom(1024)
except socket.timeout:
                Log(logging.WARNING, 0, "[DrCOM._send_package]:Retrying, attempts remaining [{}]...".format(last_times))
continue
if data and address:
return data, address
        # Reaching this point means no response was received: either the
        # retries were exhausted or the send was interrupted.
        exception = TimeoutException("[DrCOM._send_package]:Failure on sending package...")
        exception.last_pkg = pkg
        raise exception
def send_alive_pkg1(self):
"""
        Send the type-1 keep-alive packet
:return:
"""
pkg = b'\xff'
pkg += md5(b'\x03\x01' + self.salt + self.pwd.encode('ascii')) # MD5_A
pkg += b'\x00' * 3
pkg += self.auth_info
pkg += struct.pack('!H', int(time.time()) % 0xFFFF)
pkg += b'\x00' * 3
data, address = self._send_package(pkg, (self.server_ip, 61440))
if data[0] == 0x07:
            Log(logging.DEBUG, 0, "[DrCOM.send_alive_pkg1]:Successfully sent heartbeat packet type 1...")
        else:
            # If the received packet cannot be recognized, mark the current state as offline
            Log(logging.ERROR, 40, "[DrCOM.send_alive_pkg1]:Received unknown packet content: {}".format(data))
self.alive_flag = False
def send_alive_pkg2(self, num, key, cls):
"""
        Send the type-2 keep-alive packet
:return:
"""
response = 0
pkg = self._make_alive_package(num=num, key=key, cls=cls)
data, address = self._send_package(pkg, (self.server_ip, 61440))
if data[0] == 0x07:
            Log(logging.DEBUG, 0, "[DrCOM.send_alive_pkg2]:Successfully sent heartbeat packet 2[{}]...".format(cls))
            response = data[16:20]
        else:
            # If the received packet cannot be recognized, mark the current state as offline
            Log(logging.ERROR, 50, "[DrCOM.send_alive_pkg2]:Received unknown packet content: {}".format(data))
self.alive_flag = False
return response
def prepare(self):
"""
        Obtain the server IP and salt
:return:
"""
random_value = struct.pack("<H", int(time.time() + random.randint(0xF, 0xFF)) % 0xFFFF)
pkg = b'\x01\x02' + random_value + b'\x0a' + b'\x00' * 15
        # Try the currently known campus authentication server addresses
for _ in [(SERVER_IP, 61440), ("1.1.1.1", 61440), ("202.1.1.1", 61440)]:
data, address = self._send_package(pkg, _)
            # If no valid response is received, try the next server address
Log(logging.DEBUG, 0, "[DrCOM.prepare]:Receive PKG content: {}".format(data))
if data[0:4] == b'\x02\x02' + random_value:
self.server_ip = address[0]
self.salt = data[4:8]
Log(logging.DEBUG, 0, "[DrCOM.prepare]:Server IP: {}, Salt: {}".format(self.server_ip, self.salt))
return
else:
                Log(logging.ERROR, 20, "[DrCOM.prepare]:Received unknown packet content: {}".format(data))
if not self.server_ip or not self.salt:
exception = DrCOMException("No Available Server...")
exception.last_pkg = pkg
raise exception
def reset(self):
"""
        Reset all parameters
:return:
"""
self.interrupt = False
self.login_flag = False
self.alive_flag = False
def login(self):
"""
        Log in to the target server
:return:
"""
pkg = self._make_login_package()
data, address = self._send_package(pkg, (self.server_ip, 61440))
Log(logging.DEBUG, 0, "[DrCOM.login]:Receive PKG content: {}".format(data))
if data[0] == 0x04:
self.auth_info = data[23:39]
            # Mark the client as logged in and online here
            self.login_flag = True
            self.alive_flag = True
            Log(logging.INFO, 0, "[DrCOM.login]:Successfully logged in to DrCOM server...")
elif data[0] == 0x05:
if len(data) > 32:
if data[32] == 0x31:
                    Log(logging.ERROR, 31, "[DrCOM.login]:Login failed because of a wrong username...")
                if data[32] == 0x33:
                    Log(logging.ERROR, 32, "[DrCOM.login]:Login failed because of a wrong password...")
            else:
                Log(logging.ERROR, 30, "[DrCOM.login]:Received unknown packet content: {}".format(data))
if not self.login_flag:
exception = DrCOMException("Failure on login to DrCOM...")
exception.last_pkg = pkg
raise exception
def keep_alive(self):
num = 0
key = b'\x00' * 4
while self.alive_flag:
try:
self.send_alive_pkg1()
key = self.send_alive_pkg2(num, key, cls=1)
key = self.send_alive_pkg2(num, key, cls=3)
except TimeoutException as exc:
Log(logging.ERROR, 60, "[DrCOM.keep_alive]:" + exc.info)
self.alive_flag = False
break
num = num + 2
time.sleep(10)
def logout(self):
"""
        Log out; only tested against the BISTU build.
        The logout process involves 6 packets in total, in 3 pairs of 2:
        the first pair is the same send-and-acknowledge as alive_pkg1;
        the second pair apparently tells the gateway to prepare for logout;
        the third pair sends the detailed logout information, including the username.
"""
        # First pair: presumably a check that the network is currently reachable.
        # The last two bytes of the sent packet may serve a verification purpose.
        self.send_alive_pkg1()
        # Second pair: prepare for logout.
        # The last two bytes are the same as in alive_pkg1.
pkg = b'\x01\x03'
pkg += b'\x00\x00'
pkg += b'\x0a'
pkg += b'\x00' * 15
data, address = self._send_package(pkg, (self.server_ip, 61440))
if data[0:2] != b'\x02\x03':
            Log(logging.ERROR, 70, "[DrCOM.logout]:Received unknown packet content: {}".format(data))
        # Third pair: send the logout details
pkg = self._make_logout_package()
data, address = self._send_package(pkg, (self.server_ip, 61440))
if data[0] == 0x04:
self.login_flag = False
else:
            Log(logging.ERROR, 71, "[DrCOM.logout]:Received unknown packet content: {}".format(data))
if self.login_flag:
exception = DrCOMException("Failure on logout to DrCOM...")
exception.last_pkg = pkg
raise exception
class MagicDrCOMClient(object):
def __init__(self):
print("欢迎使用BISTU专版的第三方Dr.COM客户端")
print("本项目目前由@Ryuchen进行开发和维护")
print("如有任何问题欢迎在本项目的github页面提交issue")
print("[https://github.com/Ryuchen/MagicDrCOM/issues]")
self._usr = ""
self._pwd = ""
# self._login_flag = False
self._alive_check = False
self._relogin_flag = ReLoginFlag
self._relogin_times = ReLoginTimes
self._relogin_check = ReLoginCheck
try:
self._client = DrCOMClient(self._usr, self._pwd)
except DrCOMException as exc:
            Log(logging.ERROR, 10, "[MagicDrCOMClient.__init__]:Unable to initialize: " + exc.info)
            raise MagicDrCOMException("Please check your local settings and try again")
@property
def username(self):
return self._usr
@username.setter
def username(self, value):
if value == "":
raise MagicDrCOMException("账号未填写")
self._usr = value
self._client.usr = value
@property
def password(self):
return self._pwd
@password.setter
def password(self, value):
if value == "":
raise MagicDrCOMException("密码未填写")
self._pwd = value
self._client.pwd = value
# @property
# def login_flag(self):
# return self._login_flag
#
# @login_flag.setter
# def login_flag(self, value):
# self._login_flag = value
@property
def relogin_flag(self):
return self._relogin_flag
@relogin_flag.setter
def relogin_flag(self, value):
self._relogin_flag = value
@property
def relogin_times(self):
return self._relogin_times
@relogin_times.setter
def relogin_times(self, value):
self._relogin_times = value
@property
def relogin_check(self):
return self._relogin_check
@relogin_check.setter
def relogin_check(self, value):
self._relogin_check = value
@property
def status(self):
if self._client.login_flag:
if self._client.alive_flag:
return ONLINE
else:
return DIEOUT
else:
return OFFLINE
def _interval_loop(self, period, callback, args):
"""
        Simulate an event loop that periodically invokes the network-check callback
        :param period: interval between calls, in seconds
        :param callback: the callback to invoke
        :param args: arguments for the callback
:return:
"""
try:
while self._client.login_flag:
time.sleep(period)
callback(*args)
except MagicDrCOMException:
            Log(logging.ERROR, 120, "[MagicDrCOM._interval_loop]:Maximum number of relogin retries exceeded!")
def _set_interval(self, period, callback, *args):
threading.Thread(target=self._interval_loop, args=(period, callback, args)).start()
def _daemon(self):
"""
        Check network connectivity
Host: 114.114.114.114
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(1)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(("114.114.114.114", 53))
Log(logging.INFO, 0, "[MagicDrCOM.check]:Successful connect to network...")
return True
except socket.error:
if self.status == DIEOUT:
self.relogin()
def _login(self):
if self._client.usr == "" or self._client.pwd == "":
raise MagicDrCOMException("Please enter your username and password...")
Log(logging.INFO, 0, "[MagicDrCOM.login]:Starting login...")
try:
self._client.prepare()
self._client.login()
keep_alive_thread = threading.Thread(target=self._client.keep_alive, args=())
keep_alive_thread.start()
Log(logging.INFO, 0, "[MagicDrCOM.login]:Successfully login to server...")
except (DrCOMException, TimeoutException) as exc:
raise MagicDrCOMException("Failure on login: " + exc.info)
def login(self):
"""
        Log in
:return:
"""
self._login()
if self.relogin_flag:
self._set_interval(self.relogin_check, self._daemon)
Log(logging.INFO, 0, "[MagicDrCOM.login]:Starting network check daemon thread...")
def relogin(self):
self.relogin_times -= 1
if self.relogin_times >= 0:
            Log(logging.WARNING, 0, "[MagicDrCOM.relogin]:Starting relogin, %d attempts remaining..." % self.relogin_times)
self._client.logout()
time.sleep(5)
self._client.prepare()
self._client.login()
else:
raise MagicDrCOMException("Maximum time reties...")
def logout(self):
Log(logging.INFO, 0, "[MagicDrCOM.logout]:Sending logout request to DrCOM Server")
try:
self._client.logout()
self._client.interrupt = True
Log(logging.INFO, 0, "[MagicDrCOM.logout]:Successful logout to DrCOM Server")
except (DrCOMException, TimeoutException) as exc:
raise MagicDrCOMException("Failure on logout: " + exc.info)
def reset(self):
self._client.reset()
# Command-line mode
if __name__ == '__main__':
try:
mc = MagicDrCOMClient()
mc.username = USERNAME
mc.password = PASSWORD
except MagicDrCOMException:
sys.exit(1)
try:
mc.login()
user_input = ""
while not user_input == "logout":
print("登出请输入 logout ")
user_input = input() # 等待输入进行阻塞
mc.logout()
sys.exit(1)
except KeyboardInterrupt as e:
mc.logout()
        sys.exit(1)
/CodeChat_Server-0.2.18.tar.gz/CodeChat_Server/renderer.py
import asyncio
import codecs
from contextlib import contextmanager
from enum import Enum, auto
import fnmatch
import io
import json
import os
from pathlib import Path
import re
import shlex
import shutil
import sys
from tempfile import NamedTemporaryFile
from typing import (
Any,
cast,
Callable,
Coroutine,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
# Third-party imports
# -------------------
from CodeChat.CodeToRest import code_to_rest_string, html_static_path # type: ignore
from CodeChat.CommentDelimiterInfo import SUPPORTED_GLOBS # type: ignore
import docutils.core
import docutils.writers.html4css1
import markdown # type: ignore
import pygments.util
import strictyaml
# Local imports
# -------------
# None
#
# Internal renderers
# ==================
# These renderers are invoked via function calls to 3rd party Python libraries.
#
# They all return ``(html, errors)``.
#
# Markdown
# --------
# Convert Markdown to HTML
def _render_markdown(text: str, file_path: str) -> Tuple[str, str]:
return (
markdown.markdown(
text,
# See https://python-markdown.github.io/extensions/. Enable a few by default.
extensions=[
"markdown.extensions.extra",
],
),
"",
)
# reStructuredText (reST)
# -----------------------
# Convert reStructuredText (reST) to HTML.
def _render_ReST(
text: str, filePath: str, use_codechat: bool = False
) -> Tuple[str, str]:
errStream = io.StringIO()
settingsDict = {
# Make sure to use Unicode everywhere. This name comes from
# ``docutils.core.publish_string`` version 0.12, lines 392 and following.
"output_encoding": "unicode",
# While ``unicode`` **should** work for ``input_encoding``, it doesn't if
# there's an ``.. include`` directive, since this encoding gets passed to
# ``docutils.io.FileInput.__init__``, in which line 236 of version 0.12
# tries to pass the ``unicode`` encoding to ``open``, producing:
#
# .. code:: python3
# :number-lines:
#
# File "...\python-3.4.4\lib\site-packages\docutils\io.py", line 236, in __init__
# self.source = open(source_path, mode, **kwargs)
# LookupError: unknown encoding: unicode
#
# So, use UTF-8 and encode the string first. Ugh.
"input_encoding": "utf-8",
# Don't stop processing, no matter what.
"halt_level": 5,
# Capture errors to a string and return it.
"warning_stream": errStream,
"stylesheet_dirs": html_static_path(),
"stylesheet_path": ["docutils.css"]
+ (["CodeChat.css"] if use_codechat else []),
}
htmlString = docutils.core.publish_string(
bytes(text, encoding="utf-8"),
writer_name="html",
settings_overrides=settingsDict,
)
errString = errStream.getvalue()
errStream.close()
return htmlString, errString
# CodeChat
# --------
# Convert source code to HTML.
def _render_CodeChat(text: str, filePath: str) -> Tuple[str, str]:
try:
rest_string = code_to_rest_string(text, filename=filePath)
except KeyError as e:
return (
"",
f"{filePath}:: ERROR: this file, recognized as {e.args[0]}, is not supported by CodeChat.",
)
except (ValueError, pygments.util.ClassNotFound):
# Although the file extension may be in the list of supported
# extensions, CodeChat may not support the lexer chosen by Pygments.
# For example, a ``.v`` file may be Verilog (supported by CodeChat)
# or Coq (not supported). In this case, provide an error message.
return (
"",
f"{filePath}:: ERROR: this file is not supported by CodeChat.",
)
return _render_ReST(rest_string, filePath, True)
# Fake renderers
# --------------
# "Render" (pass through) the provided text.
def _pass_through(text: str, file_path: str) -> Tuple[str, str]: # pragma: no cover
return text, ""
# The "error renderer" when a renderer can't be found.
def _error_renderer(text: str, file_path: str) -> Tuple[str, str]: # pragma: no cover
return "", "{}:: ERROR: No converter found for this file.".format(file_path)
# External renderers
# ==================
# These renderers run in an external program and are all invoked as a subprocess.
#
# Provide a type alias for the ``co_build`` function.
Co_Build = Callable[[str], Coroutine[Any, Any, None]]
# Single-file
# -----------
# Convert a single file using an external program.
async def _render_external_file(
# See text_.
text: str,
# See file_path_.
file_path: str,
# See html_path_.
html_path: Optional[str],
tool_or_project_path: List[Union[bool, str]],
# See co_build_.
co_build: Co_Build,
) -> Tuple[str, str]:
# Split the provided tool path.
uses_stdin, uses_stdout, *args_ = tool_or_project_path
args = cast(List[str], args_)
# Run from the directory containing the file.
cwd = Path(file_path).parent
# Save the text in a temporary file for use with the external tool.
with _optional_temp_file(not uses_stdin) as input_file, _optional_temp_file(
not uses_stdout
) as output_file:
if input_file:
# Write the text to the input file then close it, so that it can be opened on all platforms by the external tool. See `NamedTemporaryFile <https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile>`_.
input_file.write(text)
input_file.close()
if output_file:
# Close the output file for the same reason.
output_file.close()
# Do replacements on the args.
args = [
s.format(
input_file=input_file and input_file.name,
output_file=output_file and output_file.name,
)
for s in args
]
stdout, stderr = await _run_subprocess(
args, cwd, None if input_file else text, bool(output_file), co_build
)
# Gather the output from the file if necessary.
if output_file:
with open(
output_file.name, "r", encoding="utf-8", errors="backslashreplace"
) as f:
stdout = f.read()
return stdout, stderr
# Project
# -------
# Define the possible types of projects.
class ProjectTypeEnum(Enum):
general = auto()
PreTeXt = auto()
Doxygen = auto()
# This class reads and interprets a project configuration file.
class ProjectConfFile:
# The type of the project declared in this project's configuration file.
project_type: ProjectTypeEnum
# The absolute path to this project's root directory.
project_path: Path
# The absolute path to the currently open file.
file_path: Path
# The extension for HTML files.
html_ext: str
# The absolute path to this project's source files.
source_path: Path
# The absolute path to this project's output files.
output_path: Path
# Arguments used to invoke this project's renderer.
args: Union[list, str]
# Read and process a CodeChat project configuration file.
def __init__(self, project_config_file_path: Path, file_path: Path):
try:
with open(project_config_file_path, encoding="utf-8") as f:
data = f.read()
except Exception as e:
raise RuntimeError(
f"{project_config_file_path}:: ERROR: Unable to open. {e}"
)
schema = strictyaml.Map(
{
strictyaml.Optional("source_path", default="."): strictyaml.Str(),
"output_path": strictyaml.Str(),
"args": strictyaml.Str() | strictyaml.Seq(strictyaml.Str()),
strictyaml.Optional("html_ext", default=".html"): strictyaml.Str(),
strictyaml.Optional("project_type", default="general"): strictyaml.Enum(
[x.name for x in list(ProjectTypeEnum)]
),
}
)
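        # For reference, a minimal codechat_config.yaml matching this schema
        # might look like the following; the paths and the build command are
        # illustrative values, not shipped defaults:
        #
        #   source_path: .
        #   output_path: _build
        #   args: "{sys_executable} -m sphinx . {output_path}"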
try:
data_dict = strictyaml.load(data, schema).data
except strictyaml.YAMLError as e:
raise RuntimeError(
f"{project_config_file_path}:: ERROR: Unable to parse. {e}"
)
# Save items that don't need processing.
self.project_type = ProjectTypeEnum[data_dict["project_type"]]
self.html_ext = data_dict["html_ext"]
self.file_path = file_path
# Make paths absolute.
self.project_path = project_config_file_path.parent
def abs_path(path: Union[str, Path]) -> Path:
path_ = Path(path)
if not path_.is_absolute():
path_ = self.project_path / path_
return path_
self.source_path = abs_path(data_dict["source_path"])
self.output_path = abs_path(data_dict["output_path"])
# Find the first XML ID in the source file if this is a PreTeXt project.
xml_id = ""
if self.project_type == ProjectTypeEnum.PreTeXt:
# A crude approximation of an XML ID that should cover most cases.
match = re.search(r'xml:id="([\w:_\-.]+)', self.file_path.read_text())
if match:
xml_id = match.group(1)
# Perform replacement on the args.
def args_format(arg):
return arg.format(
project_path=self.project_path,
source_path=self.source_path,
output_path=self.output_path,
sys_executable=sys.executable,
xml_id=xml_id,
)
args = data_dict["args"]
self.args = (
args_format(args)
if isinstance(args, str)
else [args_format(arg) for arg in args]
)
# _`checkModificationTime`: Verify that ``source_file`` is older than the HTML file produced by this source, reporting errors if this isn't true.
def checkModificationTime(
self,
# A Path to the source file to examine.
source_file: Path,
# An optional Path to the previously displayed HTML file.
html_path: Optional[Path],
) -> Tuple[
# A path to the proposed or found HTML file.
Path,
# A list of error messages.
List[str],
]:
# Determine a first guess at the location of the rendered HTML.
prefix = (
self.output_path
if self.project_type != ProjectTypeEnum.Doxygen
else Path(".")
)
error_arr = []
try:
base_html_file = prefix / source_file.relative_to(self.source_path)
except ValueError as e:
# Give some arbitrary value to the output path, since it can't be determined.
base_html_file = prefix
error_arr.append(
"{}:: ERROR: unable to compute path relative to {}. {}".format(
source_file, self.source_path, e
)
)
# For PreTeXt, use the mapping of source files to XML IDs == output file name.
if self.project_type == ProjectTypeEnum.PreTeXt:
try:
mapping = self.load_pretext_mapping()
except Exception:
pass
else:
# Before looking up the file, ``resolve()`` it to get the canonical representation (fix case on Windows), then make this relative to the project directory. Make it posix-formatted, so that the mappings work cross-platform. (For example, this runs on Windows, but the PreTeXt CLI runs on Linux in WSL.)
xml_id_list = mapping.get(
str(source_file.resolve().relative_to(self.project_path).as_posix())
)
if xml_id_list:
# See if any of the mappings match the currently-displayed file. If so, use that one. Otherwise, pick the first mapping.
for id_ in reversed(xml_id_list):
base_html_file = self.output_path / id_
if html_path and base_html_file == html_path.with_suffix(""):
break
# For Doxygen, rename certain characters in the file name. See `util.cpp::escapeCharsInString <https://github.com/doxygen/doxygen/blob/master/src/util.cpp#L3443>`_.
elif self.project_type == ProjectTypeEnum.Doxygen:
doxygen_renamed_path = base_html_file.as_posix()
for old, new in (
[":", "_1"],
["/", "_2"],
["<", "_3"],
[">", "_4"],
["*", "_5"],
["&", "_6"],
["|", "_7"],
[".", "_8"],
["!", "_9"],
[",", "_00"],
[" ", "_01"],
["{", "_02"],
["}", "_03"],
["?", "_04"],
["^", "_05"],
["%", "_06"],
["(", "_07"],
[")", "_08"],
["+", "_09"],
["=", "_0a"],
["$", "_0b"],
["\\", "_0c"],
["@", "_0d"],
["]", "_0e"],
["[", "_0f"],
["#", "_0g"],
):
doxygen_renamed_path = doxygen_renamed_path.replace(old, new)
base_html_file = self.output_path / doxygen_renamed_path
# Look for the resulting HTML using this guess.
possible_html_file = base_html_file.with_suffix(self.html_ext)
if possible_html_file.exists():
html_file = possible_html_file
else:
possible_html_file = Path(str(base_html_file) + self.html_ext)
if possible_html_file.exists():
html_file = possible_html_file
else:
return (
base_html_file,
error_arr
+ [
f"{source_file}:: ERROR: CodeChat renderer - unable to find the HTML output file {base_html_file}."
],
)
# Recall that time is measured in seconds since the epoch,
# so that larger = newer.
try:
if html_file.stat().st_mtime > source_file.stat().st_mtime:
return html_file, []
else:
return (
html_file,
error_arr
+ [
f"{source_file}:: ERROR: CodeChat renderer - source file newer than the HTML file {html_file}."
],
)
except OSError as e:
return (
html_file,
error_arr
+ [
f"{source_file}:: ERROR: CodeChat renderer - unable to check modification time of the HTML file {html_file}: {e}."
],
)
def load_pretext_mapping(self):
return json.loads((self.output_path / ".mapping.json").read_text())
# Convert an project using an external renderer.
async def _render_external_project(
# See text_.
text: str,
# See file_path_.
file_path_: str,
# See html_path_.
html_path: Optional[Path],
_tool_or_project_path: str,
# See co_build_.
co_build: Co_Build,
) -> Tuple[str, str]:
# Run from the directory containing the project file.
project_conf_file_path = Path(_tool_or_project_path)
await co_build(
f"Loading project file {project_conf_file_path}.\n",
)
# The ``text`` argument isn't used, since this is an external project, meaning that everything must be saved to disk, instead of rendering the ``text`` currently being edited.
del text
file_path = Path(file_path_)
# Read the project configuration.
try:
project_conf = ProjectConfFile(project_conf_file_path, file_path)
except RuntimeError as e:
return "", str(e)
# Compare dates to see if the rendered file is current
html_path, error_arr = project_conf.checkModificationTime(file_path, html_path)
# If not, render and try again.
if error_arr:
# Render.
stdout, stderr = await _run_subprocess(
project_conf.args, project_conf_file_path.parent, None, True, co_build
)
html_path, error_arr = project_conf.checkModificationTime(file_path, html_path)
else:
stderr = ""
# Display an error in the main window if one exists.
if error_arr:
stderr += "\n".join(error_arr)
return str(html_path), stderr
# Support
# -------
# OS detection: This follows the `Python recommendations <https://docs.python.org/3/library/sys.html#sys.platform>`_.
is_win = sys.platform == "win32"
# These functions support external renderers.
# If need_temp_file is True, provide a NamedTemporaryFile; otherwise, return a dummy context manager.
def _optional_temp_file(need_temp_file: bool) -> Any:
return (
NamedTemporaryFile(mode="w", encoding="utf-8")
if need_temp_file
else _dummy_context_manager()
)
@contextmanager
def _dummy_context_manager() -> Generator:
yield
# Run a subprocess, optionally streaming the stdout.
async def _run_subprocess(
args: Union[List[str], str],
cwd: Path,
input_text: Optional[str],
stream_stdout: bool,
co_build: Co_Build,
) -> Tuple[str, str]:
# If the args were provided a single string, split it since the asyncio subprocess doesn't accept a string (the standard subprocess does).
if isinstance(args, str):
args = shlex.split(args, posix=not is_win)
# Turn ``args[0]`` into a fully-qualified path to avoid `platform-specific behavior <https://docs.python.org/3/library/subprocess.html#subprocess.Popen>`_.
#
# If the path isn't absolute, work on it.
if not Path(args[0]).is_absolute():
args[0] = (
# If this is a relative path, then prepend cwd.
str(cwd / args[0])
# Relative paths have a path separator. Note that `os.altsep <https://docs.python.org/3/library/os.html#os.altsep>`_ may be ``None``.
if os.sep in args[0] or (os.altsep is not None and os.altsep in args[0])
# Otherwise, search the PATH. If it's not found, then go with the original value, which should raise an error when subprocess can't find it.
else shutil.which(args[0]) or args[0]
)
# Explain what's going on.
await co_build("{} > {}\n".format(cwd, " ".join(args)))
# Start the process.
try:
proc = await asyncio.create_subprocess_exec(
*args,
cwd=cwd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
except FileNotFoundError:
return (
"",
":: ERROR: CodeChat renderer - when starting render process, unable to find renderer executable '{}'.\n".format(
args[0] if len(args) > 0 else "<undefined>"
),
)
except Exception as e:
return (
"",
":: ERROR: CodeChat renderer - when starting render process, {}.\n".format(
e
),
)
# Provide a way to send stdout from the process a line at a time to the web client.
async def stdout_streamer(stdout_stream: asyncio.StreamReader):
# Use an `incremental decoder <https://docs.python.org/3/library/codecs.html#codecs.getincrementaldecoder>`_ to decode a stream.
decoder_ = codecs.getincrementaldecoder("utf-8")(errors="backslashreplace")
        # Wrap that with an incremental decoder for universal newlines. The `docs <https://docs.python.org/3/library/io.html#io.IncrementalNewlineDecoder>`_ are very sparse. From the Visual Studio Code help that pops up (likely from https://github.com/python/cpython/blob/master/Modules/_io/textio.c#L237):
#
# IncrementalNewlineDecoder(decoder: Optional[codecs.IncrementalDecoder], translate: bool, errors: str=...)
#
# Codec used when reading a file in universal newlines mode.
# It wraps another incremental decoder, translating \r\n and \r into \n. It also records the types of newlines encountered. When used with translate=False, it ensures that the newline sequence is returned in one piece. When used with decoder=None, it expects unicode strings as decode input and translates newlines without first invoking an external decoder.
decoder = io.IncrementalNewlineDecoder(decoder_, True, "")
while True:
ret = await stdout_stream.read(80)
if ret:
await co_build(decoder.decode(ret))
else:
# Tell the decoder the stream is done and collect any last output.
s = decoder.decode(b"", True)
if s:
await co_build(s)
break
# An awaitable sequence to interact with the subprocess.
aws = [proc.communicate(None if input_text is None else input_text.encode("utf-8"))]
# If we have an output file, then stream the stdout.
if stream_stdout:
assert proc.stdout
aws.append(stdout_streamer(proc.stdout))
# Hack: make it look like there's no stdout, so communicate won't use it.
proc.stdout = None
# Run the subprocess.
try:
(stdout, stderr), *junk = await asyncio.gather(*aws)
except Exception as e:
return "", "external command:: ERROR:When running. {}".format(e)
return (
stdout and stdout.decode("utf-8", errors="backslashreplace"),
stderr.decode("utf-8", errors="backslashreplace"),
)
# Select and invoke a renderer
# ============================
# Build a map of file names/extensions to the converter to use.
#
# TODO:
#
# #. Read this from a StrictYAML file instead.
# #. Use Pandoc to offer lots of other format conversions.
GLOB_TO_RENDERER: Dict[
# glob: The glob which accepts files this renderer can process.
str,
Tuple[
# The `renderer`_.
Callable,
# An list of parameters used to invoke the renderer.
Optional[List[Union[bool, str]]],
],
] = {glob: (_render_CodeChat, None) for glob in SUPPORTED_GLOBS}
GLOB_TO_RENDERER.update(
{
# Leave (X)HTML unchanged.
"*.xhtml": (_pass_through, None),
"*.html": (_pass_through, None),
"*.htm": (_pass_through, None),
# Use the integrated Python libraries for these.
"*.md": (_render_markdown, None),
"*.rst": (_render_ReST, None),
# External tools
#
# `Textile <https://www.promptworks.com/textile>`_:
"*.textile": (
_render_external_file,
[
# Does this tool read the input file from stdin?
True,
# Does this tool produce the output on stdout?
True,
# The remaining elements are the arguments used to invoke the tool.
"pandoc",
                # `Specify the input format <https://pandoc.org/MANUAL.html#option--from>`_.
                "--from=textile",
                # `Output to HTML <https://pandoc.org/MANUAL.html#option--to>`_.
                "--to=html",
# `Produce a complete (standalone) HTML file <https://pandoc.org/MANUAL.html#option--standalone>`_, not a fragment.
"--standalone",
],
),
}
)
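# As an illustrative sketch (not part of the shipped mapping), further
# single-file renderers can be registered the same way; for example, Org-mode
# input, which Pandoc also accepts:
#
#   GLOB_TO_RENDERER["*.org"] = (
#       _render_external_file,
#       [True, True, "pandoc", "--from=org", "--to=html", "--standalone"],
#   )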
# Return the converter for the provided file.
def _select_renderer(
# See file_path_.
file_path: Path,
) -> Tuple[
# _`renderer`: a function or coroutine which will perform the render.
Callable,
# tool_or_project_config_file_path:
#
# - The path to the CodeChat System configuration file if this is a project.
# - A sequence of parameters used to invoke a single-file renderer if one was found.
# - None if no renderer was found for ``file_path``.
Union[str, List[Union[bool, str]], None],
# is_project: True if this is a project; False if not.
bool,
]:
# If this is a directory, start searching there. Otherwise, assume it's a file and remove the file name to produce a directory.
project_path_search = file_path if file_path.is_dir() else file_path.parent
# Search for an external builder configuration file. I can't find an equivalent of `parents <https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.parents>`_ that includes the full path, so the list below constructs it.
for project_path in [project_path_search, *project_path_search.parents]:
project_config_file_path = project_path / "codechat_config.yaml"
if project_config_file_path.exists():
return _render_external_project, str(project_config_file_path), True
# Otherwise, look for a single-file converter.
str_file_path = str(file_path)
for glob, (converter, tool_or_project_config_file_path) in GLOB_TO_RENDERER.items():
if fnmatch.fnmatch(str_file_path, glob):
return converter, tool_or_project_config_file_path, False
return _error_renderer, None, False
# Run the appropriate converter for the provided file or return an error.
async def render_file(
# _`text`: The text to be converted. If this is a project, the text will be loaded from the disk by the external renderer instead.
text: str,
# _`file_path`: The path to the file which (mostly -- see ``is_dirty``) contains this text OR a directory containing a CodeChat project.
file_path: str,
# _`html_path`: The html file currently being displayed (if available).
html_path: Optional[Path],
# _`co_build`: A coroutine that an external renderer should call to stream build output.
co_build: Co_Build,
# True if the provided text hasn't been saved to disk.
is_dirty: bool,
) -> Tuple[
# was_performed: True if the render was performed. False if this is a project and the source file is dirty; in this case, the render is skipped.
bool,
# project_path: If this is a project, then ``project_path``` contains a path to the CodeChat project configuration file; otherwise, ``project_path`` is ``None``.
Optional[str],
# rendered_file_path: A path to the rendered file.
#
# - If this is a project, the rendered file is different from ``file_path``, since it points to the location on disk where the external renderer wrote the HTML. In this case, the ``html`` return value is ``None``, since the HTML should be read from the disk instead.
# - Otherwise, it's the same as the ``file_path``, and the resulting rendered HTML is returned in ``html``.
str,
# html: ``None`` for projects, or the resulting HTML otherwise; see the ``rendered_file_path`` return value.
Optional[str],
# err_string: A string containing error messages produced by the render.
str,
]:
# Determine the renderer for this file/project.
renderer, tool_or_project_path, is_project = _select_renderer(Path(file_path))
# Projects require a clean file in order to render.
if is_project and is_dirty:
return False, None, "", None, ""
if asyncio.iscoroutinefunction(renderer):
# Coroutines get the queue, so they can report progress during the build.
html_string_or_file_path, err_string = await renderer(
text, file_path, html_path, tool_or_project_path, co_build
)
else:
assert tool_or_project_path is None
html_string_or_file_path, err_string = renderer(text, file_path)
# Update the client's state, now that the rendering is complete.
if is_project:
# For projects, the rendered HTML is already on disk; a path to this rendered file is returned.
return (
True,
str(tool_or_project_path),
html_string_or_file_path,
None,
err_string,
)
else:
# Otherwise, the rendered HTML is returned as a string and can be directly used. Provide a path to the source file which was just rendered.
        return True, None, file_path, html_string_or_file_path, err_string
/EStruct-0.1.3.tar.gz/EStruct-0.1.3/README.txt
=======
EStruct
=======
EStruct provides an extended packing/unpacking library beyond the one provided by the standard library's struct module.
It provides packing of conditional elements and arrays. The library was originally written by the author to simplify
packing and unpacking binary data within MPEG-4 files.
Typical usage::
#!/usr/bin/env python
from estruct.estruct import EStruct
import sys
from datetime import date
packager = EStruct("Student", "Student.Name Student.Address Student.DOB", "![50s50sQ]")
records=[]
with open(sys.argv[1],"rb") as f:
records=packager.unpack(f.read())
for r in records:
print r.Student.Name, r.Student.Address, date.fromordinal(r.Student.DOB)
EStruct is also hosted on GitHub at https://github.com/simblack/EStruct
Installation
============
::
pip install EStruct
EStruct object
==============
The EStruct object's __init__ method takes three parameters: the unpacked class name, the fields within the class, and the packing format.
*EStruct.__init__(self, name, fields_string, format_string)*
name
----
This is simply the name of the object that is created when unpacking values.
fields_string
-------------
The fields string is a description that contains fields and sub fields.
The string needs to be a space separated list of fields. Nested structures can be created by using a dot notation '.', i.e.
object.field or object.sub_object.field.
Example::
'f1 f2 f3.a1 f3.a2 f4 f5.a1.b1 f5.a1.b2 f5.a2.b1 f5.a2.b2'
This would create an object with fields f1, f2, f3, f4 and f5, where:

- f3 is an object with fields a1 and a2
- f5 is an object with fields a1 and a2, each of which is in turn an object with fields b1 and b2
format_string
-------------
::
format= byteorder + format_string
byte_order= '@'|'='|'<'|'>'|'!'
@ native byte-order native size native alignment
= native byte-order standard size no alignment
< little endian standard size no alignment
> big endian standard size no alignment
! network standard size no alignment
format_string= conditional_format | array_format | normal_format | empty_format
empty_format=
conditional_format= '(' + condition + '?' + true_format + ':' + false_format + ')'
condition= [python evaluation]
true_format= format_string
false_format= format_string
An empty format_string implies that no field is required for packing.
array_format= count[format_string]
count= empty_count | constant_count | field_count
empty_count=
constant_count= (0..9)*
field_count= '{' + [field_name] + '}'
The *\[field name\]* is any field defined prior to this packing element.
The *\[python evaluation\]* is a valid python statement equating to a True or False result.
Any field name prior to this packing element may be used. Also, for evaluation within arrays, the index value *INDEX* may be used
to specify the current index.
Examples
~~~~~~~~
Network endian, two 32 bit integers followed by a 64 bit value if *f1==1*, else another 32 bit integer (note the ':' separator, matching the conditional_format grammar above)::
!II(f1==1?Q:I)
Network endian, a 32 bit integer followed by an array with the number of elements as specified in the field
*f1*. The array elements are a 32 bit integer followed by a number of characters, where that number is set in
the field *f2\[INDEX\].a1*; the index is the current iteration of the outer array::
!I{f1}[I{f2[INDEX].a1}[c]]
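
As a further hedged illustration (not taken from the original docs), a
conditional may also appear inside an array, using *INDEX* in the condition::

!I{f1}[(INDEX==0?Q:I)]

Here a 32 bit count *f1* is read first; the first array element is then a
64 bit value and the remaining elements are 32 bit integers.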
Unpacking
=========
The unpack method is used to unpack binary data into an object
*EStruct.unpack(self, data, **kargs)*
Keyword arguments may also be passed if they are required for evaluations.
The method returns an object as described by the fields string of the constructor.
::
struct=EStruct('Test','f1 f2','!I{f1}[I]')
data=[0,0,0,3,0,0,0,2,0,0,0,1,0,0,0,0]
data_str="".join([chr(x) for x in data])
obj=struct.unpack(data_str)
print obj.f1, obj.f2
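
For the data above, the expected output (hedged -- the exact formatting depends
on how the library represents arrays) is *f1* = 3 followed by the three array
values 2, 1, 0.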
Packing
=======
The pack method is used to pack values into a binary string
*EStruct.pack(self, *args, **kargs)*
The correct number of arguments must be passed for packing, according to the packing format.
Keyword arguments may also be passed if they are required for evaluations.
The method returns the packed binary string.
::
struct=EStruct('Test','f1 f2','!I{f1}[I]')
    packed_result=struct.pack(3,[2,1,0])
print packed_result.encode("hex") | PypiClean |
/Auto_FOX-1.0.0b1-py3-none-any.whl/FOX/functions/adf.py | from __future__ import annotations
import sys
from typing import (
Sequence,
Hashable,
Iterable,
Callable,
TypeVar,
Tuple,
List,
Any,
TYPE_CHECKING,
)
import numpy as np
import pandas as pd
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
if TYPE_CHECKING:
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
from numpy import float64 as f8, int64 as i8
_T = TypeVar("_T")
_SCT = TypeVar("_SCT", bound=np.generic)
_3Tuple = Tuple[_T, _T, _T]
NDArray = np.ndarray[Any, np.dtype[_SCT]]
__all__ = ['get_adf_df', 'get_adf']
def _adf_inner_cdktree(
m: NDArray[f8],
n: int,
r_max: float,
idx_list: Iterable[_3Tuple[NDArray[np.bool_]]],
lattice: None | NDArray[f8],
periodicity: Iterable[Literal[0, 1, 2]] = range(3),
weight: None | Callable[[NDArray[f8]], NDArray[f8]] = None,
) -> List[NDArray[f8]]:
"""Perform the loop of :meth:`.init_adf` with a distance cutoff."""
    # Construct slices and a distance matrix
    if lattice is not None:
        with np.errstate(divide='ignore', invalid='ignore'):
            dist, vec, idx = _adf_inner_cdktree_periodic(m, n, r_max, lattice, periodicity)
            ang: NDArray[f8] = np.arccos(np.einsum('jkl,jml->jkm', vec, vec))
        dist = np.maximum(dist[..., None], dist[..., None, :])
    else:
        tree = cKDTree(m)
        dist, idx = tree.query(m, n, distance_upper_bound=r_max, p=2)
        dist[dist == np.inf] = 0.0
        idx[idx == len(m)] = 0

        # Slice the Cartesian coordinates
        coords13: NDArray[f8] = m[idx]
        coords2: NDArray[f8] = m[..., None, :]

        # Construct (3D) angle- and distance-matrices
        with np.errstate(divide='ignore', invalid='ignore'):
            vec = (coords13 - coords2) / dist[..., None]
            ang = np.arccos(np.einsum('jkl,jml->jkm', vec, vec))
        dist = np.maximum(dist[..., None], dist[..., None, :])
    ang[np.isnan(ang)] = 0.0
# Radian (float) to degrees (int)
ang_int: NDArray[i8] = np.degrees(ang).astype(np.int64)
# Construct and return the ADF
ret = []
for i, j, k in idx_list:
ijk: NDArray[np.bool_] = j[:, None, None] & i[idx][..., None] & k[idx][..., None, :]
weights = weight(dist[ijk]) if weight is not None else None
ret.append(get_adf(ang_int[ijk], weights=weights))
return ret
def _adf_inner_cdktree_periodic(
m: NDArray[f8],
n: int,
r_max: float,
lattice: NDArray[f8],
periodicity: Iterable[Literal[0, 1, 2]],
) -> Tuple[NDArray[f8], NDArray[f8], NDArray[np.intp]]:
# Construct the (full) distance matrix and vectors
dist, vec = _adf_inner_periodic(m, lattice, periodicity)
# Apply `n` and `r_max`: truncate the number of distances/vectors
idx1 = np.argsort(dist, axis=1)
if n < idx1.shape[1]:
idx1 = idx1[:, :n]
dist = np.take_along_axis(dist, idx1, axis=1)
mask = dist > r_max
idx1[mask] = 0
dist[mask] = 0.0
# Return the subsets
idx0 = np.empty_like(idx1)
idx0[:] = np.arange(len(idx0))[..., None]
i = idx0.ravel()
j = idx1.ravel()
vec_ret = vec[i, j].reshape(*dist.shape, 3)
return dist, vec_ret, idx1
def _adf_inner(
m: NDArray[f8],
idx_list: Iterable[_3Tuple[NDArray[np.bool_]]],
lattice: None | NDArray[f8],
periodicity: Iterable[Literal[0, 1, 2]] = range(3),
weight: None | Callable[[NDArray[f8]], NDArray[f8]] = None,
) -> List[NDArray[f8]]:
"""Perform the loop of :meth:`.init_adf` without a distance cutoff."""
# Construct (3D) angle- and distance-matrices
with np.errstate(divide='ignore', invalid='ignore'):
if lattice is None:
# Construct a distance matrix
dist: NDArray[f8] = cdist(m, m)
# Slice the Cartesian coordinates
coords13: NDArray[f8] = m
coords2: NDArray[f8] = m[..., None, :]
vec: NDArray[f8] = (coords13 - coords2) / dist[..., None]
else:
dist, vec = _adf_inner_periodic(m, lattice, periodicity)
ang: NDArray[f8] = np.arccos(np.einsum('jkl,jml->jkm', vec, vec))
dist = np.maximum(dist[..., :, None], dist[..., None, :])
ang[np.isnan(ang)] = 0.0
# Radian (float) to degrees (int)
ang_int: NDArray[i8] = np.degrees(ang).astype(np.int64)
# Construct and return the ADF
ret = []
for i, j, k in idx_list:
ijk: NDArray[np.bool_] = j[:, None, None] & i[..., None] & k[..., None, :]
weights = weight(dist[ijk]) if weight is not None else None
ret.append(get_adf(ang_int[ijk], weights=weights))
return ret
def _adf_inner_periodic(
m: NDArray[f8],
lattice: NDArray[f8],
periodicity: Iterable[Literal[0, 1, 2]],
) -> Tuple[NDArray[f8], NDArray[f8]]:
"""Construct the distance matrix and angle-defining vectors for periodic systems."""
vec = m - m[..., None, :]
lat_norm = np.linalg.norm(lattice, axis=-1)
iterator = ((i, lat_norm[i]) for i in periodicity)
for i, vec_len in iterator:
vec[..., i][vec[..., i] > (vec_len / 2)] -= vec_len
vec[..., i][vec[..., i] < -(vec_len / 2)] += vec_len
dist = np.linalg.norm(vec, axis=-1)
vec /= dist[..., None]
return dist, vec
def get_adf_df(atom_pairs: Sequence[Hashable]) -> pd.DataFrame:
"""Construct and return a pandas dataframe filled to hold angular distribution functions.
Parameters
----------
atom_pairs : |Sequence|_ [|Hashable|_]
        A nested sequence of column names.
Returns
-------
|pd.DataFrame|_:
An empty dataframe.
"""
# Create and return the DataFrame
index = pd.RangeIndex(1, 181, name='phi / Degrees')
df = pd.DataFrame(0.0, index=index, columns=atom_pairs)
df.columns.name = 'Atom pairs'
return df
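
# A hedged usage sketch (the atom names below are illustrative, not fixed by
# this module):
#
#   >>> df = get_adf_df([('Cd', 'Cd', 'Se'), ('Se', 'Cd', 'Se')])
#   >>> df.shape
#   (180, 2)
#
# i.e. one row per integer angle from 1 to 180 degrees and one column per
# atom-pair entry, all initialised to 0.0.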
def get_adf(
ang: NDArray[np.integer[Any]],
weights: None | NDArray[np.number[Any]] = None,
) -> NDArray[f8]:
r"""Calculate and return the angular distribution function (ADF).
Parameters
----------
ang : |np.ndarray|_ [|np.int64|_]
A 1D array of angles (:code:`dtype=int`) with all angles.
Units should be in degrees.
weights : |np.ndarray|_ [|np.float|_], optional
A 1D array of weighting factors.
Should be of the same length as **ang**.
Returns
-------
:math:`m*180` |np.ndarray|_ [|np.float64|_]:
A 1D array with an angular distribution function spanning all values between 0 and 180
degrees.
"""
# Calculate and normalize the density
denominator = len(ang) / 180
at_count: NDArray[i8] = np.bincount(ang, minlength=181)[1:181]
dens: NDArray[f8] = at_count / denominator
if weights is None:
return dens
# Weight (and re-normalize) the density based on the distance matrix **dist**
area: f8 = dens.sum()
with np.errstate(divide='ignore', invalid='ignore'):
dens *= np.bincount(ang, weights=weights, minlength=181)[1:181] / at_count
dens *= area / np.nansum(dens)
dens[np.isnan(dens)] = 0.0
return dens | PypiClean |
/KD_Lib-0.0.32.tar.gz/KD_Lib-0.0.32/KD_Lib/Quantization/qat/qat.py | from copy import deepcopy
import torch
from KD_Lib.Quantization.common import Quantizer
class QAT_Quantizer(Quantizer):
"""
Implementation of Quantization-Aware Training (QAT) for PyTorch models.
:param model: (Quantizable) Model that needs to be quantized
:type model: torch.nn.Module
:param train_loader: DataLoader used for training
:type train_loader: torch.utils.data.DataLoader
:param test_loader: DataLoader used for testing
:type test_loader: torch.utils.data.DataLoader
:param optimizer: Optimizer for training
:type optimizer: torch.optim.*
:param qconfig: Configuration used for quantization
:type qconfig: Qconfig
:param criterion: Loss function used for training
:type criterion: Loss_fn
:param device: Device used for training ("cpu" or "cuda")
:type device: torch.device
"""
def __init__(
self,
model,
train_loader,
test_loader,
optimizer,
qconfig=torch.quantization.get_default_qat_qconfig("fbgemm"),
criterion=torch.nn.CrossEntropyLoss(),
device=torch.device("cpu"),
):
super(QAT_Quantizer, self).__init__(
model, qconfig, train_loader, test_loader, optimizer, criterion, device
)
def quantize(
self,
num_train_epochs=10,
num_train_batches=10,
param_freeze_epoch=3,
bn_freeze_epoch=2,
):
"""
Function used for quantization
:param num_train_epochs: Number of epochs used for training
:type num_train_epochs: int
:param num_train_batches: Number of batches used for training
:type num_train_batches: int
        :param param_freeze_epoch: Epoch after which the quantizer parameters are frozen
        :type param_freeze_epoch: int
        :param bn_freeze_epoch: Epoch after which the batch norm mean and variance stats are frozen
        :type bn_freeze_epoch: int
"""
qat_model = deepcopy(self.model)
qat_model.fuse_model()
        # Rebuild the optimizer so that it tracks the copied model's parameters;
        # assigning to ``optimizer.params`` on a deepcopy would leave it stepping
        # the original model's parameters instead.
        optimizer = type(self.optimizer)(qat_model.parameters(), **self.optimizer.defaults)
qat_model.qconfig = self.qconfig
torch.quantization.prepare_qat(qat_model, inplace=True)
print("Training model...")
for epoch in range(num_train_epochs):
print(f"Epoch {epoch}")
loss, acc = self._train_model(qat_model, optimizer, num_train_batches)
print(f"Training Loss: {loss} | Training Acc: {acc}")
if epoch > param_freeze_epoch:
qat_model.apply(torch.quantization.disable_observer)
if epoch > bn_freeze_epoch:
qat_model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
self.quantized_model = torch.quantization.convert(
qat_model.eval(), inplace=False
)
acc = self._evaluate_model(self.quantized_model)
print(f"Evaluation accuracy: {acc}")
return self.quantized_model
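
    # A hedged usage sketch (``MyQuantizableModel`` is an assumed model class
    # that implements the ``fuse_model()`` hook used above):
    #
    #   model = MyQuantizableModel()
    #   optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    #   quantizer = QAT_Quantizer(model, train_loader, test_loader, optimizer)
    #   int8_model = quantizer.quantize(num_train_epochs=5, num_train_batches=10)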
def _train_model(self, model, optimizer, num_batches):
"""
Function used for training the model
:param model (torch.nn.Module): Model that needs to be trained
:param optimizer (torch.optim.*): Optimizer for training
:param num_batches (int): Number of batches used for calibration
"""
model.to(self.device)
model.train()
correct = 0
epoch_loss = 0
cnt = 0
len_dataset = min(
num_batches * self.train_loader.batch_size, len(self.train_loader.dataset)
)
for image, target in self.train_loader:
image, target = image.to(self.device), target.to(self.device)
output = model(image)
if isinstance(output, tuple):
output = output[0]
loss = self.criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
cnt += 1
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
epoch_loss += loss.item()
if cnt >= num_batches:
return epoch_loss, (correct / len_dataset)
return epoch_loss, (correct / len_dataset) | PypiClean |
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/notify/notifiers/rapidpush.py | import json
from loguru import logger
from requests.exceptions import RequestException
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession
from flexget.utils.requests import TimedLimiter
plugin_name = 'rapidpush'
logger = logger.bind(name=plugin_name)
RAPIDPUSH_URL = 'https://rapidpush.net/api'
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('rapidpush.net', '5 seconds'))
class RapidpushNotifier:
"""
Example::
notify:
entries:
via:
- rapidpush:
apikey: xxxxxxx (can also be a list of api keys)
[category: category, default FlexGet]
[group: device group, default no group]
                [channel: the broadcast notification channel; if provided, the notification will be sent to the channel subscribers instead of
your devices, default no channel]
[priority: 0 - 6 (6 = highest), default 2 (normal)]
"""
schema = {
'type': 'object',
'properties': {
'api_key': one_or_more({'type': 'string'}),
'category': {'type': 'string', 'default': 'Flexget'},
'group': {'type': 'string'},
'channel': {'type': 'string'},
'priority': {'type': 'integer', 'minimum': 0, 'maximum': 6},
},
'additionalProperties': False,
'required': ['api_key'],
'not': {
'anyOf': [
{'required': ['channel', 'group']},
{'required': ['channel', 'category']},
{'required': ['channel', 'priority']},
]
},
'error_not': 'Cannot use \'channel\' with \'group\', \'category\' or \'priority\'',
}
def notify(self, title, message, config):
"""
Send a Rapidpush notification
"""
notification = {'title': title, 'message': message}
if not isinstance(config['api_key'], list):
config['api_key'] = [config['api_key']]
if config.get('channel'):
params = {'command': 'broadcast'}
notification['channel'] = config['channel']
else:
params = {'command': 'notify'}
notification['category'] = config['category']
if config.get('group'):
notification['group'] = config['group']
if config.get('priority') is not None:
notification['priority'] = config['priority']
params['data'] = json.dumps(notification)
for key in config['api_key']:
params['apikey'] = key
try:
response = requests.post(RAPIDPUSH_URL, params=params)
except RequestException as e:
raise PluginWarning(e.args[0])
else:
if response.json()['code'] > 400:
raise PluginWarning(response.json()['desc'])
@event('plugin.register')
def register_plugin():
plugin.register(RapidpushNotifier, plugin_name, api_ver=2, interfaces=['notifiers']) | PypiClean |
/BIT_Online_Code_Helper-1.0.4-py3-none-any.whl/bit_online_code_helper/bitonline/OnlineTestCodeManager.py | import re
import time
from bit_online_code_helper.log.LogManager import *
class _CompileStatus(Enum):
    # The values are the (Chinese) status strings matched against the judge
    # page, so they must stay in Chinese.
    PENDING = '正等待编译'  # "still waiting to be compiled"
    COMPILE_ERROR = '程序编译失败'  # "program failed to compile"
    COMPILE_SUCCESS = '程序已处理完毕'  # "program has been fully processed"
class _OnlineTestCodeManager:
def __init__(self):
self.__headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/77.0.3865.120 Safari/537.36 '
}
self.__session = None
def set_session(self, session):
self.__session = session
def __get_post_data(self, name, page_text):
regx = re.compile('<input[\\s\\S]+?name=\"' + name + '\"[\\s\\S]+?value=\"(.+?)\"')
return regx.findall(page_text)[0]
def run(self, source_file_path, problem_url):
if self.__commit_online(source_file_path, problem_url):
self.__is_commit_pass(problem_url)
def __get_online_support_language(self, commit_page_text):
regx = re.compile('<option value="(\\d+)">(.+?)</option>')
return regx.findall(commit_page_text)
def __get_language_type(self, commit_page_text, source_file_path):
source_file_ext_name = source_file_path.split('.')[-1]
# e.g. [('1', 'C (GCC 3.3)'), ('2', 'C++ (G++ 3.3)')]
online_support_language = self.__get_online_support_language(commit_page_text)
type_map = [('c', 'C'), ('cpp', 'C++')]
for local_support_type in type_map:
if source_file_ext_name == local_support_type[0]:
for online_support_type in online_support_language:
if local_support_type[1] in online_support_type[1]:
return online_support_type[0]
return '-1'
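
    # Illustration (added): given online options such as
    # [('1', 'C (GCC 3.3)'), ('2', 'C++ (G++ 3.3)')], a ``foo.cpp`` source file
    # resolves to language id '2', while an unsupported extension yields '-1'.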
def __commit_online(self, source_file_path, problem_url):
tip(OnlineTestCodeLogs.COMMIT_START)
commit_page_url = problem_url.replace('view', 'submit')
data_item = ['sesskey', '_qf__submit_form', 'sourcefile', 'submitbutton']
data = {}
try:
commit_page_text = self.__session.get(commit_page_url, headers=self.__headers).text
if '时间已到' in commit_page_text:
tip(OnlineTestCodeLogs.DEADLINE_PASS_FAILED)
return False
for item in data_item:
data[item] = self.__get_post_data(item, commit_page_text)
data['id'] = re.compile('php\\?id=(\\d+)').findall(commit_page_url)[0]
data['code'] = open(source_file_path, 'rb').read().decode()
language = self.__get_language_type(commit_page_text, source_file_path)
if language == '-1':
tip(OnlineTestCodeLogs.NOT_SUPPORT_LANGUAGE_FAILED)
return False
data['language'] = language
commit_url = 'http://lexue.bit.edu.cn/mod/programming/submit.php'
self.__session.post(commit_url, data=data, headers=self.__headers)
tip(OnlineTestCodeLogs.COMMIT_SUCCESS)
divide_line()
return True
        except Exception:
tip(OnlineTestCodeLogs.COMPIT_FAILED)
return False
def __get_compile_status(self, test_res_page_text):
if _CompileStatus.PENDING.value in test_res_page_text:
return _CompileStatus.PENDING
elif _CompileStatus.COMPILE_ERROR.value in test_res_page_text:
return _CompileStatus.COMPILE_ERROR
elif _CompileStatus.COMPILE_SUCCESS.value in test_res_page_text:
return _CompileStatus.COMPILE_SUCCESS
def __is_commit_pass(self, problem_url):
test_res_url = problem_url.replace('view', 'result')
test_res_page_text = ''
while True:
test_res_page_text = self.__session.get(test_res_url, headers=self.__headers).text
compile_status = self.__get_compile_status(test_res_page_text)
if compile_status == _CompileStatus.COMPILE_ERROR:
tip(OnlineTestCodeLogs.COMPILE_FAILED)
return False
elif compile_status == _CompileStatus.COMPILE_SUCCESS:
break
else:
time.sleep(1)
continue
total_test_case_num, \
test_case_pass_num, \
test_case_fail_num = self.__parse_test_res_baseinfo(test_res_page_text)
if total_test_case_num == test_case_pass_num:
tip(OnlineTestCodeLogs.TEST_SUCCESS)
else:
tip(OnlineTestCodeLogs.TEST_FAILED)
            # "Passed %d test case(s), failed %d test case(s)." (user-facing Chinese output)
            print('通过%d个用例,失败%d个用例。' % (test_case_pass_num, test_case_fail_num))
def __parse_test_res_baseinfo(self, test_res_page_text):
test_res_baseinfo_regx = re.compile('测试结果:共 (\\d+?) 个测试用例,'
'您的程序通过了其中的 (\\d+?) 个,未能通过的有 (\\d+?) 个')
regx_res = test_res_baseinfo_regx.findall(test_res_page_text)
total_test_case_num = int(regx_res[0][0])
test_case_pass_num = int(regx_res[0][1])
test_case_fail_num = int(regx_res[0][2])
return total_test_case_num, test_case_pass_num, test_case_fail_num
commit_online_manager = _OnlineTestCodeManager() | PypiClean |
/HAPI_Nile-1.6.0-py3-none-any.whl/Hapi/rrm/routing.py | from typing import Union
import numpy as np
class Routing:
"""Routing class contains routing method.
Methods
-------
1- Muskingum
2- Muskingum_V
3- TriangularRouting1
functions :
1- CalculateWeights
4- TriangularRouting2
functions
1- Tf
"""
def __init__(self):
"""Routing model does not need any parameters to be instantiated."""
pass
@staticmethod
def Muskingum(inflow, Qinitial, k, x, dt):
"""Muskingum.
Parameters
----------
inflow: [numpy array]
time series of inflow hydrograph
Qinitial: [numeric]
initial value for outflow
k: [numeric]
travelling time (hours)
x: [numeric]
surface nonlinearity coefficient (0,0.5)
dt: [numeric]
delta t
Returns
-------
outflow: [numpy array]
time series of routed hydrograph
Examples
--------
        >>> q = [10.0, 12.0, 18.0, 14.0, 11.0]  # discharge time series
        >>> k, x = 5, 0.2  # example routing parameters
        >>> time_resolution = 1  # hourly time step
>>> q_routed = Routing.Muskingum(q, q[0], k, x, time_resolution)
"""
c1 = (dt - 2 * k * x) / (2 * k * (1 - x) + dt)
c2 = (dt + 2 * k * x) / (2 * k * (1 - x) + dt)
c3 = (2 * k * (1 - x) - dt) / (2 * k * (1 - x) + dt)
# if c1+c2+c3!=1:
# raise("sim of c1,c2 & c3 is not 1")
outflow = np.zeros_like(inflow)
outflow[0] = Qinitial
for i in range(1, len(inflow)):
outflow[i] = c1 * inflow[i] + c2 * inflow[i - 1] + c3 * outflow[i - 1]
outflow = np.round(outflow, 4)
return outflow
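
    # Note (added): by construction c1 + c2 + c3 == 1, since the numerators
    # (dt - 2kx) + (dt + 2kx) + (2k(1 - x) - dt) sum to dt + 2k(1 - x),
    # which is exactly the shared denominator.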
@staticmethod
def Muskingum_V(
inflow: np.ndarray,
Qinitial: Union[int, float],
k: Union[int, float],
x: Union[int, float],
dt: Union[int, float],
) -> np.ndarray:
"""Muskingum_V.
Vectorized version of Muskingum
Parameters
----------
inflow: [numpy array]
time series of inflow hydrograph
Qinitial: [numeric]
initial value for outflow
k: [numeric]
travelling time (hours)
x: [numeric]
surface nonlinearity coefficient (0,0.5)
dt: [numeric]
delta t
Returns
-------
outflow:
[numpy array] time series of routed hydrograph
Examples
--------
        >>> q = [10.0, 12.0, 18.0, 14.0, 11.0]  # discharge time series
        >>> k, x = 5, 0.2  # example routing parameters
        >>> time_resolution = 1  # hourly time step
>>> q_routed = Routing.Muskingum_V(q, q[0], k, x, time_resolution)
"""
c1 = (dt - 2 * k * x) / (2 * k * (1 - x) + dt)
c2 = (dt + 2 * k * x) / (2 * k * (1 - x) + dt)
c3 = (2 * k * (1 - x) - dt) / (2 * k * (1 - x) + dt)
# if c1+c2+c3!=1:
# raise("sim of c1,c2 & c3 is not 1")
Q = np.zeros_like(inflow)
Q[0] = Qinitial
Q[1:] = c1 * np.asarray(inflow[1:]) + c2 * np.asarray(inflow[0:-1])
for i in range(1, len(inflow)):
            # only apply the recession term when it keeps the routed flow non-negative
            if not Q[i] + c3 * Q[i - 1] < 0:
Q[i] = Q[i] + c3 * Q[i - 1]
return Q
@staticmethod
def Tf(maxbas):
"""Tf.
Transfer function weight generator in a shape of a triangle.
Parameters
----------
maxbas: [integer]
number of time steps that the triangular routing function
is going to divide the discharge into, based on the weights
generated from this function, min value is 1 and default value is 1
Returns
-------
wi: [numpy array]
array of normalised weights
Examples
--------
>>> ws = Routing.Tf(5)
"""
wi = []
for x in range(1, maxbas + 1): # if maxbas=3 so x=[1,2,3]
if (
x <= (maxbas) / 2.0
): # x <= 1.5 # half of values will form the rising limb and half falling limb
# Growing transfer # rising limb
wi.append((x) / (maxbas + 2.0))
else:
# Receding transfer # falling limb
wi.append(1.0 - (x + 1) / (maxbas + 2.0))
# Normalise weights
wi = np.array(wi) / np.sum(wi)
return wi
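
    # Worked illustration (added): Routing.Tf(5) yields the normalised weights
    # [1/9, 2/9, 3/9, 2/9, 1/9] -- a symmetric triangle whose weights sum to 1.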
@staticmethod
def TriangularRouting2(q, maxbas=1):
"""Triangular Routing.
The function implements the transfer function using a triangular function (considers only integer values of
Maxbas parameter)
Parameters
----------
q: [numpy array]
time series of discharge hydrographs
maxbas: [integer]
number of time steps that the triangular routing function
is going to divide the discharge into, based on the weights
generated from this function, min value is 1 and default value is 1
Returns
-------
q_r: [numpy array]
time series of routed hydrograph
Examples
--------
>>> q_sim = Routing.TriangularRouting2(np.array(q_sim), parameters[-1])
"""
# input data validation
assert maxbas >= 1, "Maxbas value has to be larger than 1"
# Get integer part of maxbas
maxbas = int(round(maxbas, 0))
# get the weights
w = Routing.Tf(maxbas)
# rout the discharge signal
q_r = np.zeros_like(q, dtype="float64")
q_temp = np.float32(q)
for w_i in w:
q_r += q_temp * w_i
q_temp = np.insert(q_temp, 0, 0.0)[:-1]
return q_r
@staticmethod
def CalculateWeights(MAXBAS):
"""Calculate Weights.
- calculate the MAXBAS Weights based on a MAXBAX number The MAXBAS is a HBV parameter that
controls the routing.
- It is important to mention that this function allows to obtain weights
not only for interger values but from decimals values as well.
Parameters
----------
MAXBAS: [Numeric]
Examples
--------
>>> maxbasW = Routing.CalculateWeights(5)
>>> print(maxbasW)
>>> 0.0800 0.2400 0.3600 0.2400 0.0800
"""
yant = 0
Total = 0 # Just to verify how far from the unit is the result
TotalA = (MAXBAS * MAXBAS * np.sin(np.pi / 3)) / 2
IntPart = np.floor(MAXBAS)
RealPart = MAXBAS - IntPart
PeakPoint = MAXBAS % 2
flag = 1 # 1 = "up" ; 2 = down
        if RealPart > 0:  # non-integer MAXBAS: add a slot for the fractional tail
            maxbasW = np.ones(int(IntPart) + 1)
        else:  # integer MAXBAS
            maxbasW = np.ones(int(IntPart))
for x in range(int(MAXBAS)):
if x < (MAXBAS / 2.0) - 1:
# Integral of x dx with slope of 60 degree Equilateral triangle
ynow = np.tan(np.pi / 3) * (x + 1)
# ' Area / Total Area
maxbasW[x] = ((ynow + yant) / 2) / TotalA
else: # The area here is calculated by the formlua of a trapezoidal (B1+B2)*h /2
if flag == 1:
ynow = np.sin(np.pi / 3) * MAXBAS
if PeakPoint == 0:
maxbasW[x] = ((ynow + yant) / 2) / TotalA
else:
A1 = ((ynow + yant) / 2) * (MAXBAS / 2.0 - x) / TotalA
yant = ynow
ynow = (MAXBAS * np.sin(np.pi / 3)) - (
np.tan(np.pi / 3) * (x + 1 - MAXBAS / 2.0)
)
A2 = ((ynow + yant) * (x + 1 - MAXBAS / 2.0) / 2) / TotalA
maxbasW[x] = A1 + A2
flag = 2
else:
# 'sum of the two height in the descending part of the triangle
ynow = MAXBAS * np.sin(np.pi / 3) - np.tan(np.pi / 3) * (
x + 1 - MAXBAS / 2.0
)
# Multiplying by the height of the trapezoidal and dividing by 2
maxbasW[x] = ((ynow + yant) / 2) / TotalA
Total = Total + maxbasW[x]
yant = ynow
x = int(MAXBAS)
# x = x + 1
if RealPart > 0:
if np.floor(MAXBAS) == 0:
MAXBAS = 1
maxbasW[x] = 1
NumberofWeights = 1
else:
maxbasW[x] = (yant * (MAXBAS - (x)) / 2) / TotalA
Total = Total + maxbasW[x]
NumberofWeights = x
else:
NumberofWeights = x - 1
return maxbasW
@staticmethod
def TriangularRouting1(Q, MAXBAS):
"""TriangularRouting1.
calculate the routing from a input hydrograph using the MAXBAS parameter from the HBV
model (considers float values of Maxbas parameter).
Examples
--------
[Qout,maxbasW]=RoutingMAXBAS(Q,5);
where:
Qout = output hydrograph
maxbasW = MAXBAS weight
Q = input hydrograph
5 = MAXBAS parameter value.
"""
# CALCULATE MAXBAS WEIGHTS
maxbasW = Routing.CalculateWeights(MAXBAS)
Qw = np.ones((len(Q), len(maxbasW)))
# Calculate the matrix discharge
for i in range(len(Q)): # 0 to 10
for k in range(len(maxbasW)): # 0 to 4
Qw[i, k] = maxbasW[k] * Q[i]
def mm(A, s):
tot = []
for o in range(np.shape(A)[1]): # columns
for t in range(np.shape(A)[0]): # rows
tot.append(A[t, o])
Su = tot[s:-1:s]
return Su
# Calculate routing
j = 0
Qout = np.ones(shape=(len(Q)))
for i in range(len(Q)):
if i == 0:
Qout[i] = Qw[i, i]
elif i < len(maxbasW) - 1:
A = Qw[0 : i + 1, :]
s = len(A) - 1 # len(A) is the no of rows or use int(np.shape(A)[0])
Su = mm(A, s)
Qout[i] = sum(Su[0 : i + 1])
else:
A = Qw[j : i + 1, :]
s = len(A) - 1
Su = mm(A, s)
Qout[i] = sum(Su)
j = j + 1
return Qout # ,maxbasW | PypiClean |
/NlvWxPython-4.2.0-cp37-cp37m-win_amd64.whl/wx/lib/agw/ribbon/art.py | # RibbonArtSetting
RIBBON_ART_TAB_SEPARATION_SIZE = 1
RIBBON_ART_PAGE_BORDER_LEFT_SIZE = 2
RIBBON_ART_PAGE_BORDER_TOP_SIZE = 3
RIBBON_ART_PAGE_BORDER_RIGHT_SIZE = 4
RIBBON_ART_PAGE_BORDER_BOTTOM_SIZE = 5
RIBBON_ART_PANEL_X_SEPARATION_SIZE = 6
RIBBON_ART_PANEL_Y_SEPARATION_SIZE = 7
RIBBON_ART_TOOL_GROUP_SEPARATION_SIZE = 8
RIBBON_ART_GALLERY_BITMAP_PADDING_LEFT_SIZE = 9
RIBBON_ART_GALLERY_BITMAP_PADDING_RIGHT_SIZE = 10
RIBBON_ART_GALLERY_BITMAP_PADDING_TOP_SIZE = 11
RIBBON_ART_GALLERY_BITMAP_PADDING_BOTTOM_SIZE = 12
RIBBON_ART_PANEL_LABEL_FONT = 13
RIBBON_ART_BUTTON_BAR_LABEL_FONT = 14
RIBBON_ART_TAB_LABEL_FONT = 15
RIBBON_ART_BUTTON_BAR_LABEL_COLOUR = 16
RIBBON_ART_BUTTON_BAR_HOVER_BORDER_COLOUR = 17
RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_TOP_COLOUR = 18
RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_TOP_GRADIENT_COLOUR = 19
RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_COLOUR = 20
RIBBON_ART_BUTTON_BAR_HOVER_BACKGROUND_GRADIENT_COLOUR = 21
RIBBON_ART_BUTTON_BAR_ACTIVE_BORDER_COLOUR = 22
RIBBON_ART_BUTTON_BAR_ACTIVE_BACKGROUND_TOP_COLOUR = 23
RIBBON_ART_BUTTON_BAR_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR = 24
RIBBON_ART_BUTTON_BAR_ACTIVE_BACKGROUND_COLOUR = 25
RIBBON_ART_BUTTON_BAR_ACTIVE_BACKGROUND_GRADIENT_COLOUR = 26
RIBBON_ART_GALLERY_BORDER_COLOUR = 27
RIBBON_ART_GALLERY_HOVER_BACKGROUND_COLOUR = 28
RIBBON_ART_GALLERY_BUTTON_BACKGROUND_COLOUR = 29
RIBBON_ART_GALLERY_BUTTON_BACKGROUND_GRADIENT_COLOUR = 30
RIBBON_ART_GALLERY_BUTTON_BACKGROUND_TOP_COLOUR = 31
RIBBON_ART_GALLERY_BUTTON_FACE_COLOUR = 32
RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_COLOUR = 33
RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_GRADIENT_COLOUR = 34
RIBBON_ART_GALLERY_BUTTON_HOVER_BACKGROUND_TOP_COLOUR = 35
RIBBON_ART_GALLERY_BUTTON_HOVER_FACE_COLOUR = 36
RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_COLOUR = 37
RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_GRADIENT_COLOUR = 38
RIBBON_ART_GALLERY_BUTTON_ACTIVE_BACKGROUND_TOP_COLOUR = 39
RIBBON_ART_GALLERY_BUTTON_ACTIVE_FACE_COLOUR = 40
RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_COLOUR = 41
RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_GRADIENT_COLOUR = 42
RIBBON_ART_GALLERY_BUTTON_DISABLED_BACKGROUND_TOP_COLOUR = 43
RIBBON_ART_GALLERY_BUTTON_DISABLED_FACE_COLOUR = 44
RIBBON_ART_GALLERY_ITEM_BORDER_COLOUR = 45
RIBBON_ART_TAB_LABEL_COLOUR = 46
RIBBON_ART_TAB_SEPARATOR_COLOUR = 47
RIBBON_ART_TAB_SEPARATOR_GRADIENT_COLOUR = 48
RIBBON_ART_TAB_CTRL_BACKGROUND_COLOUR = 49
RIBBON_ART_TAB_CTRL_BACKGROUND_GRADIENT_COLOUR = 50
RIBBON_ART_TAB_HOVER_BACKGROUND_TOP_COLOUR = 51
RIBBON_ART_TAB_HOVER_BACKGROUND_TOP_GRADIENT_COLOUR = 52
RIBBON_ART_TAB_HOVER_BACKGROUND_COLOUR = 53
RIBBON_ART_TAB_HOVER_BACKGROUND_GRADIENT_COLOUR = 54
RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_COLOUR = 55
RIBBON_ART_TAB_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR = 56
RIBBON_ART_TAB_ACTIVE_BACKGROUND_COLOUR = 57
RIBBON_ART_TAB_ACTIVE_BACKGROUND_GRADIENT_COLOUR = 58
RIBBON_ART_TAB_BORDER_COLOUR = 59
RIBBON_ART_PANEL_BORDER_COLOUR = 60
RIBBON_ART_PANEL_BORDER_GRADIENT_COLOUR = 61
RIBBON_ART_PANEL_MINIMISED_BORDER_COLOUR = 62
RIBBON_ART_PANEL_MINIMISED_BORDER_GRADIENT_COLOUR = 63
RIBBON_ART_PANEL_LABEL_BACKGROUND_COLOUR = 64
RIBBON_ART_PANEL_LABEL_BACKGROUND_GRADIENT_COLOUR = 65
RIBBON_ART_PANEL_LABEL_COLOUR = 66
RIBBON_ART_PANEL_HOVER_LABEL_BACKGROUND_COLOUR = 67
RIBBON_ART_PANEL_HOVER_LABEL_BACKGROUND_GRADIENT_COLOUR = 68
RIBBON_ART_PANEL_HOVER_LABEL_COLOUR = 69
RIBBON_ART_PANEL_MINIMISED_LABEL_COLOUR = 70
RIBBON_ART_PANEL_ACTIVE_BACKGROUND_TOP_COLOUR = 71
RIBBON_ART_PANEL_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR = 72
RIBBON_ART_PANEL_ACTIVE_BACKGROUND_COLOUR = 73
RIBBON_ART_PANEL_ACTIVE_BACKGROUND_GRADIENT_COLOUR = 74
RIBBON_ART_PANEL_BUTTON_FACE_COLOUR = 75
RIBBON_ART_PANEL_BUTTON_HOVER_FACE_COLOUR = 76
RIBBON_ART_PAGE_BORDER_COLOUR = 77
RIBBON_ART_PAGE_BACKGROUND_TOP_COLOUR = 78
RIBBON_ART_PAGE_BACKGROUND_TOP_GRADIENT_COLOUR = 79
RIBBON_ART_PAGE_BACKGROUND_COLOUR = 80
RIBBON_ART_PAGE_BACKGROUND_GRADIENT_COLOUR = 81
RIBBON_ART_PAGE_HOVER_BACKGROUND_TOP_COLOUR = 82
RIBBON_ART_PAGE_HOVER_BACKGROUND_TOP_GRADIENT_COLOUR = 83
RIBBON_ART_PAGE_HOVER_BACKGROUND_COLOUR = 84
RIBBON_ART_PAGE_HOVER_BACKGROUND_GRADIENT_COLOUR = 85
RIBBON_ART_TOOLBAR_BORDER_COLOUR = 86
RIBBON_ART_TOOLBAR_HOVER_BORDER_COLOUR = 87
RIBBON_ART_TOOLBAR_FACE_COLOUR = 88
RIBBON_ART_TOOL_BACKGROUND_TOP_COLOUR = 89
RIBBON_ART_TOOL_BACKGROUND_TOP_GRADIENT_COLOUR = 90
RIBBON_ART_TOOL_BACKGROUND_COLOUR = 91
RIBBON_ART_TOOL_BACKGROUND_GRADIENT_COLOUR = 92
RIBBON_ART_TOOL_HOVER_BACKGROUND_TOP_COLOUR = 93
RIBBON_ART_TOOL_HOVER_BACKGROUND_TOP_GRADIENT_COLOUR = 94
RIBBON_ART_TOOL_HOVER_BACKGROUND_COLOUR = 95
RIBBON_ART_TOOL_HOVER_BACKGROUND_GRADIENT_COLOUR = 96
RIBBON_ART_TOOL_ACTIVE_BACKGROUND_TOP_COLOUR = 97
RIBBON_ART_TOOL_ACTIVE_BACKGROUND_TOP_GRADIENT_COLOUR = 98
RIBBON_ART_TOOL_ACTIVE_BACKGROUND_COLOUR = 99
RIBBON_ART_TOOL_ACTIVE_BACKGROUND_GRADIENT_COLOUR = 100
# RibbonScrollButtonStyle
RIBBON_SCROLL_BTN_LEFT = 0
"""Button will scroll to the left."""
RIBBON_SCROLL_BTN_RIGHT = 1
"""Button will scroll to the right."""
RIBBON_SCROLL_BTN_UP = 2
"""Button will scroll upward."""
RIBBON_SCROLL_BTN_DOWN = 3
"""Button will scroll downward."""
RIBBON_SCROLL_BTN_DIRECTION_MASK = 3
"""A mask to extract direction from a combination of flags."""
RIBBON_SCROLL_BTN_NORMAL = 0
"""Button is not active or hovered."""
RIBBON_SCROLL_BTN_HOVERED = 4
"""Button has a cursor hovering over it."""
RIBBON_SCROLL_BTN_ACTIVE = 8
"""Button is being pressed."""
RIBBON_SCROLL_BTN_STATE_MASK = 12
"""A mask to extract state from a combination of flags."""
RIBBON_SCROLL_BTN_FOR_OTHER = 0
"""Button is not for scrolling tabs nor pages."""
RIBBON_SCROLL_BTN_FOR_TABS = 16
"""Button is for scrolling tabs."""
RIBBON_SCROLL_BTN_FOR_PAGE = 32
"""Button is for scrolling pages."""
RIBBON_SCROLL_BTN_FOR_MASK = 48
"""A mask to extract purpose from a combination of flags."""
# RibbonButtonKind
RIBBON_BUTTON_NORMAL = 1 << 0
"""Normal button or tool with a clickable area which causes some generic action."""
RIBBON_BUTTON_DROPDOWN = 1 << 1
"""Dropdown button or tool with a clickable area which typically causes a dropdown menu."""
RIBBON_BUTTON_HYBRID = RIBBON_BUTTON_NORMAL | RIBBON_BUTTON_DROPDOWN
"""Button or tool with two clickable areas - one which causes a dropdown menu, and one which causes a generic action."""
RIBBON_BUTTON_TOGGLE = 1 << 2
"""Normal button or tool with a clickable area which toggles the button between a pressed and unpressed state."""
# RibbonButtonBarButtonState
RIBBON_BUTTONBAR_BUTTON_SMALL = 0 << 0
RIBBON_BUTTONBAR_BUTTON_MEDIUM = 1 << 0
RIBBON_BUTTONBAR_BUTTON_LARGE = 2 << 0
RIBBON_BUTTONBAR_BUTTON_SIZE_MASK = 3 << 0
RIBBON_BUTTONBAR_BUTTON_NORMAL_HOVERED = 1 << 3
RIBBON_BUTTONBAR_BUTTON_DROPDOWN_HOVERED = 1 << 4
RIBBON_BUTTONBAR_BUTTON_HOVER_MASK = RIBBON_BUTTONBAR_BUTTON_NORMAL_HOVERED | RIBBON_BUTTONBAR_BUTTON_DROPDOWN_HOVERED
RIBBON_BUTTONBAR_BUTTON_NORMAL_ACTIVE = 1 << 5
RIBBON_BUTTONBAR_BUTTON_DROPDOWN_ACTIVE = 1 << 6
RIBBON_BUTTONBAR_BUTTON_ACTIVE_MASK = RIBBON_BUTTONBAR_BUTTON_NORMAL_ACTIVE | RIBBON_BUTTONBAR_BUTTON_DROPDOWN_ACTIVE
RIBBON_BUTTONBAR_BUTTON_DISABLED = 1 << 7
RIBBON_BUTTONBAR_BUTTON_TOGGLED = 1 << 8
RIBBON_BUTTONBAR_BUTTON_STATE_MASK = 0x1F8
# RibbonGalleryButtonState
RIBBON_GALLERY_BUTTON_NORMAL = 1
RIBBON_GALLERY_BUTTON_HOVERED = 2
RIBBON_GALLERY_BUTTON_ACTIVE = 3
RIBBON_GALLERY_BUTTON_DISABLED = 4
RIBBON_BAR_SHOW_PAGE_LABELS = 1 << 0
RIBBON_BAR_SHOW_PAGE_ICONS = 1 << 1
RIBBON_BAR_FLOW_HORIZONTAL = 0
RIBBON_BAR_FLOW_VERTICAL = 1 << 2
RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS = 1 << 3
RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS = 1 << 4
RIBBON_BAR_ALWAYS_SHOW_TABS = 1 << 5
RIBBON_BAR_DEFAULT_STYLE = RIBBON_BAR_FLOW_HORIZONTAL | RIBBON_BAR_SHOW_PAGE_LABELS \
| RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS
RIBBON_BAR_FOLDBAR_STYLE = RIBBON_BAR_FLOW_VERTICAL | RIBBON_BAR_SHOW_PAGE_ICONS \
| RIBBON_BAR_SHOW_PANEL_EXT_BUTTONS \
| RIBBON_BAR_SHOW_PANEL_MINIMISE_BUTTONS
RIBBON_TOOLBAR_TOOL_FIRST = 1 << 0
RIBBON_TOOLBAR_TOOL_LAST = 1 << 1
RIBBON_TOOLBAR_TOOL_POSITION_MASK = RIBBON_TOOLBAR_TOOL_FIRST | RIBBON_TOOLBAR_TOOL_LAST
RIBBON_TOOLBAR_TOOL_NORMAL_HOVERED = 1 << 3
RIBBON_TOOLBAR_TOOL_DROPDOWN_HOVERED = 1 << 4
RIBBON_TOOLBAR_TOOL_HOVER_MASK = RIBBON_TOOLBAR_TOOL_NORMAL_HOVERED | RIBBON_TOOLBAR_TOOL_DROPDOWN_HOVERED
RIBBON_TOOLBAR_TOOL_NORMAL_ACTIVE = 1 << 5
RIBBON_TOOLBAR_TOOL_DROPDOWN_ACTIVE = 1 << 6
RIBBON_TOOLBAR_TOOL_ACTIVE_MASK = RIBBON_TOOLBAR_TOOL_NORMAL_ACTIVE | RIBBON_TOOLBAR_TOOL_DROPDOWN_ACTIVE
RIBBON_TOOLBAR_TOOL_DISABLED = 1 << 7
RIBBON_TOOLBAR_TOOL_TOGGLED = 1 << 8
RIBBON_TOOLBAR_TOOL_STATE_MASK = 0x1F8
RIBBON_PANEL_NO_AUTO_MINIMISE = 1 << 0
RIBBON_PANEL_EXT_BUTTON = 1 << 3
RIBBON_PANEL_MINIMISE_BUTTON = 1 << 4
RIBBON_PANEL_STRETCH = 1 << 5
RIBBON_PANEL_FLEXIBLE = 1 << 6
RIBBON_PANEL_DEFAULT_STYLE = 0 | PypiClean |
/Linum-0.9.12.tar.gz/Linum-0.9.12/linum/svg_renderer/calendar/views/layer_list_view.py | import tkinter
from datetime import date, timedelta
from typing import Optional
from svgwrite import Drawing
from svgwrite.shapes import Rect
from linum.layer_list import LayerList
from linum.svg_renderer.base.style import Style
from linum.svg_renderer.calendar.views.layer_view import LayerView
class LayerListView:
def __init__(self, layer_list: LayerList, start: date, length: int,
width: Optional[float] = None,
tasks_style: Optional[Style] = None):
self.start = start
self.length = length
self.layer_list = self._trim_and_clean(layer_list)
        # Fall back to the screen width; note this briefly creates a Tk root window.
        self.width = width or tkinter.Tk().winfo_screenwidth()
self.tasks_style = tasks_style or Style("default layers")
@property
def height(self):
layer_height = self.tasks_style.get("height", 100)
indent = self.tasks_style.get("indent", 0)
return len(self.layer_list.layers) * (layer_height + indent) + indent
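
    # Worked illustration (added): with 3 layers, ``height: 100`` and
    # ``indent: 10`` in the tasks style, the view is 3 * (100 + 10) + 10 = 340
    # units tall.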
def render(self, drawing: Drawing, x: float, y: float):
# Rendering first indent
indent = self.tasks_style.get("indent", 0)
indent_background = Rect(insert=(x, y),
size=(self.width, indent),
class_=" ".join(["layer", "indent", "background"]),
debug=False)
drawing.add(indent_background)
y_ = y + indent
for layer in self.layer_list.layers:
# Rendering layer
lv = LayerView(layer, self.start, self.length, self.width, self.tasks_style)
lv.render(drawing, x, y_)
# Rendering indent
indent_background = Rect(insert=(x, y_ + lv.height),
size=(self.width, indent),
class_=" ".join(["layer", "indent", "background"]),
debug=False)
drawing.add(indent_background)
y_ += lv.height + indent
def _trim_and_clean(self, layer_list: LayerList) -> LayerList:
_, ll = layer_list.split(self.start)
ll, _ = ll.split(self.start + timedelta(self.length))
ll.cleanup()
return ll | PypiClean |
/HOGBEN-1.2.1.tar.gz/HOGBEN-1.2.1/README.md | [](https://zenodo.org/badge/latestdoi/366323997)
# HOGBEN
**H**olistic **O**ptimization for **G**aining **B**etter **E**vidence from **N**eutrons
## About the Project
**For the original repositories that this work is based on, see [fisher-information](https://github.com/James-Durant/fisher-information) and [experimental-design](https://github.com/James-Durant/experimental-design)**.
The Fisher information (FI) can be used to optimise the design of neutron reflectometry experiments, leading to greater confidence in parameters of interest and better use of experimental time. This package contains modules and data for optimising the design of a wide range of reflectometry experiments.
Please refer to the [notebooks](https://github.com/jfkcooper/HOGBEN/tree/main/notebooks) for an introduction on how to use the code.
This repository is named after Lancelot Hogben, whose relentless opposition to eugenics (and vocal criticism of Ronald Fisher's views on it) we applaud.
### Citation
Please cite the following [article](https://arxiv.org/abs/2108.05605) if you intend to include elements of this work in your own publications:
> Durant, J. H., Wilkins, L. and Cooper, J. F. K. Optimising experimental design in neutron reflectometry. arXiv:2108.05605 (2021).
Or with BibTeX as:
```
@misc{Durant2021,
title = {Optimising experimental design in neutron reflectometry},
author = {Durant, J. H. and Wilkins, L. and Cooper, J. F. K.},
year = {2021},
eprint = {2108.05605},
archivePrefix = {arXiv},
primaryClass = {physics.data-an}
}
```
## Contact
Jos Cooper - [email protected] \
James Durant - [email protected] \
Lucas Wilkins - [email protected]
## Acknowledgements
We thank Luke Clifton for his assistance and expertise in fitting the lipid monolayer and lipid bilayer data sets.
## License
Distributed under the BSD 3-Clause License. See [license](/LICENSE) for more information.
| PypiClean |
/Digenpy-1.4.1.linux-i686.tar.gz/usr/local/lib/python2.7/dist-packages/Digenpy_/Spanish.py | import itertools, string, hashlib
class Dlink():
def __init__(self, *args):
if not len(args[0]) > 1:
self.return_=1
return
self.sm=args[0][1].split(':')
self.dicts=[]
self.S2=self.hexa_minus_one(self.sm[-1][1])
self.S1=""
        # The last octet's second hex digit underflows when it is '0' (the
        # original compared with ``is 0``, which never matches a string char).
        if self.sm[-1][1] == '0': self.S1 = self.hexa_minus_one(self.sm[-1][0])
        if self.S1 == -1: self.S2, self.S1 = [0, 0]
self.static="%s%s%s%s" %(self.sm[4], self.sm[1], self.sm[2],self.sm[3])
@property
def dictionary(self):
if hasattr(self, 'return_'):
return "Usage: Dlink Bssid "
for self.F in string.hexdigits.replace('abcdef',''):
for self.I in string.hexdigits.replace('abcdef',''):
self.dicts.append("%s%s%s%s%s%s%s%s"
%(self.sm[5], self.sm[0], self.static, self.sm[5], self.static, self.sm[1], self.F, self.I))
self.dicts.append("%s%s%s%s%s%s%s%s%s%s%s" %(
self.S1, self.S2, self.sm[0],self.static,
self.S1, self.S2, self.sm[5],self.static,self.sm[0], self.F, self.I))
self.dicts.append("%s%s%s%s%s%s%s%s%s"
%(self.S1, self.S2, self.sm[0], self.static, self.sm[5], self.static, self.sm[1], self.F, self.I))
return self.dicts
def hexa_minus_one(self, hexa):
        result = int(hexa, 16) - 1
        # The original compared hex(result).split('x')[0] with "-", which never
        # matched ("-0x1".split('x')[0] is "-0"), so underflow was never reported.
        if result < 0:
            return -1
        return hex(result).split('x')[1]
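
    # Illustration (added): hexa_minus_one('a') -> '9', while
    # hexa_minus_one('0') -> -1, the underflow marker consumed in __init__.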
class Tele2():
def __init__(self, *args):
if not len(args[0]) > 1:
self.return_=1
return
self.sm=args[0][1].split(':')
self.year=args[0][1]
self.dicts=[]
self.fixed="IX1V" + args[0][2]
@property
def dictionary(self):
if hasattr(self, 'return_'):
return "Usage: Tele2 year fixed_data [''|'IV']"
[[[[[[ self.dicts.append("%s%s%s%s%s%s%s%s" %(self.fixed, self.year, a, b, c, d, e, f) ) for a in range(0,9)] for b in range(0,9)] for c in range(0,9)] for d in range(0,9)] for e in range(0,9)] for f in range(0,9)]
return self.dicts
class Jazztel():
def __init__(self, *args):
if not len(args[0]) > 1:
self.dictionary="Usage: Jazztel Bssid Essid [WPA]"
return
self.dictionary=JazztelAndTelefonica(args[0][1], args[0][2], { '00:1A:2B' : ['Comtrend', 'E001D20'] } ).dictionary
class Telefonica():
def __init__(self, *args):
if not len(args[0]) > 1:
self.dictionary="Usage: Telefonica Bssid Essid"
return
self.dictionary=JazztelAndTelefonica(args[0][1], args[0][2], {
"00:60:B3": [ "Z-com", "Z001349"],
"00:01:38": [ "Xavi 7768r", "X000138"] ,
"00:03:C9": [ "Comtrend 535", "C0030DA"],
"00:A0:C5": [ "Zyxel 650HW/660HW", "Z001349"],
"00:16:38": [ "Comtrend_536+", "C0030DA"],
"00:1A:2B": [ "Comtrend 5361", "C001D20"],
"00:02:CF": [ "P-660HW-D1", "Z0002CF" ],
"00:23:F8": [ "P-660HW-D1", "Z0023F8"],
"40:4A:03": [ "Zyxel p660", "Z404A03"],
"50:67:F0": [ "Zyxel", "Z5067F0"],
"E0:91:53" : [ "Xavi 7968-Solos 4610RD/461x", "E0:91:53" ],
"00:1F:9F" : ["Thomson 2009", "T5YF69A"],
"00:18:03": [ "Huawei echolife hg520v (ftth)", "H4A60BA", "H538FBF"],
"00:13:49": [ "P-660HW-D1", "Z001349", "Z0002CF" ] }).dictionary
class TelefonicaWPA():
def __init__(self, *args):
if not len(args[0]) > 1:
self.dictionary="Usage: TelefonicaWPA Bssid Essid"
return
self.dictionary=JazztelAndTelefonicaWPA(*args).dictionary
class JazztelWPA():
def __init__(self, *args):
if not len(args[0]) > 1:
self.dictionary="Usage: JazztelWPA Bssid Essid"
return
self.dictionary=JazztelAndTelefonicaWPA(*args).dictionary
class JazztelAndTelefonicaWPA():
def __init__(self, *args):
args = args[0]
if not len(args) > 1:
self.return_=1
return
self.mac=args[1].replace(':','')
self.essid=args[2].split("_")[1]
self.static="bcgbghgg"
@property
def dictionary(self):
if hasattr(self, 'return_'):
return
return [hashlib.md5(self.static + self.mac[:-4] + self.essid + self.mac).hexdigest()[:-12], ]
class JazztelAndTelefonica():
def __init__(self, *args):
if not len(args[0]) > 1:
self.return_=1
return
self.sm=args[0].split(':')
self.essid=args[1]
self.dicts=[]
self.valid_routers=args[2]
@property
def dictionary(self):
if hasattr(self, 'return_'):
return
        chars = string.hexdigits.replace('abcdef', '')  # '0123456789ABCDEF'
        try:
            routers = self.valid_routers[':'.join(self.sm[:3])]
            # routers[0] is the model name; the remaining entries are key prefixes.
            for prefix in routers[1:]:
                for m, j, k, l in itertools.product(chars, repeat=4):
                    self.dicts.append("%s%s%s%s%s%s%s" % (
                        prefix, m, j, k, l, self.essid[-2], self.essid[-1]))
except KeyError:
raise Exception('Error: Router not valid for a jazztel or telefonica network')
return self.dicts | PypiClean |
/FaSh-0.0.4.tar.gz/FaSh-0.0.4/README.md | # FaSh v0.0.3: a Standardized Development Framework for iSpace Applications
## Introduction
FaSh (the iSpace standardized application development framework) helps you build iSpace applications quickly. By following a fixed file layout and communication format, FaSh lets developers watch material folders, initialize databases, run iSpace components as separate processes, and use Redis as the broker for inter-process communication. In addition, Node-Red can be used to attach further component types, connecting many kinds of sensors, performers, learners and databases.
The FaSh library stores its data in SQLite. SQLite is a software library that implements a self-contained, serverless, zero-configuration, transactional SQL database engine; in short, it builds a local database whose structure resembles an Excel sheet.
The iSpace components in FaSh exchange data through a Redis database. Redis is an open-source, in-memory key-value store; in short, it creates a high-performance dictionary-like database in memory, where every record consists of a key and a value, and querying a key returns its value.
## Environment
### Installing the FaSh library
```python
pip install FaSh
```
### Installing and starting Redis
#### Downloading Redis
Download the Redis package from the [official Redis page](https://redis.io/), or install it using the official commands.
On Windows, download a Windows build from tporadowski's [Redis for Windows project](https://github.com/tporadowski/redis/releases); at the time of writing it supports up to version 5.0.10.
#### Installing and starting Redis
During installation, tick "Add the Redis installation folder to PATH environment variable.";
do not change the default port (6379), and tick "Add an exception to the Windows Firewall";
keep the default options for everything else.
After installation, open the Redis folder and run redis-server.exe; the window may close immediately.
Redis is now running at 127.0.0.1:6379.
### Installing and starting Node-Red
Under construction...
### Debugging tools
#### Redis debugging tool
[Another Redis Desktop Manager](https://github.com/qishibo/AnotherRedisDesktopManager/releases) is a free, open-source graphical Redis client. After installation it connects to the default database automatically, and it can also connect to a database you specify.
#### SQLite debugging tool
[SQliteSpy](https://www.yunqa.de/delphi/apps/sqlitespy/index) is a free, lightweight graphical SQLite tool that can open the database files SQLite produces.
## Basics
### Overall architecture & program framework
The FaSh library specifies four main aspects of an iSpace application: the program framework, the file layout, the database format, and the communication format. Among these, the program framework is the key to understanding how FaSh runs.
FaSh assumes the general logic of an iSpace application looks like this:

To implement this logic, FaSh requires an iSpace application to consist of six independent components: msghub, sensor, performer, selector, corrector and learner, sketched below:

* msghub is a Redis database instance; the other five components (sensor, performer, selector, corrector, learner) independently publish information to it or request information from it, and run their internal code based on what they obtain.
* sensor is the sensing component; it continuously publishes sensor readings to msghub.
* performer is the actuator component; it continuously pulls performance instructions from msghub and reports whether the performer is running.
* selector is the selection component; it continuously pulls sensor readings and the running state of the corresponding performer from msghub; when the conditions are met, it reads the material and parameter databases and publishes performance instructions to msghub.
* corrector is the update component; it continuously pulls update data from msghub and applies it to the databases involved.
* learner is the learning component; it continuously pulls sensor readings, performance data and performer running states from msghub, archives them, runs the learning model when certain conditions are met, and reports update data.
FaSh does not fix the mapping between components. The data published by one sensor can be used by several performers, the data from several performers and their sensors can feed a single learner, parameter updates from any number of learners can all be applied through one corrector, and so on. The keys a component uses to publish and query information are set before its program starts, so to address information correctly FaSh requires every running sensor, performer and learner to have a unique name.
In general, one selector serves exactly one performer.
### File layout
Currently FaSh only specifies the file layout of performer programs. The performer folder name should match the performer's name (FaSh automatically uses the folder name as the name of the performer program), and the folder must contain two subfolders: rmlib (rated-materials-lib) and database. Beyond that there are no constraints on other folders or files. Scripts should sit in the root directory, giving the following structure:

The rmlib folder holds the materials to be rated. Put material files with the **same purpose** (i.e. a group of files to compare and choose among) in the same folder. Do not create second-level subfolders; materials inside them cannot be indexed into the database.
The database folder holds the database file, named performer_name.db.
### Database format
Each performer's database folder contains a performer_name.db file with several tables, of two kinds:
* the parameters table records the parameters the performer has to choose when running, with four columns: key (the parameter name), value (the selection criteria), path (the owning program) and checked (used for database self-checks);
* every other table records the information of all files in one folder under rmlib, with four columns: key (the file name), value (the selection criteria), path (the file path) and checked (used for database self-checks).
The key and path columns store strings; the value column stores a dict kept as a string; checked stores only 0 or 1.
Storing the value column as a dict allows several parameters or several selection criteria to be held at once.

### Communication protocol
In the FaSh framework, every iSpace component must communicate with Msghub, writing data to it or pulling data from it; all of this data follows a fixed format.
#### key-value conventions in the redis database
The redis database holds the following four kinds of key-value pairs (a short sketch of reading them follows the list):
* sensor_name : sensing_json -- published by a sensor to report its current readings
* performer_name : perform_json -- published by a selector to tell the performer which materials and parameters to use in the performance
* performer_name_active : 0/1 -- published by a performer to report whether it is currently running; the value can only be 0 or 1, where 0 means the performance has stopped
* corrector : correct_json -- published by a learner to tell the corrector how to update the databases
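
As a hedged illustration (using the redis-py client directly; the key names
below are examples, not fixed by FaSh):

```python
import redis

r = redis.Redis(host="127.0.0.1", port=6379)
r.get("temp_sensor")         # -> sensing_json published by a sensor
r.get("MusicPlayer")         # -> perform_json published by a selector
r.get("MusicPlayer_active")  # -> b"0" or b"1", published by the performer
r.get("corrector")           # -> correct_json published by a learner
```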
#### JSON format conventions
Most values stored in the redis database are JSON strings, in the following formats:
```python
# format published by a sensor
sensing_json = {
    'index1_name' : value1,
    'index2_name' : value2,
    'index3_name' : value3}
# format published by a selector
perform_json = {
    'materials' : {
        'material1_name' : 'path1',
        'material2_name' : 'path2',
        'material3_name' : 'path3'},
    'parameters' : {
        'parameter1_name' : value1,
        'parameter2_name' : value2,
        'parameter3_name' : value3}}
# format published by a learner
correct_json = {
    'db' : 'db_path',
    'table1' : '[(key1,value1),(key2,value2),(key3,value3)]',
    'table2' : '[(key1,value1),(key2,value2),(key3,value3)]',
    'table3' : '[(key1,value1),(key2,value2),(key3,value3)]'}
```
Before publishing, convert the dict to a JSON string:
```python
# import the json library
import json
# an example data dict
sensing_data = {
    'index1_name' : value1,
    'index2_name' : value2,
    'index3_name' : value3}
# convert it to a JSON string
sensing_json = json.dumps(sensing_data)
```
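
The receiving side reverses this with json.loads (hedged sketch):

```python
# parse the JSON string back into a dict
sensing_data = json.loads(sensing_json)
print(sensing_data['index1_name'])
```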
## Component classes and their program logic
### The Rmlib class
The Rmlib class provides real-time monitoring of each material folder under rmlib (currently it only monitors).
#### Instantiation
No arguments are needed: as long as the file layout is correct and the script sits in the root directory, the object can be created.
#### Methods
Call the start method to launch the watching process.
#### Example
```python
# imports
import FaSh as fs
# instantiate
performer_rmlib = fs.Rmlib()
# start watching
performer_rmlib.start()
```
### The Db class
The Db class initializes performer_name.db: the file is created if it does not exist, and checked if it does (currently it only checks).
#### Instantiation
No arguments are needed: as long as the file layout is correct and the script sits in the root directory, the object can be created. However, if no parameter_dic argument is passed, the "parameters" table will not be created.
parameter_dic is a dict with the following structure:
```python
{'index_name1':'value_generate_func1','index_name2':'value_generate_func2','index_name3':'value_generate_func3'}
```
#### Methods
Call the init method: performer_name.db is created if missing, and checked if present.
#### Example
```python
# imports
import FaSh as fs
# instantiate
performer_db = fs.Db()
# initialize the database
performer_db.init()
```
### The Sensor class
The Sensor class publishes sensor readings. Creating a Sensor requires at least three arguments: the sensor name, a setup function and a main function.
A Sensor runs as follows:
* run the setup function once
* run the main function in a loop, publishing whatever it returns
The main function must return a sensing_json string that follows the communication protocol:
```python
sensing_json = {
    'index1_name' : value1,
    'index2_name' : value2,
    'index3_name' : value3}
```
#### Instantiation
Instantiate with the sensor name, setup function and main function; keyword arguments are recommended:
```python
# imports
import FaSh as fs
# instantiate
sensor = fs.Sensor(
    name="sensor_name",     # sensor name
    setup_func=setup_func,  # setup function: a piece of code run exactly once when the sensor starts
    main_func=main_func,    # main function: a piece of code run in a loop while the sensor runs
)
```
#### Methods
Call the start method to run the sensor.
#### Example
```python
# imports
import json
import FaSh as fs
# define the setup function
def setup_func():
    global value1, value2, value3  # declare globals to pass values between the setup and main functions
    value1 = 'test_value1'
    value2 = 'test_value2'
    value3 = 'test_value3'
# define the main function
def main_func():
    global value1, value2, value3  # declare globals to pass values between the setup and main functions
    return json.dumps({'index1': value1, 'index2': value2, 'index3': value3})  # must return a JSON string in the sensing_json format
# instantiate
sensor = fs.Sensor(
    name="sensor_name",     # sensor name
    setup_func=setup_func,  # setup function: run exactly once when the sensor starts
    main_func=main_func,    # main function: run in a loop while the sensor runs
)
# start the sensor
sensor.start()
```
### The Performer class
The Performer class drives a performance according to the instructions it receives. Creating a Performer requires at least three arguments: the performer name, a setup function and a main function.
A Performer runs as follows:
* read the performance instructions from Msghub
* publish performer_name_active, 1
* run the setup function once
* run the main function once; when it finishes, publish performer_name_active, 0
* while the main function runs, the performance instructions in Msghub are polled continuously; if they change, the process stops the running main function, publishes performer_name_active, 0, and re-runs the main function with the new instructions
* re-running the main function repeats steps 2-4
#### Instantiation
Instantiate with the performer name, setup function and main function; keyword arguments are recommended:
```python
# imports
import FaSh as fs
# instantiate
performer = fs.Performer(
    name="performer_name",  # performer name
    setup_func=setup_func,  # setup function: a piece of code run exactly once when the performer starts
    main_func=main_func,    # main function: a piece of code run repeatedly while the performer runs
)
```
If the Performer depends on parameters that need rating, pass a parameter_dic to build the full database:
```python
# imports
import random
import FaSh as fs
# build a parameter_dic with three parameters, each initialized with random.randint(0, 1)
parameter_dic = {'index1': random.randint(0,1), 'index2': random.randint(0,1), 'index3': random.randint(0,1)}
# instantiate
performer = fs.Performer(
    name="performer_name",  # performer name
    setup_func=setup_func,  # setup function: run exactly once when the performer starts
    main_func=main_func,    # main function: run repeatedly while the performer runs
    parameter_dic=parameter_dic
)
# reading the instance's parameter_dic attribute yields the parameter_dic; the matching table is then created
performer_db = fs.Db(parameter_dic=performer.parameter_dic)
performer_db.init()
```
#### Methods
Call the start method to run the performer.
#### Example
```python
# imports
import pyaudio
import wave
import FaSh as fs
# define the setup function
def MusicPlayer_setup_func():
    global waveopen
    waveopen = pyaudio.PyAudio()
# define the main function
def MusicPlayer_main_func(dic):
    global waveopen
    wavefile = wave.open(dic['materials']['music'], 'rb')
    stream = waveopen.open(
        format=waveopen.get_format_from_width(wavefile.getsampwidth()),
        channels=wavefile.getnchannels(),
        rate=wavefile.getframerate(),
        output=True)
    while True:
        data = wavefile.readframes(10000)
        if not data:  # readframes returns empty bytes at end of file
            break
        stream.write(data)
    stream.stop_stream()
    stream.close()
    waveopen.terminate()
# create the Performer
MusicPlayer = fs.Performer(
    name="MusicPlayer",                 # performer name
    setup_func=MusicPlayer_setup_func,  # setup function: run exactly once when the performer starts
    main_func=MusicPlayer_main_func,    # main function: run once per activation, re-triggered on new instructions
)
MusicPlayer.start()
```
### The Selector class
The Selector class continuously pulls sensor readings and performer running states from Msghub. Creating a Selector requires at least the sensor name(s), a judge function, the performer name and a select function.
A Selector runs as follows:
* read all sensor information from Msghub and build a sensing dict
* read the performer's running state
* if the performer is running, and passing the sensor information to the judge function (which decides whether the performance should change) returns True, start the loop again
* otherwise, read performer_name.db, turn every table into a dataframe, store them in a dict and pass it to the select function
* publish the performance instructions and start the loop again
The select function must return a JSON string that satisfies the perform_json format:
```python
perform_json = {
    'materials' : {
        'material1_name' : 'path1',
        'material2_name' : 'path2',
        'material3_name' : 'path3'},
    'parameters' : {
        'parameter1_name' : value1,
        'parameter2_name' : value2,
        'parameter3_name' : value3}}
```
#### Instantiation
Instantiate with the performer name, judge function and select function; keyword arguments are recommended:
```python
# imports
import FaSh as fs
# instantiate
performer_selector = fs.Selector(
    name="performer_name",   # performer name
    judge_func=judge_func,   # judge function: takes sensor readings, returns True or False
    select_func=select_func  # select function: takes dataframes, returns perform_json
)
```
#### Methods
Call the start method to run the selector.
#### Example
```python
# imports
import json
import FaSh as fs
# define the judge function; it returns True or False
def judge_func(sensing_dic):
    if sensing_dic['sensor1']['index1'] != 0 and sensing_dic['sensor2']['index2'] != 0:
        result = True
    else:
        result = False
    return result
# define the select function; its return value must satisfy the perform_json format
def selector_func(df_dic):
    perform_data = {}
    parameters_data = {}
    materials_data = {}
    for key in df_dic.keys():
        if key == 'parameters':
            for index, row in df_dic[key].iterrows():
                parameters_data[index] = row['value'] - 5
            perform_data['parameters'] = json.dumps(parameters_data)  # nested dicts must be serialized to JSON strings first
        else:
            if key == 'music':
                index = df_dic[key]['value'].idxmax(axis=0)
                materials_data[key] = index
            perform_data['materials'] = json.dumps(materials_data)  # nested dicts must be serialized to JSON strings first
    return json.dumps(perform_data)
# instantiate
performer_selector = fs.Selector(
    name="performer_name",
    judge_func=judge_func,
    select_func=selector_func)
performer_selector.start()
```
### The Corrector class
Under construction...
### The Learner class
Under construction...
### The MsgHub class
Creates a redis client, assuming redis is already running at the default address. When the built-in FaSh modules do not cover a use case, MsgHub can be used to build arbitrary programs.
#### Instantiation
No arguments are needed to instantiate it.
#### Methods
The .read(key) method reads the value stored under key in the redis database.
The .write(key, value) method sets the key key to value in the database.
#### Example
```python
# imports
import FaSh as fs
# instantiate
msg_hub = fs.MsgHub()
# read and write
msg_hub.write('test_client', 'test_msg')
msg_hub.read('test_client')  # should return test_msg
```
## Writing handler functions
### Passing functions into classes
The only thing to watch out for: when passing a function into a class as an object, do not append the trailing parentheses.
```python
# define a function
def test_func():
    return 666
# correct
obj = ClassName(
    func=test_func)
# wrong
obj = ClassName(
    func=test_func())
```
### Passing values between functions
To pass values between functions in the same program, use global variable declarations:
```python
# imports
import random
# two functions sharing a value
def number_generator():
    global a
    a = random.random()
def a_printer():
    global a
    print(a)
```
| PypiClean |
/dirtrav-1.0.0.tar.gz/dirtrav-1.0.0/src/flask/app.py | import functools
import inspect
import json
import logging
import os
import sys
import typing as t
import weakref
from collections.abc import Iterator as _abc_Iterator
from datetime import timedelta
from itertools import chain
from threading import Lock
from types import TracebackType
import click
from werkzeug.datastructures import Headers
from werkzeug.datastructures import ImmutableDict
from werkzeug.exceptions import Aborter
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import BadRequestKeyError
from werkzeug.exceptions import HTTPException
from werkzeug.exceptions import InternalServerError
from werkzeug.routing import BuildError
from werkzeug.routing import Map
from werkzeug.routing import MapAdapter
from werkzeug.routing import RequestRedirect
from werkzeug.routing import RoutingException
from werkzeug.routing import Rule
from werkzeug.serving import is_running_from_reloader
from werkzeug.urls import url_quote
from werkzeug.utils import redirect as _wz_redirect
from werkzeug.wrappers import Response as BaseResponse
from . import cli
from . import typing as ft
from .config import Config
from .config import ConfigAttribute
from .ctx import _AppCtxGlobals
from .ctx import AppContext
from .ctx import RequestContext
from .globals import _cv_app
from .globals import _cv_request
from .globals import g
from .globals import request
from .globals import request_ctx
from .globals import session
from .helpers import _split_blueprint_path
from .helpers import get_debug_flag
from .helpers import get_flashed_messages
from .helpers import get_load_dotenv
from .helpers import locked_cached_property
from .json.provider import DefaultJSONProvider
from .json.provider import JSONProvider
from .logging import create_logger
from .scaffold import _endpoint_from_view_func
from .scaffold import _sentinel
from .scaffold import find_package
from .scaffold import Scaffold
from .scaffold import setupmethod
from .sessions import SecureCookieSessionInterface
from .sessions import SessionInterface
from .signals import appcontext_tearing_down
from .signals import got_request_exception
from .signals import request_finished
from .signals import request_started
from .signals import request_tearing_down
from .templating import DispatchingJinjaLoader
from .templating import Environment
from .wrappers import Request
from .wrappers import Response
if t.TYPE_CHECKING: # pragma: no cover
import typing_extensions as te
from .blueprints import Blueprint
from .testing import FlaskClient
from .testing import FlaskCliRunner
T_before_first_request = t.TypeVar(
"T_before_first_request", bound=ft.BeforeFirstRequestCallable
)
T_shell_context_processor = t.TypeVar(
"T_shell_context_processor", bound=ft.ShellContextProcessorCallable
)
T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable)
T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable)
T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable)
if sys.version_info >= (3, 8):
iscoroutinefunction = inspect.iscoroutinefunction
else:
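    # On Python < 3.8, unwrap bound methods and functools.partial
    # wrappers manually before checking, mirroring what newer versions
    # of inspect.iscoroutinefunction handle themselves.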
def iscoroutinefunction(func: t.Any) -> bool:
while inspect.ismethod(func):
func = func.__func__
while isinstance(func, functools.partial):
func = func.func
return inspect.iscoroutinefunction(func)
def _make_timedelta(value: t.Union[timedelta, int, None]) -> t.Optional[timedelta]:
if value is None or isinstance(value, timedelta):
return value
return timedelta(seconds=value)
class Flask(Scaffold):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the :file:`__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea of what
belongs to your application. This name is used to find resources
on the filesystem, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in :file:`yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
.. versionadded:: 0.11
The `root_path` parameter was added.
.. versionadded:: 1.0
The ``host_matching`` and ``static_host`` parameters were added.
.. versionadded:: 1.0
The ``subdomain_matching`` parameter was added. Subdomain
matching needs to be enabled manually now. Setting
:data:`SERVER_NAME` does not implicitly enable it.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: The folder with static files that is served at
``static_url_path``. Relative to the application ``root_path``
or an absolute path. Defaults to ``'static'``.
:param static_host: the host to use when adding the static route.
Defaults to None. Required when using ``host_matching=True``
with a ``static_folder`` configured.
:param host_matching: set ``url_map.host_matching`` attribute.
Defaults to False.
:param subdomain_matching: consider the subdomain relative to
:data:`SERVER_NAME` when matching routes. Defaults to False.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to ``True`` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
:param root_path: The path to the root of the application files.
This should only be set manually when it can't be detected
automatically, such as for namespace packages.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class of the object assigned to :attr:`aborter`, created by
#: :meth:`create_aborter`. That object is called by
#: :func:`flask.abort` to raise HTTP errors, and can be
#: called directly as well.
#:
#: Defaults to :class:`werkzeug.exceptions.Aborter`.
#:
#: .. versionadded:: 2.2
aborter_class = Aborter
#: The class that is used for the Jinja environment.
#:
#: .. versionadded:: 0.11
jinja_environment = Environment
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on unexpected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is now application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
#: The class that is used for the ``config`` attribute of this app.
#: Defaults to :class:`~flask.Config`.
#:
#: Example use cases for a custom class:
#:
#: 1. Default values for certain config options.
#: 2. Access to config values through attributes in addition to keys.
#:
#: .. versionadded:: 0.11
config_class = Config
#: The testing flag. Set this to ``True`` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate test helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: ``TESTING`` configuration key. Defaults to ``False``.
testing = ConfigAttribute("TESTING")
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: :data:`SECRET_KEY` configuration key. Defaults to ``None``.
secret_key = ConfigAttribute("SECRET_KEY")
@property
def session_cookie_name(self) -> str:
"""The name of the cookie set by the session interface.
.. deprecated:: 2.2
Will be removed in Flask 2.3. Use ``app.config["SESSION_COOKIE_NAME"]``
instead.
"""
import warnings
warnings.warn(
"'session_cookie_name' is deprecated and will be removed in Flask 2.3. Use"
" 'SESSION_COOKIE_NAME' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.config["SESSION_COOKIE_NAME"]
@session_cookie_name.setter
def session_cookie_name(self, value: str) -> None:
import warnings
warnings.warn(
"'session_cookie_name' is deprecated and will be removed in Flask 2.3. Use"
" 'SESSION_COOKIE_NAME' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
self.config["SESSION_COOKIE_NAME"] = value
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute(
"PERMANENT_SESSION_LIFETIME", get_converter=_make_timedelta
)
@property
def send_file_max_age_default(self) -> t.Optional[timedelta]:
"""The default value for ``max_age`` for :func:`~flask.send_file`. The default
is ``None``, which tells the browser to use conditional requests instead of a
timed cache.
.. deprecated:: 2.2
Will be removed in Flask 2.3. Use
``app.config["SEND_FILE_MAX_AGE_DEFAULT"]`` instead.
.. versionchanged:: 2.0
Defaults to ``None`` instead of 12 hours.
"""
import warnings
warnings.warn(
"'send_file_max_age_default' is deprecated and will be removed in Flask"
" 2.3. Use 'SEND_FILE_MAX_AGE_DEFAULT' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
return _make_timedelta(self.config["SEND_FILE_MAX_AGE_DEFAULT"])
@send_file_max_age_default.setter
def send_file_max_age_default(self, value: t.Union[int, timedelta, None]) -> None:
import warnings
warnings.warn(
"'send_file_max_age_default' is deprecated and will be removed in Flask"
" 2.3. Use 'SEND_FILE_MAX_AGE_DEFAULT' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
self.config["SEND_FILE_MAX_AGE_DEFAULT"] = _make_timedelta(value)
@property
def use_x_sendfile(self) -> bool:
"""Enable this to use the ``X-Sendfile`` feature, assuming the server supports
it, from :func:`~flask.send_file`.
.. deprecated:: 2.2
Will be removed in Flask 2.3. Use ``app.config["USE_X_SENDFILE"]`` instead.
"""
import warnings
warnings.warn(
"'use_x_sendfile' is deprecated and will be removed in Flask 2.3. Use"
" 'USE_X_SENDFILE' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.config["USE_X_SENDFILE"]
@use_x_sendfile.setter
def use_x_sendfile(self, value: bool) -> None:
import warnings
warnings.warn(
"'use_x_sendfile' is deprecated and will be removed in Flask 2.3. Use"
" 'USE_X_SENDFILE' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
self.config["USE_X_SENDFILE"] = value
_json_encoder: t.Union[t.Type[json.JSONEncoder], None] = None
_json_decoder: t.Union[t.Type[json.JSONDecoder], None] = None
@property # type: ignore[override]
def json_encoder(self) -> t.Type[json.JSONEncoder]: # type: ignore[override]
"""The JSON encoder class to use. Defaults to
:class:`~flask.json.JSONEncoder`.
.. deprecated:: 2.2
Will be removed in Flask 2.3. Customize
:attr:`json_provider_class` instead.
.. versionadded:: 0.10
"""
import warnings
warnings.warn(
"'app.json_encoder' is deprecated and will be removed in Flask 2.3."
" Customize 'app.json_provider_class' or 'app.json' instead.",
DeprecationWarning,
stacklevel=2,
)
if self._json_encoder is None:
from . import json
return json.JSONEncoder
return self._json_encoder
@json_encoder.setter
def json_encoder(self, value: t.Type[json.JSONEncoder]) -> None:
import warnings
warnings.warn(
"'app.json_encoder' is deprecated and will be removed in Flask 2.3."
" Customize 'app.json_provider_class' or 'app.json' instead.",
DeprecationWarning,
stacklevel=2,
)
self._json_encoder = value
@property # type: ignore[override]
def json_decoder(self) -> t.Type[json.JSONDecoder]: # type: ignore[override]
"""The JSON decoder class to use. Defaults to
:class:`~flask.json.JSONDecoder`.
.. deprecated:: 2.2
Will be removed in Flask 2.3. Customize
:attr:`json_provider_class` instead.
.. versionadded:: 0.10
"""
import warnings
warnings.warn(
"'app.json_decoder' is deprecated and will be removed in Flask 2.3."
" Customize 'app.json_provider_class' or 'app.json' instead.",
DeprecationWarning,
stacklevel=2,
)
if self._json_decoder is None:
from . import json
return json.JSONDecoder
return self._json_decoder
@json_decoder.setter
def json_decoder(self, value: t.Type[json.JSONDecoder]) -> None:
import warnings
warnings.warn(
"'app.json_decoder' is deprecated and will be removed in Flask 2.3."
" Customize 'app.json_provider_class' or 'app.json' instead.",
DeprecationWarning,
stacklevel=2,
)
self._json_decoder = value
json_provider_class: t.Type[JSONProvider] = DefaultJSONProvider
"""A subclass of :class:`~flask.json.provider.JSONProvider`. An
instance is created and assigned to :attr:`app.json` when creating
the app.
The default, :class:`~flask.json.provider.DefaultJSONProvider`, uses
Python's built-in :mod:`json` library. A different provider can use
a different JSON library.
.. versionadded:: 2.2
"""
#: Options that are passed to the Jinja environment in
#: :meth:`create_jinja_environment`. Changing these options after
#: the environment is created (accessing :attr:`jinja_env`) will
#: have no effect.
#:
#: .. versionchanged:: 1.1.0
#: This is a ``dict`` instead of an ``ImmutableDict`` to allow
#: easier configuration.
#:
jinja_options: dict = {}
#: Default configuration parameters.
default_config = ImmutableDict(
{
"ENV": None,
"DEBUG": None,
"TESTING": False,
"PROPAGATE_EXCEPTIONS": None,
"SECRET_KEY": None,
"PERMANENT_SESSION_LIFETIME": timedelta(days=31),
"USE_X_SENDFILE": False,
"SERVER_NAME": None,
"APPLICATION_ROOT": "/",
"SESSION_COOKIE_NAME": "session",
"SESSION_COOKIE_DOMAIN": None,
"SESSION_COOKIE_PATH": None,
"SESSION_COOKIE_HTTPONLY": True,
"SESSION_COOKIE_SECURE": False,
"SESSION_COOKIE_SAMESITE": None,
"SESSION_REFRESH_EACH_REQUEST": True,
"MAX_CONTENT_LENGTH": None,
"SEND_FILE_MAX_AGE_DEFAULT": None,
"TRAP_BAD_REQUEST_ERRORS": None,
"TRAP_HTTP_EXCEPTIONS": False,
"EXPLAIN_TEMPLATE_LOADING": False,
"PREFERRED_URL_SCHEME": "http",
"JSON_AS_ASCII": None,
"JSON_SORT_KEYS": None,
"JSONIFY_PRETTYPRINT_REGULAR": None,
"JSONIFY_MIMETYPE": None,
"TEMPLATES_AUTO_RELOAD": None,
"MAX_COOKIE_SIZE": 4093,
}
)
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: The map object to use for storing the URL rules and routing
#: configuration parameters. Defaults to :class:`werkzeug.routing.Map`.
#:
#: .. versionadded:: 1.1.0
url_map_class = Map
#: The :meth:`test_client` method creates an instance of this test
#: client class. Defaults to :class:`~flask.testing.FlaskClient`.
#:
#: .. versionadded:: 0.7
test_client_class: t.Optional[t.Type["FlaskClient"]] = None
#: The :class:`~click.testing.CliRunner` subclass, by default
#: :class:`~flask.testing.FlaskCliRunner` that is used by
#: :meth:`test_cli_runner`. Its ``__init__`` method should take a
#: Flask app object as the first argument.
#:
#: .. versionadded:: 1.0
test_cli_runner_class: t.Optional[t.Type["FlaskCliRunner"]] = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface: SessionInterface = SecureCookieSessionInterface()
def __init__(
self,
import_name: str,
static_url_path: t.Optional[str] = None,
static_folder: t.Optional[t.Union[str, os.PathLike]] = "static",
static_host: t.Optional[str] = None,
host_matching: bool = False,
subdomain_matching: bool = False,
template_folder: t.Optional[str] = "templates",
instance_path: t.Optional[str] = None,
instance_relative_config: bool = False,
root_path: t.Optional[str] = None,
):
super().__init__(
import_name=import_name,
static_folder=static_folder,
static_url_path=static_url_path,
template_folder=template_folder,
root_path=root_path,
)
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError(
"If an instance path is provided it must be absolute."
" A relative path was given instead."
)
#: Holds the path to the instance folder.
#:
#: .. versionadded:: 0.8
self.instance_path = instance_path
#: The configuration dictionary as :class:`Config`. This behaves
#: exactly like a regular dictionary but supports additional methods
#: to load a config from files.
self.config = self.make_config(instance_relative_config)
#: An instance of :attr:`aborter_class` created by
#: :meth:`make_aborter`. This is called by :func:`flask.abort`
#: to raise HTTP errors, and can be called directly as well.
#:
#: .. versionadded:: 2.2
#: Moved from ``flask.abort``, which calls this object.
self.aborter = self.make_aborter()
self.json: JSONProvider = self.json_provider_class(self)
"""Provides access to JSON methods. Functions in ``flask.json``
will call methods on this provider when the application context
is active. Used for handling JSON requests and responses.
An instance of :attr:`json_provider_class`. Can be customized by
changing that attribute on a subclass, or by assigning to this
attribute afterwards.
The default, :class:`~flask.json.provider.DefaultJSONProvider`,
uses Python's built-in :mod:`json` library. A different provider
can use a different JSON library.
.. versionadded:: 2.2
"""
#: A list of functions that are called by
#: :meth:`handle_url_build_error` when :meth:`.url_for` raises a
#: :exc:`~werkzeug.routing.BuildError`. Each function is called
#: with ``error``, ``endpoint`` and ``values``. If a function
#: returns ``None`` or raises a ``BuildError``, it is skipped.
#: Otherwise, its return value is returned by ``url_for``.
#:
#: .. versionadded:: 0.9
self.url_build_error_handlers: t.List[
t.Callable[[Exception, str, t.Dict[str, t.Any]], str]
] = []
#: A list of functions that will be called at the beginning of the
#: first request to this instance. To register a function, use the
#: :meth:`before_first_request` decorator.
#:
#: .. deprecated:: 2.2
#: Will be removed in Flask 2.3. Run setup code when
#: creating the application instead.
#:
#: .. versionadded:: 0.8
self.before_first_request_funcs: t.List[ft.BeforeFirstRequestCallable] = []
#: A list of functions that are called when the application context
#: is destroyed. Since the application context is also torn down
#: if the request ends this is the place to store code that disconnects
#: from databases.
#:
#: .. versionadded:: 0.9
self.teardown_appcontext_funcs: t.List[ft.TeardownCallable] = []
#: A list of shell context processor functions that should be run
#: when a shell context is created.
#:
#: .. versionadded:: 0.11
self.shell_context_processors: t.List[ft.ShellContextProcessorCallable] = []
#: Maps registered blueprint names to blueprint objects. The
#: dict retains the order the blueprints were registered in.
#: Blueprints can be registered multiple times, this dict does
#: not track how often they were attached.
#:
#: .. versionadded:: 0.7
self.blueprints: t.Dict[str, "Blueprint"] = {}
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
#: similar things.
#:
#: The key must match the name of the extension module. For example in
#: case of a "Flask-Foo" extension in `flask_foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
self.extensions: dict = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug.routing import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(super(ListConverter, self).to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = self.url_map_class()
self.url_map.host_matching = host_matching
self.subdomain_matching = subdomain_matching
# tracks internally if the application already handled at least one
# request.
self._got_first_request = False
self._before_request_lock = Lock()
# Add a static route using the provided static_url_path, static_host,
# and static_folder if there is a configured static_folder.
# Note we do this without checking if static_folder exists.
# For one, it might be created while the server is running (e.g. during
        # development). Also, Google App Engine stores static files
        # somewhere else, so the folder may not exist locally.
if self.has_static_folder:
assert (
bool(static_host) == host_matching
), "Invalid static_host/host_matching combination"
# Use a weakref to avoid creating a reference cycle between the app
# and the view function (see #3761).
self_ref = weakref.ref(self)
self.add_url_rule(
f"{self.static_url_path}/<path:filename>",
endpoint="static",
host=static_host,
view_func=lambda **kw: self_ref().send_static_file(**kw), # type: ignore # noqa: B950
)
# Set the name of the Click group in case someone wants to add
# the app's commands to another CLI tool.
self.cli.name = self.name
def _check_setup_finished(self, f_name: str) -> None:
if self._got_first_request:
raise AssertionError(
f"The setup method '{f_name}' can no longer be called"
" on the application. It has already handled its first"
" request, any changes will not be applied"
" consistently.\n"
"Make sure all imports, decorators, functions, etc."
" needed to set up the application are done before"
" running it."
)
@locked_cached_property
def name(self) -> str: # type: ignore
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
        import name is ``__main__``. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == "__main__":
fn = getattr(sys.modules["__main__"], "__file__", None)
if fn is None:
return "__main__"
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self) -> bool:
"""Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
value in case it's set, otherwise a sensible default is returned.
.. deprecated:: 2.2
Will be removed in Flask 2.3.
.. versionadded:: 0.7
"""
import warnings
warnings.warn(
"'propagate_exceptions' is deprecated and will be removed in Flask 2.3.",
DeprecationWarning,
stacklevel=2,
)
rv = self.config["PROPAGATE_EXCEPTIONS"]
if rv is not None:
return rv
return self.testing or self.debug
@locked_cached_property
def logger(self) -> logging.Logger:
"""A standard Python :class:`~logging.Logger` for the app, with
the same name as :attr:`name`.
In debug mode, the logger's :attr:`~logging.Logger.level` will
be set to :data:`~logging.DEBUG`.
If there are no handlers configured, a default handler will be
added. See :doc:`/logging` for more information.
.. versionchanged:: 1.1.0
The logger takes the same name as :attr:`name` rather than
hard-coding ``"flask.app"``.
.. versionchanged:: 1.0.0
Behavior was simplified. The logger is always named
``"flask.app"``. The level is only set during configuration,
it doesn't check ``app.debug`` each time. Only one format is
used, not different ones depending on ``app.debug``. No
handlers are removed, and a handler is only added if no
handlers are already configured.
.. versionadded:: 0.3
"""
return create_logger(self)
@locked_cached_property
def jinja_env(self) -> Environment:
"""The Jinja environment used to load templates.
The environment is created the first time this property is
accessed. Changing :attr:`jinja_options` after that will have no
effect.
"""
return self.create_jinja_environment()
@property
def got_first_request(self) -> bool:
"""This attribute is set to ``True`` if the application started
handling the first request.
.. versionadded:: 0.8
"""
return self._got_first_request
def make_config(self, instance_relative: bool = False) -> Config:
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
defaults = dict(self.default_config)
defaults["ENV"] = os.environ.get("FLASK_ENV") or "production"
defaults["DEBUG"] = get_debug_flag()
return self.config_class(root_path, defaults)
def make_aborter(self) -> Aborter:
"""Create the object to assign to :attr:`aborter`. That object
is called by :func:`flask.abort` to raise HTTP errors, and can
be called directly as well.
By default, this creates an instance of :attr:`aborter_class`,
which defaults to :class:`werkzeug.exceptions.Aborter`.
.. versionadded:: 2.2
"""
return self.aborter_class()
def auto_find_instance_path(self) -> str:
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, "instance")
return os.path.join(prefix, "var", f"{self.name}-instance")
def open_instance_resource(self, resource: str, mode: str = "rb") -> t.IO[t.AnyStr]:
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
@property
def templates_auto_reload(self) -> bool:
"""Reload templates when they are changed. Used by
:meth:`create_jinja_environment`. It is enabled by default in debug mode.
.. deprecated:: 2.2
Will be removed in Flask 2.3. Use ``app.config["TEMPLATES_AUTO_RELOAD"]``
instead.
.. versionadded:: 1.0
This property was added but the underlying config and behavior
already existed.
"""
import warnings
warnings.warn(
"'templates_auto_reload' is deprecated and will be removed in Flask 2.3."
" Use 'TEMPLATES_AUTO_RELOAD' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
rv = self.config["TEMPLATES_AUTO_RELOAD"]
return rv if rv is not None else self.debug
@templates_auto_reload.setter
def templates_auto_reload(self, value: bool) -> None:
import warnings
warnings.warn(
"'templates_auto_reload' is deprecated and will be removed in Flask 2.3."
" Use 'TEMPLATES_AUTO_RELOAD' in 'app.config' instead.",
DeprecationWarning,
stacklevel=2,
)
self.config["TEMPLATES_AUTO_RELOAD"] = value
def create_jinja_environment(self) -> Environment:
"""Create the Jinja environment based on :attr:`jinja_options`
and the various Jinja-related methods of the app. Changing
:attr:`jinja_options` after this will have no effect. Also adds
Flask-related globals and filters to the environment.
.. versionchanged:: 0.11
``Environment.auto_reload`` set in accordance with
``TEMPLATES_AUTO_RELOAD`` configuration option.
.. versionadded:: 0.5
"""
options = dict(self.jinja_options)
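        # Fill in defaults that depend on app state only if the user
        # has not already set them in jinja_options.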
if "autoescape" not in options:
options["autoescape"] = self.select_jinja_autoescape
if "auto_reload" not in options:
auto_reload = self.config["TEMPLATES_AUTO_RELOAD"]
if auto_reload is None:
auto_reload = self.debug
options["auto_reload"] = auto_reload
rv = self.jinja_environment(self, **options)
rv.globals.update(
url_for=self.url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g,
)
rv.policies["json.dumps_function"] = self.json.dumps
return rv
def create_global_jinja_loader(self) -> DispatchingJinjaLoader:
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader and keeping the rest unchanged. It's
discouraged to override this function. Instead one should override
the :meth:`jinja_loader` function instead.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def select_jinja_autoescape(self, filename: str) -> bool:
"""Returns ``True`` if autoescaping should be active for the given
template name. If no template name is given, returns `True`.
.. versionadded:: 0.5
"""
if filename is None:
return True
return filename.endswith((".html", ".htm", ".xml", ".xhtml"))
def update_template_context(self, context: dict) -> None:
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
        to inject. Note that as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
names: t.Iterable[t.Optional[str]] = (None,)
# A template may be rendered outside a request context.
if request:
names = chain(names, reversed(request.blueprints))
# The values passed to render_template take precedence. Keep a
# copy to re-apply after all context functions.
orig_ctx = context.copy()
for name in names:
if name in self.template_context_processors:
for func in self.template_context_processors[name]:
context.update(func())
context.update(orig_ctx)
def make_shell_context(self) -> dict:
"""Returns the shell context for an interactive shell for this
application. This runs all the registered shell context
processors.
.. versionadded:: 0.11
"""
rv = {"app": self, "g": g}
for processor in self.shell_context_processors:
rv.update(processor())
return rv
@property
def env(self) -> str:
"""What environment the app is running in. This maps to the :data:`ENV` config
key.
**Do not enable development when deploying in production.**
Default: ``'production'``
.. deprecated:: 2.2
Will be removed in Flask 2.3.
"""
import warnings
warnings.warn(
"'app.env' is deprecated and will be removed in Flask 2.3."
" Use 'app.debug' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.config["ENV"]
@env.setter
def env(self, value: str) -> None:
import warnings
warnings.warn(
"'app.env' is deprecated and will be removed in Flask 2.3."
" Use 'app.debug' instead.",
DeprecationWarning,
stacklevel=2,
)
self.config["ENV"] = value
@property
def debug(self) -> bool:
"""Whether debug mode is enabled. When using ``flask run`` to start the
development server, an interactive debugger will be shown for unhandled
exceptions, and the server will be reloaded when code changes. This maps to the
:data:`DEBUG` config key. It may not behave as expected if set late.
**Do not enable debug mode when deploying in production.**
Default: ``False``
"""
return self.config["DEBUG"]
@debug.setter
def debug(self, value: bool) -> None:
self.config["DEBUG"] = value
if self.config["TEMPLATES_AUTO_RELOAD"] is None:
self.jinja_env.auto_reload = value
def run(
self,
host: t.Optional[str] = None,
port: t.Optional[int] = None,
debug: t.Optional[bool] = None,
load_dotenv: bool = True,
**options: t.Any,
) -> None:
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :doc:`/deploying/index` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
if present.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if present.
:param debug: if given, enable or disable debug mode. See
:attr:`debug`.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param options: the options to be forwarded to the underlying Werkzeug
server. See :func:`werkzeug.serving.run_simple` for more
information.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment
variables from :file:`.env` and :file:`.flaskenv` files.
The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`.
Threaded mode is enabled by default.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME``
variable.
"""
# Ignore this call so that it doesn't start another server if
# the 'flask run' command is used.
if os.environ.get("FLASK_RUN_FROM_CLI") == "true":
if not is_running_from_reloader():
click.secho(
" * Ignoring a call to 'app.run()' that would block"
" the current 'flask' CLI command.\n"
" Only call 'app.run()' in an 'if __name__ =="
' "__main__"\' guard.',
fg="red",
)
return
if get_load_dotenv(load_dotenv):
cli.load_dotenv()
# if set, let env vars override previous values
if "FLASK_ENV" in os.environ:
print(
"'FLASK_ENV' is deprecated and will not be used in"
" Flask 2.3. Use 'FLASK_DEBUG' instead.",
file=sys.stderr,
)
self.config["ENV"] = os.environ.get("FLASK_ENV") or "production"
self.debug = get_debug_flag()
elif "FLASK_DEBUG" in os.environ:
self.debug = get_debug_flag()
# debug passed to method overrides all other sources
if debug is not None:
self.debug = bool(debug)
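        # Resolve host and port: explicit arguments win, then any
        # host:port in the SERVER_NAME config value, then the
        # 127.0.0.1:5000 defaults.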
server_name = self.config.get("SERVER_NAME")
sn_host = sn_port = None
if server_name:
sn_host, _, sn_port = server_name.partition(":")
if not host:
if sn_host:
host = sn_host
else:
host = "127.0.0.1"
if port or port == 0:
port = int(port)
elif sn_port:
port = int(sn_port)
else:
port = 5000
options.setdefault("use_reloader", self.debug)
options.setdefault("use_debugger", self.debug)
options.setdefault("threaded", True)
cli.show_server_banner(self.debug, self.name)
from werkzeug.serving import run_simple
try:
run_simple(t.cast(str, host), port, self, **options)
finally:
# reset the first request information if the development server
# reset normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> "FlaskClient":
"""Creates a test client for this application. For information
about unit testing head over to :doc:`/testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a ``with`` block to defer the closing down
of the context until the end of the ``with`` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
Additionally, you may pass optional keyword arguments that will then
be passed to the application's :attr:`test_client_class` constructor.
For example::
from flask.testing import FlaskClient
class CustomClient(FlaskClient):
def __init__(self, *args, **kwargs):
self._authentication = kwargs.pop("authentication")
super(CustomClient,self).__init__( *args, **kwargs)
app.test_client_class = CustomClient
client = app.test_client(authentication='Basic ....')
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for ``with`` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
.. versionchanged:: 0.11
Added `**kwargs` to support passing additional keyword arguments to
the constructor of :attr:`test_client_class`.
"""
cls = self.test_client_class
if cls is None:
from .testing import FlaskClient as cls # type: ignore
return cls( # type: ignore
self, self.response_class, use_cookies=use_cookies, **kwargs
)
def test_cli_runner(self, **kwargs: t.Any) -> "FlaskCliRunner":
"""Create a CLI runner for testing CLI commands.
See :ref:`testing-cli`.
Returns an instance of :attr:`test_cli_runner_class`, by default
:class:`~flask.testing.FlaskCliRunner`. The Flask app object is
passed as the first argument.
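        A short usage sketch::
            runner = app.test_cli_runner()
            result = runner.invoke(args=["--help"])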
.. versionadded:: 1.0
"""
cls = self.test_cli_runner_class
if cls is None:
from .testing import FlaskCliRunner as cls # type: ignore
return cls(self, **kwargs) # type: ignore
@setupmethod
def register_blueprint(self, blueprint: "Blueprint", **options: t.Any) -> None:
"""Register a :class:`~flask.Blueprint` on the application. Keyword
arguments passed to this method will override the defaults set on the
blueprint.
Calls the blueprint's :meth:`~flask.Blueprint.register` method after
recording the blueprint in the application's :attr:`blueprints`.
:param blueprint: The blueprint to register.
:param url_prefix: Blueprint routes will be prefixed with this.
:param subdomain: Blueprint routes will match on this subdomain.
:param url_defaults: Blueprint routes will use these default values for
view arguments.
:param options: Additional keyword arguments are passed to
:class:`~flask.blueprints.BlueprintSetupState`. They can be
accessed in :meth:`~flask.Blueprint.record` callbacks.
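        A typical call (``admin_bp`` standing in for your blueprint)::
            from yourapplication.admin import admin_bp
            app.register_blueprint(admin_bp, url_prefix="/admin")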
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
.. versionadded:: 0.7
"""
blueprint.register(self, options)
def iter_blueprints(self) -> t.ValuesView["Blueprint"]:
"""Iterates over all blueprints by the order they were registered.
.. versionadded:: 0.11
"""
return self.blueprints.values()
@setupmethod
def add_url_rule(
self,
rule: str,
endpoint: t.Optional[str] = None,
view_func: t.Optional[ft.RouteCallable] = None,
provide_automatic_options: t.Optional[bool] = None,
**options: t.Any,
) -> None:
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func) # type: ignore
options["endpoint"] = endpoint
methods = options.pop("methods", None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only ``GET`` as default.
if methods is None:
methods = getattr(view_func, "methods", None) or ("GET",)
if isinstance(methods, str):
raise TypeError(
"Allowed methods must be a list of strings, for"
' example: @app.route(..., methods=["POST"])'
)
methods = {item.upper() for item in methods}
# Methods that should always be added
required_methods = set(getattr(view_func, "required_methods", ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
if provide_automatic_options is None:
provide_automatic_options = getattr(
view_func, "provide_automatic_options", None
)
if provide_automatic_options is None:
if "OPTIONS" not in methods:
provide_automatic_options = True
required_methods.add("OPTIONS")
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options # type: ignore
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func != view_func:
raise AssertionError(
"View function mapping is overwriting an existing"
f" endpoint function: {endpoint}"
)
self.view_functions[endpoint] = view_func
@setupmethod
def template_filter(
self, name: t.Optional[str] = None
) -> t.Callable[[T_template_filter], T_template_filter]:
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f: T_template_filter) -> T_template_filter:
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(
self, f: ft.TemplateFilterCallable, name: t.Optional[str] = None
) -> None:
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(
self, name: t.Optional[str] = None
) -> t.Callable[[T_template_test], T_template_test]:
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f: T_template_test) -> T_template_test:
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(
self, f: ft.TemplateTestCallable, name: t.Optional[str] = None
) -> None:
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(
self, name: t.Optional[str] = None
) -> t.Callable[[T_template_global], T_template_global]:
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f: T_template_global) -> T_template_global:
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(
self, f: ft.TemplateGlobalCallable, name: t.Optional[str] = None
) -> None:
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_first_request(self, f: T_before_first_request) -> T_before_first_request:
"""Registers a function to be run before the first request to this
instance of the application.
The function will be called without any arguments and its return
value is ignored.
.. deprecated:: 2.2
Will be removed in Flask 2.3. Run setup code when creating
the application instead.
.. versionadded:: 0.8
"""
import warnings
warnings.warn(
"'before_first_request' is deprecated and will be removed"
" in Flask 2.3. Run setup code while creating the"
" application instead.",
DeprecationWarning,
stacklevel=2,
)
self.before_first_request_funcs.append(f)
return f
@setupmethod
def teardown_appcontext(self, f: T_teardown) -> T_teardown:
"""Registers a function to be called when the application
context is popped. The application context is typically popped
after the request context for each request, at the end of CLI
commands, or after a manually pushed context ends.
.. code-block:: python
with app.app_context():
...
When the ``with`` block exits (or ``ctx.pop()`` is called), the
teardown functions are called just before the app context is
made inactive. Since a request context typically also manages an
application context it would also be called when you pop a
request context.
When a teardown function was called because of an unhandled
exception it will be passed an error object. If an
:meth:`errorhandler` is registered, it will handle the exception
and the teardown will not receive it.
Teardown functions must avoid raising exceptions. If they
execute code that might fail they must surround that code with a
``try``/``except`` block and log any errors.
The return values of teardown functions are ignored.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
@setupmethod
def shell_context_processor(
self, f: T_shell_context_processor
) -> T_shell_context_processor:
"""Registers a shell context processor function.
.. versionadded:: 0.11
"""
self.shell_context_processors.append(f)
return f
def _find_error_handler(self, e: Exception) -> t.Optional[ft.ErrorHandlerCallable]:
"""Return a registered error handler for an exception in this order:
blueprint handler for a specific code, app handler for a specific code,
blueprint handler for an exception class, app handler for an exception
class, or ``None`` if a suitable handler is not found.
"""
exc_class, code = self._get_exc_class_and_code(type(e))
names = (*request.blueprints, None)
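        # Look for handlers registered for the specific status code
        # first, then for handlers registered on the exception class.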
for c in (code, None) if code is not None else (None,):
for name in names:
handler_map = self.error_handler_spec[name][c]
if not handler_map:
continue
for cls in exc_class.__mro__:
handler = handler_map.get(cls)
if handler is not None:
return handler
return None
def handle_http_exception(
self, e: HTTPException
) -> t.Union[HTTPException, ft.ResponseReturnValue]:
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionchanged:: 1.0.3
``RoutingException``, used internally for actions such as
slash redirects during routing, is not passed to error
handlers.
.. versionchanged:: 1.0
Exceptions are looked up by code *and* by MRO, so
``HTTPException`` subclasses can be handled with a catch-all
handler for the base ``HTTPException``.
.. versionadded:: 0.3
"""
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
# RoutingExceptions are used internally to trigger routing
# actions, such as slash redirects raising RequestRedirect. They
# are not raised or handled in user code.
if isinstance(e, RoutingException):
return e
handler = self._find_error_handler(e)
if handler is None:
return e
return self.ensure_sync(handler)(e)
def trap_http_exception(self, e: Exception) -> bool:
"""Checks if an HTTP exception should be trapped or not. By default
this will return ``False`` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
If it returns ``True`` for any exception the error handler for this
        exception is not called and it shows up as a regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionchanged:: 1.0
Bad request errors are not trapped by default in debug mode.
.. versionadded:: 0.8
"""
if self.config["TRAP_HTTP_EXCEPTIONS"]:
return True
trap_bad_request = self.config["TRAP_BAD_REQUEST_ERRORS"]
# if unset, trap key errors in debug mode
if (
trap_bad_request is None
and self.debug
and isinstance(e, BadRequestKeyError)
):
return True
if trap_bad_request:
return isinstance(e, BadRequest)
return False
def handle_user_exception(
self, e: Exception
) -> t.Union[HTTPException, ft.ResponseReturnValue]:
"""This method is called whenever an exception occurs that
should be handled. A special case is :class:`~werkzeug
.exceptions.HTTPException` which is forwarded to the
:meth:`handle_http_exception` method. This function will either
return a response value or reraise the exception with the same
traceback.
.. versionchanged:: 1.0
Key errors raised from request data like ``form`` show the
bad key in debug mode rather than a generic bad request
message.
.. versionadded:: 0.7
"""
if isinstance(e, BadRequestKeyError) and (
self.debug or self.config["TRAP_BAD_REQUEST_ERRORS"]
):
e.show_exception = True
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
handler = self._find_error_handler(e)
if handler is None:
raise
return self.ensure_sync(handler)(e)
def handle_exception(self, e: Exception) -> Response:
"""Handle an exception that did not have an error handler
associated with it, or that was raised from an error handler.
This always causes a 500 ``InternalServerError``.
Always sends the :data:`got_request_exception` signal.
If :attr:`propagate_exceptions` is ``True``, such as in debug
mode, the error will be re-raised so that the debugger can
display it. Otherwise, the original exception is logged, and
an :exc:`~werkzeug.exceptions.InternalServerError` is returned.
If an error handler is registered for ``InternalServerError`` or
``500``, it will be used. For consistency, the handler will
always receive the ``InternalServerError``. The original
unhandled exception is available as ``e.original_exception``.
.. versionchanged:: 1.1.0
Always passes the ``InternalServerError`` instance to the
handler, setting ``original_exception`` to the unhandled
error.
.. versionchanged:: 1.1.0
``after_request`` functions and other finalization is done
even for the default 500 response when there is no handler.
.. versionadded:: 0.3
"""
exc_info = sys.exc_info()
got_request_exception.send(self, exception=e)
propagate = self.config["PROPAGATE_EXCEPTIONS"]
if propagate is None:
propagate = self.testing or self.debug
if propagate:
# Re-raise if called with an active exception, otherwise
# raise the passed in exception.
if exc_info[1] is e:
raise
raise e
self.log_exception(exc_info)
server_error: t.Union[InternalServerError, ft.ResponseReturnValue]
server_error = InternalServerError(original_exception=e)
handler = self._find_error_handler(server_error)
if handler is not None:
server_error = self.ensure_sync(handler)(server_error)
return self.finalize_request(server_error, from_error_handler=True)
def log_exception(
self,
exc_info: t.Union[
t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]
],
) -> None:
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error(
f"Exception on {request.path} [{request.method}]", exc_info=exc_info
)
def raise_routing_exception(self, request: Request) -> "te.NoReturn":
"""Intercept routing exceptions and possibly do something else.
In debug mode, intercept a routing redirect and replace it with
an error if the body will be discarded.
With modern Werkzeug this shouldn't occur, since it now uses a
308 status which tells the browser to resend the method and
body.
.. versionchanged:: 2.1
Don't intercept 307 and 308 redirects.
:meta private:
:internal:
"""
if (
not self.debug
or not isinstance(request.routing_exception, RequestRedirect)
or request.routing_exception.code in {307, 308}
or request.method in {"GET", "HEAD", "OPTIONS"}
):
raise request.routing_exception # type: ignore
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self) -> ft.ResponseReturnValue:
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = request_ctx.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule: Rule = req.url_rule # type: ignore[assignment]
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if (
getattr(rule, "provide_automatic_options", False)
and req.method == "OPTIONS"
):
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
view_args: t.Dict[str, t.Any] = req.view_args # type: ignore[assignment]
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args)
def full_dispatch_request(self) -> Response:
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
# Run before_first_request functions if this is the thread's first request.
# Inlined to avoid a method call on subsequent requests.
# This is deprecated, will be removed in Flask 2.3.
if not self._got_first_request:
with self._before_request_lock:
if not self._got_first_request:
for func in self.before_first_request_funcs:
self.ensure_sync(func)()
self._got_first_request = True
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
return self.finalize_request(rv)
def finalize_request(
self,
rv: t.Union[ft.ResponseReturnValue, HTTPException],
from_error_handler: bool = False,
) -> Response:
"""Given the return value from a view function this finalizes
the request by converting it into a response and invoking the
postprocessing functions. This is invoked for both normal
request dispatching as well as error handlers.
Because this means that it might be called as a result of a
failure a special safe mode is available which can be enabled
with the `from_error_handler` flag. If enabled, failures in
response processing will be logged and otherwise ignored.
:internal:
"""
response = self.make_response(rv)
try:
response = self.process_response(response)
request_finished.send(self, response=response)
except Exception:
if not from_error_handler:
raise
self.logger.exception(
"Request finalizing failed with an error while handling an error"
)
return response
def make_default_options_response(self) -> Response:
"""This method is called to create the default ``OPTIONS`` response.
This can be changed through subclassing to change the default
behavior of ``OPTIONS`` responses.
.. versionadded:: 0.7
"""
adapter = request_ctx.url_adapter
methods = adapter.allowed_methods() # type: ignore[union-attr]
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns ``True`` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
def ensure_sync(self, func: t.Callable) -> t.Callable:
"""Ensure that the function is synchronous for WSGI workers.
Plain ``def`` functions are returned as-is. ``async def``
functions are wrapped to run and wait for the response.
Override this method to change how the app runs async views.
.. versionadded:: 2.0
"""
if iscoroutinefunction(func):
return self.async_to_sync(func)
return func
def async_to_sync(
self, func: t.Callable[..., t.Coroutine]
) -> t.Callable[..., t.Any]:
"""Return a sync function that will run the coroutine function.
.. code-block:: python
result = app.async_to_sync(func)(*args, **kwargs)
Override this method to change how the app converts async code
to be synchronously callable.
.. versionadded:: 2.0
"""
try:
from asgiref.sync import async_to_sync as asgiref_async_to_sync
except ImportError:
raise RuntimeError(
"Install Flask with the 'async' extra in order to use async views."
) from None
return asgiref_async_to_sync(func)
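# Illustrative usage sketch, not part of Flask itself: with the 'async'
# extra installed, an ``async def`` view is accepted directly and wrapped
# at dispatch time. The route and view name below are hypothetical.
#
#     @app.route("/ping")
#     async def ping():
#         return {"pong": True}
#
#     # dispatch_request effectively runs:
#     #     app.ensure_sync(ping)()   # coroutine executed to completion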
def url_for(
self,
endpoint: str,
*,
_anchor: t.Optional[str] = None,
_method: t.Optional[str] = None,
_scheme: t.Optional[str] = None,
_external: t.Optional[bool] = None,
**values: t.Any,
) -> str:
"""Generate a URL to the given endpoint with the given values.
This is called by :func:`flask.url_for`, and can be called
directly as well.
An *endpoint* is the name of a URL rule, usually added with
:meth:`@app.route() <route>`, and usually the same name as the
view function. A route defined in a :class:`~flask.Blueprint`
will prepend the blueprint's name separated by a ``.`` to the
endpoint.
In some cases, such as email messages, you want URLs to include
the scheme and domain, like ``https://example.com/hello``. When
not in an active request, URLs will be external by default, but
this requires setting :data:`SERVER_NAME` so Flask knows what
domain to use. :data:`APPLICATION_ROOT` and
:data:`PREFERRED_URL_SCHEME` should also be configured as
needed. This config is only used when not in an active request.
Functions can be decorated with :meth:`url_defaults` to modify
keyword arguments before the URL is built.
If building fails for some reason, such as an unknown endpoint
or incorrect values, the app's :meth:`handle_url_build_error`
method is called. If that returns a string, that is returned,
otherwise a :exc:`~werkzeug.routing.BuildError` is raised.
:param endpoint: The endpoint name associated with the URL to
generate. If this starts with a ``.``, the current blueprint
name (if any) will be used.
:param _anchor: If given, append this as ``#anchor`` to the URL.
:param _method: If given, generate the URL associated with this
method for the endpoint.
:param _scheme: If given, the URL will have this scheme if it
is external.
:param _external: If given, prefer the URL to be internal
(False) or require it to be external (True). External URLs
include the scheme and domain. When not in an active
request, URLs are external by default.
:param values: Values to use for the variable parts of the URL
rule. Unknown keys are appended as query string arguments,
like ``?a=b&c=d``.
.. versionadded:: 2.2
Moved from ``flask.url_for``, which calls this method.
"""
req_ctx = _cv_request.get(None)
if req_ctx is not None:
url_adapter = req_ctx.url_adapter
blueprint_name = req_ctx.request.blueprint
# If the endpoint starts with "." and the request matches a
# blueprint, the endpoint is relative to the blueprint.
if endpoint[:1] == ".":
if blueprint_name is not None:
endpoint = f"{blueprint_name}{endpoint}"
else:
endpoint = endpoint[1:]
# When in a request, generate a URL without scheme and
# domain by default, unless a scheme is given.
if _external is None:
_external = _scheme is not None
else:
app_ctx = _cv_app.get(None)
# If called by helpers.url_for, an app context is active,
# use its url_adapter. Otherwise, app.url_for was called
# directly, build an adapter.
if app_ctx is not None:
url_adapter = app_ctx.url_adapter
else:
url_adapter = self.create_url_adapter(None)
if url_adapter is None:
raise RuntimeError(
"Unable to build URLs outside an active request"
" without 'SERVER_NAME' configured. Also configure"
" 'APPLICATION_ROOT' and 'PREFERRED_URL_SCHEME' as"
" needed."
)
# When outside a request, generate a URL with scheme and
# domain by default.
if _external is None:
_external = True
# It is an error to set _scheme when _external=False, in order
# to avoid accidental insecure URLs.
if _scheme is not None and not _external:
raise ValueError("When specifying '_scheme', '_external' must be True.")
self.inject_url_defaults(endpoint, values)
try:
rv = url_adapter.build( # type: ignore[union-attr]
endpoint,
values,
method=_method,
url_scheme=_scheme,
force_external=_external,
)
except BuildError as error:
values.update(
_anchor=_anchor, _method=_method, _scheme=_scheme, _external=_external
)
return self.handle_url_build_error(error, endpoint, values)
if _anchor is not None:
rv = f"{rv}#{url_quote(_anchor)}"
return rv
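# Illustrative usage sketch, not part of Flask itself; the "index" endpoint
# and domain are hypothetical.
#
#     with app.test_request_context():
#         app.url_for("index")                    # -> "/"
#         app.url_for("index", _anchor="top")     # -> "/#top"
#
#     # Outside a request SERVER_NAME must be configured; URLs are then
#     # external by default:
#     app.config["SERVER_NAME"] = "example.com"
#     with app.app_context():
#         app.url_for("index")                    # -> "http://example.com/"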
def redirect(self, location: str, code: int = 302) -> BaseResponse:
"""Create a redirect response object.
This is called by :func:`flask.redirect`, and can be called
directly as well.
:param location: The URL to redirect to.
:param code: The status code for the redirect.
.. versionadded:: 2.2
Moved from ``flask.redirect``, which calls this method.
"""
return _wz_redirect(location, code=code, Response=self.response_class)
def make_response(self, rv: ft.ResponseReturnValue) -> Response:
"""Convert the return value from a view function to an instance of
:attr:`response_class`.
:param rv: the return value from the view function. The view function
must return a response. Returning ``None``, or the view ending
without returning, is not allowed. The following types are allowed
for ``view_rv``:
``str``
A response object is created with the string encoded to UTF-8
as the body.
``bytes``
A response object is created with the bytes as the body.
``dict``
A dictionary that will be jsonify'd before being returned.
``list``
A list that will be jsonify'd before being returned.
``generator`` or ``iterator``
A generator that returns ``str`` or ``bytes`` to be
streamed as the response.
``tuple``
Either ``(body, status, headers)``, ``(body, status)``, or
``(body, headers)``, where ``body`` is any of the other types
allowed here, ``status`` is a string or an integer, and
``headers`` is a dictionary or a list of ``(key, value)``
tuples. If ``body`` is a :attr:`response_class` instance,
``status`` overwrites the existing value and ``headers`` are
extended.
:attr:`response_class`
The object is returned unchanged.
other :class:`~werkzeug.wrappers.Response` class
The object is coerced to :attr:`response_class`.
:func:`callable`
The function is called as a WSGI application. The result is
used to create a response object.
.. versionchanged:: 2.2
A generator will be converted to a streaming response.
A list will be converted to a JSON response.
.. versionchanged:: 1.1
A dict will be converted to a JSON response.
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
# unpack tuple returns
if isinstance(rv, tuple):
len_rv = len(rv)
# a 3-tuple is unpacked directly
if len_rv == 3:
rv, status, headers = rv # type: ignore[misc]
# decide if a 2-tuple has status or headers
elif len_rv == 2:
if isinstance(rv[1], (Headers, dict, tuple, list)):
rv, headers = rv
else:
rv, status = rv # type: ignore[assignment,misc]
# other sized tuples are not allowed
else:
raise TypeError(
"The view function did not return a valid response tuple."
" The tuple must have the form (body, status, headers),"
" (body, status), or (body, headers)."
)
# the body must not be None
if rv is None:
raise TypeError(
f"The view function for {request.endpoint!r} did not"
" return a valid response. The function either returned"
" None or ended without a return statement."
)
# make sure the body is an instance of the response class
if not isinstance(rv, self.response_class):
if isinstance(rv, (str, bytes, bytearray)) or isinstance(rv, _abc_Iterator):
# let the response class set the status and headers instead of
# waiting to do it manually, so that the class can handle any
# special logic
rv = self.response_class(
rv,
status=status,
headers=headers, # type: ignore[arg-type]
)
status = headers = None
elif isinstance(rv, (dict, list)):
rv = self.json.response(rv)
elif isinstance(rv, BaseResponse) or callable(rv):
# evaluate a WSGI callable, or coerce a different response
# class to the correct type
try:
rv = self.response_class.force_type(
rv, request.environ # type: ignore[arg-type]
)
except TypeError as e:
raise TypeError(
f"{e}\nThe view function did not return a valid"
" response. The return type must be a string,"
" dict, list, tuple with headers or status,"
" Response instance, or WSGI callable, but it"
f" was a {type(rv).__name__}."
).with_traceback(sys.exc_info()[2]) from None
else:
raise TypeError(
"The view function did not return a valid"
" response. The return type must be a string,"
" dict, list, tuple with headers or status,"
" Response instance, or WSGI callable, but it was a"
f" {type(rv).__name__}."
)
rv = t.cast(Response, rv)
# prefer the status if it was provided
if status is not None:
if isinstance(status, (str, bytes, bytearray)):
rv.status = status
else:
rv.status_code = status
# extend existing headers with provided headers
if headers:
rv.headers.update(headers) # type: ignore[arg-type]
return rv
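# Illustrative sketch of the accepted view return shapes described above,
# not part of Flask itself; values are hypothetical.
#
#     return "hello"                            # str body, status 200
#     return {"id": 1}, 201                     # dict -> JSON, custom status
#     return b"raw", 200, {"X-Thing": "1"}      # (body, status, headers)
#     return "moved", 302, [("Location", "/new")]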
def create_url_adapter(
self, request: t.Optional[Request]
) -> t.Optional[MapAdapter]:
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set
up so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
.. versionchanged:: 1.0
:data:`SERVER_NAME` no longer implicitly enables subdomain
matching. Use :attr:`subdomain_matching` instead.
"""
if request is not None:
# If subdomain matching is disabled (the default), use the
# default subdomain in all cases. This should be the default
# in Werkzeug but it currently does not have that feature.
if not self.subdomain_matching:
subdomain = self.url_map.default_subdomain or None
else:
subdomain = None
return self.url_map.bind_to_environ(
request.environ,
server_name=self.config["SERVER_NAME"],
subdomain=subdomain,
)
# We need at the very least the server name to be set for this
# to work.
if self.config["SERVER_NAME"] is not None:
return self.url_map.bind(
self.config["SERVER_NAME"],
script_name=self.config["APPLICATION_ROOT"],
url_scheme=self.config["PREFERRED_URL_SCHEME"],
)
return None
def inject_url_defaults(self, endpoint: str, values: dict) -> None:
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
names: t.Iterable[t.Optional[str]] = (None,)
# url_for may be called outside a request context, parse the
# passed endpoint instead of using request.blueprints.
if "." in endpoint:
names = chain(
names, reversed(_split_blueprint_path(endpoint.rpartition(".")[0]))
)
for name in names:
if name in self.url_default_functions:
for func in self.url_default_functions[name]:
func(endpoint, values)
def handle_url_build_error(
self, error: BuildError, endpoint: str, values: t.Dict[str, t.Any]
) -> str:
"""Called by :meth:`.url_for` if a
:exc:`~werkzeug.routing.BuildError` was raised. If this returns
a value, it will be returned by ``url_for``, otherwise the error
will be re-raised.
Each function in :attr:`url_build_error_handlers` is called with
``error``, ``endpoint`` and ``values``. If a function returns
``None`` or raises a ``BuildError``, it is skipped. Otherwise,
its return value is returned by ``url_for``.
:param error: The active ``BuildError`` being handled.
:param endpoint: The endpoint being built.
:param values: The keyword arguments passed to ``url_for``.
"""
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
except BuildError as e:
# make error available outside except block
error = e
else:
if rv is not None:
return rv
# Re-raise if called with an active exception, otherwise raise
# the passed in exception.
if error is sys.exc_info()[1]:
raise
raise error
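# Illustrative sketch, not part of Flask itself: registering a handler on
# ``app.url_build_error_handlers``. The fallback URL is hypothetical.
#
#     def external_url_handler(error, endpoint, values):
#         if endpoint == "legacy":
#             return "https://legacy.example.com/"
#         return None   # returning None skips this handler
#
#     app.url_build_error_handlers.append(external_url_handler)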
def preprocess_request(self) -> t.Optional[ft.ResponseReturnValue]:
"""Called before the request is dispatched. Calls
:attr:`url_value_preprocessors` registered with the app and the
current blueprint (if any). Then calls :attr:`before_request_funcs`
registered with the app and the blueprint.
If any :meth:`before_request` handler returns a non-None value, the
value is handled as if it was the return value from the view, and
further request handling is stopped.
"""
names = (None, *reversed(request.blueprints))
for name in names:
if name in self.url_value_preprocessors:
for url_func in self.url_value_preprocessors[name]:
url_func(request.endpoint, request.view_args)
for name in names:
if name in self.before_request_funcs:
for before_func in self.before_request_funcs[name]:
rv = self.ensure_sync(before_func)()
if rv is not None:
return rv
return None
def process_response(self, response: Response) -> Response:
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
"""
ctx = request_ctx._get_current_object() # type: ignore[attr-defined]
for func in ctx._after_request_functions:
response = self.ensure_sync(func)(response)
for name in chain(request.blueprints, (None,)):
if name in self.after_request_funcs:
for func in reversed(self.after_request_funcs[name]):
response = self.ensure_sync(func)(response)
if not self.session_interface.is_null_session(ctx.session):
self.session_interface.save_session(self, ctx.session, response)
return response
def do_teardown_request(
self, exc: t.Optional[BaseException] = _sentinel # type: ignore
) -> None:
"""Called after the request is dispatched and the response is
returned, right before the request context is popped.
This calls all functions decorated with
:meth:`teardown_request`, and :meth:`Blueprint.teardown_request`
if a blueprint handled the request. Finally, the
:data:`request_tearing_down` signal is sent.
This is called by
:meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,
which may be delayed during testing to maintain access to
resources.
:param exc: An unhandled exception raised while dispatching the
request. Detected from the current exception information if
not passed. Passed to each teardown function.
.. versionchanged:: 0.9
Added the ``exc`` argument.
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
for name in chain(request.blueprints, (None,)):
if name in self.teardown_request_funcs:
for func in reversed(self.teardown_request_funcs[name]):
self.ensure_sync(func)(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(
self, exc: t.Optional[BaseException] = _sentinel # type: ignore
) -> None:
"""Called right before the application context is popped.
When handling a request, the application context is popped
after the request context. See :meth:`do_teardown_request`.
This calls all functions decorated with
:meth:`teardown_appcontext`. Then the
:data:`appcontext_tearing_down` signal is sent.
This is called by
:meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.
.. versionadded:: 0.9
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
self.ensure_sync(func)(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self) -> AppContext:
"""Create an :class:`~flask.ctx.AppContext`. Use as a ``with``
block to push the context, which will make :data:`current_app`
point at this application.
An application context is automatically pushed by
:meth:`RequestContext.push() <flask.ctx.RequestContext.push>`
when handling a request, and when running a CLI command. Use
this to manually create a context outside of these situations.
::
with app.app_context():
init_db()
See :doc:`/appcontext`.
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ: dict) -> RequestContext:
"""Create a :class:`~flask.ctx.RequestContext` representing a
WSGI environment. Use a ``with`` block to push the context,
which will make :data:`request` point at this request.
See :doc:`/reqcontext`.
Typically you should not call this from your own code. A request
context is automatically pushed by the :meth:`wsgi_app` when
handling a request. Use :meth:`test_request_context` to create
an environment and context instead of this method.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:
"""Create a :class:`~flask.ctx.RequestContext` for a WSGI
environment created from the given values. This is mostly useful
during testing, where you may want to run a function that uses
request data without dispatching a full request.
See :doc:`/reqcontext`.
Use a ``with`` block to push the context, which will make
:data:`request` point at the request for the created
environment. ::
with test_request_context(...):
generate_report()
When using the shell, it may be easier to push and pop the
context manually to avoid indentation. ::
ctx = app.test_request_context(...)
ctx.push()
...
ctx.pop()
Takes the same arguments as Werkzeug's
:class:`~werkzeug.test.EnvironBuilder`, with some defaults from
the application. See the linked Werkzeug docs for most of the
available arguments. Flask-specific behavior is listed here.
:param path: URL path being requested.
:param base_url: Base URL where the app is being served, which
``path`` is relative to. If not given, built from
:data:`PREFERRED_URL_SCHEME`, ``subdomain``,
:data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
:param subdomain: Subdomain name to append to
:data:`SERVER_NAME`.
:param url_scheme: Scheme to use instead of
:data:`PREFERRED_URL_SCHEME`.
:param data: The request body, either as a string or a dict of
form keys and values.
:param json: If given, this is serialized as JSON and passed as
``data``. Also defaults ``content_type`` to
``application/json``.
:param args: other positional arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
:param kwargs: other keyword arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
"""
from .testing import EnvironBuilder
builder = EnvironBuilder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
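# Illustrative usage sketch, not part of Flask itself: exercising code that
# reads request data without a real HTTP request. Path and form values are
# hypothetical.
#
#     with app.test_request_context("/report?limit=5", method="POST",
#                                   data={"name": "x"}):
#         assert request.args["limit"] == "5"
#         assert request.form["name"] == "x"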
def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:
"""The actual WSGI application. This is not implemented in
:meth:`__call__` so that middlewares can be applied without
losing a reference to the app object. Instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
Teardown events for the request and app contexts are called
even if an unhandled error occurs. Other events may not be
called depending on when an error occurs during dispatch.
See :ref:`callbacks-and-errors`.
:param environ: A WSGI environment.
:param start_response: A callable accepting a status code,
a list of headers, and an optional exception context to
start the response.
"""
ctx = self.request_context(environ)
error: t.Optional[BaseException] = None
try:
try:
ctx.push()
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.handle_exception(e)
except: # noqa: B001
error = sys.exc_info()[1]
raise
return response(environ, start_response)
finally:
if "werkzeug.debug.preserve_context" in environ:
environ["werkzeug.debug.preserve_context"](_cv_app.get())
environ["werkzeug.debug.preserve_context"](_cv_request.get())
if error is not None and self.should_ignore_error(error):
error = None
ctx.pop(error)
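# Illustrative middleware sketch following the pattern recommended above;
# ``MyMiddleware`` is hypothetical, not part of Flask itself.
#
#     class MyMiddleware:
#         def __init__(self, wsgi_app):
#             self.wsgi_app = wsgi_app
#
#         def __call__(self, environ, start_response):
#             environ["example.flag"] = True   # mutate environ, then delegate
#             return self.wsgi_app(environ, start_response)
#
#     app.wsgi_app = MyMiddleware(app.wsgi_app)   # app object stays usable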
def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:
"""The WSGI server calls the Flask application object as the
WSGI application. This calls :meth:`wsgi_app`, which can be
wrapped to apply middleware.
"""
return self.wsgi_app(environ, start_response)
# ---------------------------------------------------------------------------
# File: duHast/APISamples/Revit_Categories.py (from DuHast-1.0.7)
# ---------------------------------------------------------------------------
import clr
import System
from System.Collections.Generic import List
import RevitCommonAPI as com
import RevitFamilyUtils as rFamUtils
import RevitLinks as rLink
import Result as res
import RevitLineStylesPatterns as rPat
clr.AddReference('System.Core')
clr.ImportExtensions(System.Linq)
import Autodesk.Revit.DB as rdb
#: sample subcategory renaming dictionary
#: key is the current subcategory name, value is the new subcategory name
CAT_RENAMING = {
'Clearance Zones': 'AMAZING'
}
#: list of built in parameters attached to family elements containing subcategory ids
ELEMENTS_PARAS_SUB = [
rdb.BuiltInParameter.FAMILY_CURVE_GSTYLE_PLUS_INVISIBLE,
rdb.BuiltInParameter.FAMILY_CURVE_GSTYLE_PLUS_INVISIBLE_MINUS_ANALYTICAL,
rdb.BuiltInParameter.FAMILY_ELEM_SUBCATEGORY,
rdb.BuiltInParameter.CLINE_SUBCATEGORY
]
#-------------------------- get category properties ---------------------------------
#: category properties dictionary key names and default values
#: material name
PROPERTY_MATERIAL_NAME = 'MaterialName'
#: material name default value
PROPERTY_MATERIAL_NAME_VALUE_DEFAULT = 'None'
#: material id
PROPERTY_MATERIAL_ID = 'MaterialId'
#: line weight projection name
PROPERTY_LINE_WEIGHT_PROJECTION_NAME = 'LineWeightProjection'
#: line weight cut name
PROPERTY_LINE_WEIGHT_CUT_NAME = 'LineWeightCut'
#: line colour red name
PROPERTY_LINE_COLOUR_RED_NAME = 'Red'
#: line colour green name
PROPERTY_LINE_COLOUR_GREEN_NAME = 'Green'
#: line colour blue name
PROPERTY_LINE_COLOUR_BLUE_NAME = 'Blue'
#: graphic styles used for elements in families
#: graphic style projection name
CATEGORY_GRAPHIC_STYLE_PROJECTION = 'Projection'
#: graphic style cut name
CATEGORY_GRAPHIC_STYLE_CUT = 'Cut'
#: graphic style 3D name
CATEGORY_GRAPHIC_STYLE_3D = '3D'
# -------------------------------------------- common variables --------------------
#: Header used in report files
REPORT_CATEGORIES_HEADER = [
'HOSTFILE',
'FAMILY CATEGORY',
'MAINCATEGORYNAME',
'SUBCATEGORYNAME',
'CATEGORYID',
PROPERTY_MATERIAL_NAME.upper(),
PROPERTY_MATERIAL_ID.upper(),
rPat.PROPERTY_PATTERN_NAME.upper(),
rPat.PROPERTY_PATTERN_ID.upper(),
PROPERTY_LINE_WEIGHT_PROJECTION_NAME.upper(),
PROPERTY_LINE_WEIGHT_CUT_NAME.upper(),
PROPERTY_LINE_COLOUR_RED_NAME.upper(),
PROPERTY_LINE_COLOUR_GREEN_NAME.upper(),
PROPERTY_LINE_COLOUR_BLUE_NAME.upper(),
CATEGORY_GRAPHIC_STYLE_3D.upper(),
CATEGORY_GRAPHIC_STYLE_PROJECTION.upper(),
CATEGORY_GRAPHIC_STYLE_CUT.upper()
]
def GetMainSubCategories(doc):
'''
Returns all subcategories of the family category in a dictionary where\
key: sub category name
value: sub category
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: A dictionary.
:rtype: dictionary {str: Autodesk.Revit.DB.Category}
'''
catData = {}
# get the family category
familyCategoryName = doc.OwnerFamily.FamilyCategory.Name
# get all subcategories in Document
for mainCat in doc.Settings.Categories:
# find the category matching this document's category
# to ensure default subcategories with an id less than 0 are also extracted
if (mainCat.Name == familyCategoryName):
# loop over all sub categories
for subCat in mainCat.SubCategories:
catData[subCat.Name] = subCat
return catData
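# Illustrative sketch, not part of the original module: list the family's
# subcategories and their ids. Requires an open Revit family document.
def _exampleListMainSubCategories(doc):
    subCats = GetMainSubCategories(doc)
    for name in subCats:
        # built-in subcategories have ids below 0, user-created ones above 0
        print(name + ' : ' + str(subCats[name].Id.IntegerValue))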
def GetFamilyCategory(doc):
'''
Gets the family category in a dictionary where\
key: category name
value: category
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: A dictionary.
:rtype: dictionary {str: Autodesk.Revit.DB.Category}
'''
catData = {}
# get the family category
currentFamCat = doc.OwnerFamily.FamilyCategory
catData [currentFamCat.Name] = currentFamCat
return catData
def GetOtherSubCategories(doc):
'''
Returns all family subcategories which do not belong to the actual family category.
key: category name
value: dictionary : key sub cat name, value: subcategory
Note: custom subcategories have an Id greater than 0
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: A dictionary.
:rtype: dictionary {str: {str:Autodesk.Revit.DB.Category} }
'''
catData = {}
# get the family category
familyCategoryName = doc.OwnerFamily.FamilyCategory.Name
# get all subcategories in Document
for mainCat in doc.Settings.Categories:
# find the category not matching this document's category
# to ensure default subcategories with an id less than 0 are also extracted
if (mainCat.Name != familyCategoryName):
if (mainCat.Name not in catData):
catData[mainCat.Name] = {}
# loop over all sub categories
for subCat in mainCat.SubCategories:
catData[mainCat.Name][subCat.Name] = subCat
return catData
def GetOtherCustomSubCategories(doc):
'''
Returns all family custom subcategories which do not belong to the actual family category.
Custom categories have an Id greater than 0.
key: category name
value: dictionary : key sub cat name, value: subcategory
Note: custom subcategories have an Id greater than 0
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: A dictionary.
:rtype: dictionary {str: {str:Autodesk.Revit.DB.Category} }
'''
catData = {}
# get the family category
familyCategoryName = doc.OwnerFamily.FamilyCategory.Name
# get all subcategories in Document
for mainCat in doc.Settings.Categories:
# find the category not matching this document's category
# to ensure default subcategories with an id less than 0 are also extracted
if (mainCat.Name != familyCategoryName):
if (mainCat.Name not in catData):
catData[mainCat.Name] = {}
# loop over all sub categories
for subCat in mainCat.SubCategories:
if(subCat.Id.IntegerValue > 0):
catData[mainCat.Name][subCat.Name] = subCat
return catData
def GetOtherCategories(doc):
'''
Returns all family pre defined categories which do not belong to the actual family category.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: A list of categories.
:rtype: [Autodesk.Revit.DB.Category]
'''
catData = []
# get the family category
familyCategoryName = doc.OwnerFamily.FamilyCategory.Name
# get all subcategories in Document
for mainCat in doc.Settings.Categories:
# find the category not matching this document's category
# to ensure default subcategories with an id less than 0 are also extracted
if (mainCat.Name != familyCategoryName):
if (mainCat not in catData):
catData.append(mainCat)
return catData
def GetCategoryByBuiltInDefName(doc, builtInDefs):
'''
Returns categories by their built in definition
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param builtInDefs: list of BuiltInCategory Enumeration values
:type builtInDefs: [Autodesk.Revit.DB.BuiltInCategory]
:return: list of categories
:rtype: [Autodesk.Revit.DB.Category]
'''
cats = []
documentSettings = doc.Settings
groups = documentSettings.Categories
for builtInDef in builtInDefs:
cat = groups.get_Item(builtInDef)
if cat!=None:
cats.append(cat)
return cats
def GetCategoryGraphicStyleIds(cat):
'''
Returns a dictionary with keys: Projection, Cut, 3D and their respective ids
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:return: A dictionary
:rtype: dictionary {str: Autodesk.Revit.DB.ElementId}
'''
iDGraphicStyleProjection = cat.GetGraphicsStyle(rdb.GraphicsStyleType.Projection).Id
# check if this category has a cut style ( some families always appear in elevation only!)
graphicStyleCut = cat.GetGraphicsStyle(rdb.GraphicsStyleType.Cut)
# set as default invalid element id
iDGraphicStyleCut = rdb.ElementId.InvalidElementId
if(graphicStyleCut != None):
iDGraphicStyleCut = cat.GetGraphicsStyle(rdb.GraphicsStyleType.Cut).Id
# build category dictionary where key is the style type, values is the corresponding Id
dic = {}
dic[CATEGORY_GRAPHIC_STYLE_PROJECTION] = iDGraphicStyleProjection
dic[CATEGORY_GRAPHIC_STYLE_CUT] = iDGraphicStyleCut
dic[CATEGORY_GRAPHIC_STYLE_3D] = cat.Id
return dic
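# Illustrative sketch, not part of the original module: inspect the graphic
# style ids of every main subcategory in an open family document.
def _exampleShowGraphicStyleIds(doc):
    for name, cat in GetMainSubCategories(doc).items():
        ids = GetCategoryGraphicStyleIds(cat)
        # an InvalidElementId cut style means the category has no cut representation
        print(name + ' cut style id: ' + str(ids[CATEGORY_GRAPHIC_STYLE_CUT]))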
def GetCategoryMaterial(cat):
'''
Returns the material properties name and id as a dictionary where key is property name and\
value the property id.
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:return: A dictionary
:rtype: dictionary {str: Autodesk.Revit.DB.ElementId}\
If no material is assigned to a category it will return {'None: Autodesk.Revit.DB.ElementId.InvalidElementId}
'''
dicMaterial = {}
dicMaterial[PROPERTY_MATERIAL_NAME] = PROPERTY_MATERIAL_NAME_VALUE_DEFAULT
dicMaterial[PROPERTY_MATERIAL_ID] = rdb.ElementId.InvalidElementId
material = cat.Material
if(material != None):
dicMaterial[PROPERTY_MATERIAL_NAME] = rdb.Element.Name.GetValue(material)
dicMaterial[PROPERTY_MATERIAL_ID] = material.Id
return dicMaterial
def GetCategoryLineWeights(cat):
'''
Returns the line weight properties (cut and projection) as a dictionary\
where key is property description and value the property value
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:return: A dictionary.
:rtype: dictionary {str: nullable integer}
'''
dicLineWeights = {}
dicLineWeights[PROPERTY_LINE_WEIGHT_PROJECTION_NAME] = cat.GetLineWeight(rdb.GraphicsStyleType.Projection)
dicLineWeights[PROPERTY_LINE_WEIGHT_CUT_NAME] = cat.GetLineWeight(rdb.GraphicsStyleType.Cut)
return dicLineWeights
def GetCategoryColour(cat):
'''
Returns the colour properties (RGB) and values as a dictionary where key is colour name\
and value the property value
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:return: A dictionary.
:rtype: dictionary {str: byte}
'''
dicColour = {}
dicColour[PROPERTY_LINE_COLOUR_RED_NAME] = 0
dicColour[PROPERTY_LINE_COLOUR_GREEN_NAME] = 0
dicColour[PROPERTY_LINE_COLOUR_BLUE_NAME] = 0
if (cat.LineColor.IsValid):
dicColour[PROPERTY_LINE_COLOUR_RED_NAME] = cat.LineColor.Red
dicColour[PROPERTY_LINE_COLOUR_GREEN_NAME] = cat.LineColor.Green
dicColour[PROPERTY_LINE_COLOUR_BLUE_NAME] = cat.LineColor.Blue
return dicColour
def GetCategoryProperties(cat, doc):
'''
Returns a dictionary where keys are category property names and value is the associated property value.
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: A dictionary.
:rtype: list [{str: var}]
'''
properties = []
# material
dicMaterial = GetCategoryMaterial(cat)
properties.append(dicMaterial)
# line pattern
dicPattern = rPat.GetLinePatternFromCategory(cat, doc)
properties.append(dicPattern)
# line weights
dicLineWeights = GetCategoryLineWeights(cat)
properties.append(dicLineWeights)
# category colour
dicColour = GetCategoryColour(cat)
properties.append(dicColour)
return properties
def GetSavedCategoryPropertyByName(properties, propNames):
'''
Returns property values matching property names in saved category data.
:param properties: List of dictionaries in format as per GetCategoryProperties(cat) method.
:type properties: list [{str: var}]
:param propNames: List of property names of which the values are to be returned
:type propNames: list str
:return: A list of values.
:rtype: list var
'''
propValues = []
for propName in propNames:
match = False
for savedProp in properties:
if (propName in savedProp):
propValues.append(savedProp[propName])
match = True
if(match == False):
propValues.append(None)
return propValues
#-------------------------- set category properties ---------------------------------
def SetCategoryMaterial(doc, cat, materialId):
'''
Updates material property of a given category.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:param materialId: The new material element id.
:type materialId: Autodesk.Revit.DB.ElementId
:return: True if material property was updated successfully, otherwise False.
:rtype: bool
'''
flag = True
try:
mat = doc.GetElement(materialId)
def action():
cat.Material = mat
transaction = rdb.Transaction(doc,'Updating subcategory material: ' + str(rdb.Element.Name.GetValue(mat)))
updateMat = com.InTransaction(transaction, action)
except Exception as e:
print('SetCategoryMaterial ' + str(e))
flag = False
return flag
def SetCategoryLinePattern(doc, cat, linePatternId):
'''
Updates line pattern property of a given category.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:param linePatternId: The new line pattern element id.
:type linePatternId: Autodesk.Revit.DB.ElementId
:return: True if line pattern property was updated successfully, otherwise False.
:rtype: bool
'''
flag = True
try:
def action():
cat.SetLinePatternId(linePatternId, rdb.GraphicsStyleType.Cut)
cat.SetLinePatternId(linePatternId, rdb.GraphicsStyleType.Projection)
transaction = rdb.Transaction(doc,'Updating subcategory line pattern')
updateLinePattern = com.InTransaction(transaction, action)
except Exception as e:
print('SetCategoryLinePattern ' + str(e))
flag = False
return flag
def SetCategoryLineWeights(doc, cat, lineThickNessCut, lineThicknessProjection):
'''
Updates line weight properties of a given category.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:param lineThickNessCut: The cut line weight.
:type lineThickNessCut: int
:param lineThicknessProjection: The projection line weight.
:type lineThicknessProjection: int
:return: True if line weight property was updated successfully, otherwise False.
:rtype: bool
'''
flag = True
try:
def action():
cat.SetLineWeight(lineThickNessCut, rdb.GraphicsStyleType.Cut)
cat.SetLineWeight(lineThicknessProjection, rdb.GraphicsStyleType.Projection)
transaction = rdb.Transaction(doc,'Updating subcategory line weights')
updateLineWeights = com.InTransaction(transaction, action)
except Exception as e:
print('SetCategoryLineWeights:' + str(e))
flag = False
return flag
def SetCategoryColour(doc, cat, red, green, blue):
'''
Updates colour properties of a given category.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:param red: The colour red channel.
:type red: byte
:param green: The colour green channel.
:type green: byte
:param blue: The colour blue channel.
:type blue: byte
:return: True if colour property was updated successfully, otherwise False.
:rtype: bool
'''
flag = True
try:
def action():
newColour = rdb.Color(red, green, blue)
cat.LineColor = newColour
transaction = rdb.Transaction(doc,'Updating subcategory colour')
updateColour = com.InTransaction(transaction, action)
except Exception as e:
print('SetCategoryColour ' + str(e))
flag = False
return flag
def SetCategoryProperties(doc, cat, properties):
'''
Updates various property values of a given category.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:param properties: List of property values to be applied to category.
:type properties: list of dictionaries in format as per GetCategoryProperties(cat) method.
:return: True if all properties were updated successfully, otherwise False.
:rtype: bool
'''
# material
matId = GetSavedCategoryPropertyByName(properties, [PROPERTY_MATERIAL_ID])
flagMat = SetCategoryMaterial(doc, cat, matId[0])
# line pattern
linePatternId = GetSavedCategoryPropertyByName(properties, [rPat.PROPERTY_PATTERN_ID])
flagPattern = SetCategoryLinePattern(doc, cat, linePatternId[0])
# line weights
lineWeights = GetSavedCategoryPropertyByName(properties, [PROPERTY_LINE_WEIGHT_CUT_NAME, PROPERTY_LINE_WEIGHT_PROJECTION_NAME])
flagLineWeights = SetCategoryLineWeights(doc, cat, lineWeights[0], lineWeights[1])
# category colour
colourRGB = GetSavedCategoryPropertyByName(properties, [PROPERTY_LINE_COLOUR_RED_NAME, PROPERTY_LINE_COLOUR_GREEN_NAME, PROPERTY_LINE_COLOUR_BLUE_NAME])
flagColours = SetCategoryColour(doc, cat, colourRGB[0], colourRGB[1], colourRGB[2])
return flagMat & flagPattern & flagLineWeights & flagColours
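# Illustrative sketch, not part of the original module: copy all stored
# properties from one subcategory to another by name; both category names
# are hypothetical and must exist in the family document.
def _exampleCopyCategoryProperties(doc, sourceName, targetName):
    cats = GetMainSubCategories(doc)
    if (sourceName in cats and targetName in cats):
        props = GetCategoryProperties(cats[sourceName], doc)
        return SetCategoryProperties(doc, cats[targetName], props)
    return False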
#-------------------------- utilities ---------------------------------
# doc current family document
# newCategoryName
def SetFamilyCategory(doc, newCategoryName):
'''
Changes the family category to new one specified by name.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param newCategoryName: The name of the new family category.
:type newCategoryName: str
:return: Result class instance. result.status is True only if the category was changed successfully; in any other case it is False (including when the family is already of the new category).
:rtype: :class:`.Result`
'''
returnValue = res.Result()
cat = doc.OwnerFamily.FamilyCategory
if (cat.Name != newCategoryName):
if (doc.Settings.Categories.Contains(newCategoryName)):
def action():
doc.OwnerFamily.FamilyCategory = doc.Settings.Categories.get_Item(newCategoryName)
transaction = rdb.Transaction(doc,'Changing family category to:' + str(newCategoryName))
changeCat = com.InTransaction(transaction, action)
if(changeCat.status):
returnValue.UpdateSep(True, 'Successfully changed family category to: '+str(newCategoryName))
else:
returnValue.Update(changeCat)
else:
returnValue.UpdateSep(False, 'Invalid Category name supplied: ' + str(newCategoryName))
else:
returnValue.UpdateSep(False, 'Family is already of category: '+str(newCategoryName))
return returnValue
def CreateNewSubCategoryToFamilyCategory(doc, newSubCategoryName):
'''
Creates a new subcategory to the family category and returns it.
TODO: Bubble up exception if subcategory already exists!
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param newSubCategoryName: The new subcategory name
:type newSubCategoryName: str
:return: Result class instance; result.result holds the new subcategory on success. If a subcategory with the same name already exists, the exception "The name 'xys' is already in use" is reported in result.message.
:rtype: :class:`.Result`
'''
returnValue = res.Result()
if (doc.IsFamilyDocument):
# get the family category
currentFamCat = doc.OwnerFamily.FamilyCategory
parentCategory = None
for mainCat in doc.Settings.Categories:
if (mainCat.Name == currentFamCat.Name):
parentCategory = mainCat
break
if(newSubCategoryName != parentCategory.Name):
def action():
actionReturnValue = res.Result()
try:
newSubCategory = doc.Settings.Categories.NewSubcategory(parentCategory, newSubCategoryName)
actionReturnValue.UpdateSep(True, 'Created subcategory ' + str(newSubCategoryName))
actionReturnValue.result = newSubCategory
except Exception as e:
actionReturnValue.UpdateSep(False, 'Failed to create ' + str(newSubCategoryName) + ' with exception: ' + str(e))
return actionReturnValue
transaction = rdb.Transaction(doc,'Creating subcategory: ' + str(newSubCategoryName))
returnValue = com.InTransaction(transaction, action)
else:
returnValue.UpdateSep(False, 'Cannot create a subcategory with the same name as the family category!')
else:
returnValue.UpdateSep(False, 'This is not a family document!')
return returnValue
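# Illustrative sketch, not part of the original module: create a subcategory
# only if it does not exist yet; the subcategory name is hypothetical.
def _exampleEnsureSubCategory(doc, subCatName):
    cats = GetMainSubCategories(doc)
    if (subCatName in cats):
        return cats[subCatName]
    newCat = CreateNewSubCategoryToFamilyCategory(doc, subCatName)
    if (newCat.status):
        return newCat.result
    return None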
def SortElementsByCategory(elements, elementDic):
'''
Returns a dictionary of element ids where key is the category they belong to.
:param elements: List of revit elements.
:type elements: [Autodesk.Revit.DB.Element]
:param elementDic: Dictionary where key is subcategory and values are element ids.
:type elementDic: {Autodesk.Revit.DB.Category: [Autodesk.Revit.DB.ElementId]}
:return: Dictionary where key is subcategory id and values are element ids.
:rtype: {Autodesk.Revit.DB.ElementId: [Autodesk.Revit.DB.ElementId]}
'''
for el in elements:
for builtinDef in ELEMENTS_PARAS_SUB:
value = com.GetBuiltInParameterValue(el, builtinDef, com.GetParameterValueAsElementId)
if (value != None):
if(value in elementDic):
elementDic[value].append(el.Id)
else:
elementDic[value] = [el.Id]
break
return elementDic
def SortGeometryElementsByCategory(elements, elementDic, doc):
counter = 0
for el in elements:
counter = counter + 1
graphicStyleId = rdb.ElementId.InvalidElementId
if(type(el) is rdb.Solid):
# get graphic style id from edges
edgeArray = el.Edges
if(edgeArray.IsEmpty == False):
for edge in edgeArray:
graphicStyleId = edge.GraphicsStyleId
else:
graphicStyleId = el.GraphicsStyleId
# failed to get an id?
if(graphicStyleId != rdb.ElementId.InvalidElementId):
graphicStyle = doc.GetElement(graphicStyleId)
graphCatId = graphicStyle.GraphicsStyleCategory.Id
# geometry elements have no Id property, so pass in an invalid element id instead
if (graphCatId != None):
if(graphCatId in elementDic):
elementDic[graphCatId].append(rdb.ElementId.InvalidElementId)
else:
elementDic[graphCatId] = [rdb.ElementId.InvalidElementId]
return elementDic
def _sortAllElementsByCategory(doc):
'''
Sorts all elements in a family by category.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: Dictionary where key is subcategory id and values are element ids.
:rtype: {Autodesk.Revit.DB.ElementId: [Autodesk.Revit.DB.ElementId]}
'''
# get all elements in family
dic = {}
elCurve = rFamUtils.GetAllCurveBasedElementsInFamily(doc)
elForms = rFamUtils.GetAllGenericFormsInFamily(doc)
elMText = rFamUtils.GetAllModelTextElementsInFamily(doc)
elRefPlanes = rFamUtils.GetAllReferencePlanesInFamily(doc)
# get import Instance elements
elImport = rLink.GetAllCADImportInstancesGeometry(doc)
# build dictionary where key is category or graphic style id of a category
dic = SortElementsByCategory(elCurve, dic)
dic = SortElementsByCategory(elForms, dic)
dic = SortElementsByCategory(elMText, dic)
dic = SortElementsByCategory(elRefPlanes, dic)
# geometry instances use a property rather than a parameter to store the category style Id
dic = SortGeometryElementsByCategory(elImport, dic, doc)
return dic
def GetElementsByCategory(doc, cat):
'''
Returns elements in family assigned to a specific category
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param cat: A category.
:type cat: Autodesk.Revit.DB.Category
:return: Dictionary where key is subcategory and values are element ids.
:rtype: {Autodesk.Revit.DB.Category: [Autodesk.Revit.DB.ElementId]}
'''
# get all elements in family
dic = _sortAllElementsByCategory(doc)
# get id and graphic style id of category to be filtered by
categoryIds = GetCategoryGraphicStyleIds(cat)
# check whether the category passed in is the same as the owner family category
if(doc.OwnerFamily.FamilyCategory.Name == cat.Name):
# 3d elements within the family which have their subcategory set to 'none' belong to the owner family
# category. Revit uses a None value as the id rather than the actual category id;
# the parameter value getter translates that into -1 (invalid element id)
categoryIds[CATEGORY_GRAPHIC_STYLE_3D] = rdb.ElementId.InvalidElementId
dicFiltered = {}
# filter elements by category ids
for key,value in categoryIds.items():
# print (key + ' ' + str(value))
if value in dic:
dicFiltered[key] = dic[value]
else:
dicFiltered[key] = []
return dicFiltered
def GetUsedCategoryIds(doc):
'''
Returns all category ids in a family which have an element assigned to them
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:return: List of categories.
:rtype: [Autodesk.Revit.DB.Category]
'''
# get all elements in family
dic = _sortAllElementsByCategory(doc)
return dic.keys ()
def CreateNewCategoryAndTransferProperties(doc, newCatName, existingCatName):
'''
Creates a new subcategory and transfer properties from existing subcategory.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param newCatName: The new sub category name.
:type newCatName: str
:param existingCatName: The existing subcategory name
:type existingCatName: str
:return:
Result class instance.
- result.status. True if category was created or already existed in file, otherwise False.
- result.message will contain the name of the category created.
- result.result returns new category, if category already exists in file it will return that
On exception:
- result.status (bool) will be False.
- result.message will contain generic exception message.
- result.result will be empty
:rtype: :class:`.Result`
'''
returnValue = res.Result()
cats = GetMainSubCategories(doc)
# check if existing category actually exists in family
if(existingCatName in cats):
# check whether the new category already exists!
if (newCatName not in cats):
copyFromCat = cats[existingCatName]
catProps = GetCategoryProperties(copyFromCat, doc)
resultNewSubCat = CreateNewCategoryFromSavedProperties(doc, newCatName, catProps)
returnValue.Update(resultNewSubCat)
else:
returnValue.UpdateSep(True, 'Category already in file:'+ str(newCatName))
returnValue.result = cats[newCatName]
else:
returnValue.UpdateSep(False, 'Template category '+ str(existingCatName) + ' does not exist in file!')
return returnValue
def CreateNewCategoryFromSavedProperties(doc, newCatName, savedCatProps):
'''
Creates a new category and applies properties stored.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param newCatName: The new sub category name.
:type newCatName: str
:param savedCatProps: Dictionary containing subcategory properties.
:type savedCatProps: list of dictionaries in format as per GetCategoryProperties(cat) method.
:return:
Result class instance.
- result.status. True if category was created or already existed in file, otherwise False.
- result.message will contain the name of the category created.
- result.result returns new category, if category already exists in file it will return that
On exception:
- result.status (bool) will be False.
- result.message will contain generic exception message.
- result.result will be empty
:rtype: :class:`.Result`
'''
returnValue = res.Result()
resultNewSubCat = CreateNewSubCategoryToFamilyCategory(doc, newCatName)
if(resultNewSubCat.result):
newSubCat = resultNewSubCat.result
# note: SetCategoryProperties expects (doc, cat, properties)
flag = SetCategoryProperties(doc, newSubCat, savedCatProps)
if(flag):
returnValue.UpdateSep(True, 'Successfully created category '+ str(newCatName))
returnValue.result = newSubCat
else:
returnValue.UpdateSep(False, 'Failed to apply properties to new category: '+ str(newCatName))
return returnValue
def MoveElementsFromSubCategoryToSubCategory(doc, fromCategoryName, toCategoryName):
'''
Moves elements from one subcategory to another one identified by their names.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param fromCategoryName: The source subcategory name.
:type fromCategoryName: str
:param toCategoryName: The destination subcategory name.
:type toCategoryName: str
:return:
Result class instance.
- result.status. True if all elements from the source subcategory were moved to the destination subcategory, otherwise False.
- result.message will contain the name of the destination subcategory by element.
- result.result empty list
On exception:
- result.status (bool) will be False.
- result.message will contain generic exception message.
- result.result will be empty
:rtype: :class:`.Result`
'''
returnValue = res.Result()
# check whether source and destination category exist in file
cats = GetMainSubCategories(doc)
if(fromCategoryName in cats):
if(toCategoryName in cats):
# dictionary containing destination category ids (3D, cut and projection)
destinationCatIds = GetCategoryGraphicStyleIds(cats[toCategoryName])
# get elements on source category
dic = GetElementsByCategory(doc, cats[fromCategoryName])
# move elements
returnValue = MoveElementsToCategory(doc, dic, toCategoryName, destinationCatIds)
else:
returnValue.UpdateSep(False, 'Destination category '+ str(toCategoryName) + ' does not exist in file!')
else:
returnValue.UpdateSep(False, 'Source category '+ str(fromCategoryName) + ' does not exist in file!')
return returnValue
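# Illustrative sketch, not part of the original module: merge one subcategory
# into another and report the outcome; the names are hypothetical.
def _exampleMergeSubCategories(doc, fromName, toName):
    result = MoveElementsFromSubCategoryToSubCategory(doc, fromName, toName)
    print(result.message)
    return result.status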
def MoveElementsToCategory(doc, elements, toCategoryName, destinationCatIds):
'''
Moves elements provided in dictionary to another category specified by name.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param elements: Dictionary of elements, key are graphic style names.
:type elements: {Autodesk.Revit.DB.Category: [Autodesk.Revit.DB.ElementId]}
:param toCategoryName: The name of the subcategory elements are to be moved to.
:type toCategoryName: str
:param destinationCatIds: Dictionary of ids of graphic style, key are graphic style names
:type destinationCatIds: dictionary {str: Autodesk.Revit.DB.ElementId}
:return:
Result class instance.
- result.status. True if all elements were moved to the destination subcategories, otherwise False.
- result.message will contain the name of the destination subcategory by element.
- result.result empty list
On exception:
- result.status (bool) will be False.
- result.message will contain generic exception message.
- result.result will be empty
:rtype: :class:`.Result`
'''
returnValue = res.Result()
# check whether destination category exist in file
cats = GetMainSubCategories(doc)
if(toCategoryName in cats):
for key,value in elements.items():
# anything needing moving?
if(len(value)>0):
for elId in value:
el = doc.GetElement(elId)
paras = el.GetOrderedParameters()
for p in paras:
if (p.Definition.BuiltInParameter in ELEMENTS_PARAS_SUB):
targetId = destinationCatIds[key]
updatedPara = com.setParameterValue(p, str(targetId), doc)
returnValue.Update(updatedPara)
break
else:
returnValue.UpdateSep(False, 'Destination category '+ str(toCategoryName) + ' does not exist in file!')
return returnValue
def ChangeFamilyCategory(doc, newCategoryName):
'''
Changes the current family category to the new one specified.
Revit's default behavior when changing the category of a family is to discard all custom subcategories created and assign elements which are on those custom subcategories\
to the new family category.
This function will also re-create any user-created subcategories under the new category and assign elements to them to match the subcategory they were on before\
the category change.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param newCategoryName: The new family category
:type newCategoryName: str
:return:
Result class instance.
- result.status. True if all custom subcategories were re-created under the new family category and elements were moved to those subcategories, otherwise False.
- result.message will confirm successful creation of subcategories and element move.
- result.result empty list
On exception:
- result.status (bool) will be False.
- result.message will contain generic exception message.
- result.result will be empty
:rtype: :class:`.Result`
'''
returnValue = res.Result()
# get sub categories in family
subCats = GetMainSubCategories (doc)
# get all elements on custom subcategories
elements = {}
for subCat in subCats:
el = GetElementsByCategory(doc, subCats[subCat])
elements[subCat] = el
# get properties of all custom sub categories
props = {}
for subCat in subCats:
prop = GetCategoryProperties(subCats[subCat], doc)
props[subCat] = prop
# change family category
changeFam = SetFamilyCategory(doc, newCategoryName)
if(changeFam.status):
# re-create custom sub categories
for subCat in subCats:
# only re-create custom sub categories (id greater then 0)
if(subCats[subCat].Id.IntegerValue > 0):
createCat = CreateNewCategoryFromSavedProperties(doc, subCat, props[subCat])
if(createCat.status):
# move elements back onto the re-created subcategory; the destination ids are
# the graphic style ids of the new category, not its saved properties
moveEl = MoveElementsToCategory(doc, elements[subCat], subCat, GetCategoryGraphicStyleIds(createCat.result))
returnValue.Update(moveEl)
else:
returnValue.Update(createCat)
else:
returnValue.UpdateSep(False, 'Failed to change family category:' + changeFam.message)
return returnValue
def BuildReportDataByCategory(doc, dic, familyCat, mainCatName, docFilePath):
'''
Formats category properties into lists for reports
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param dic: Dictionary where key is the category name and value the category.
:type dic: dictionary {str: Autodesk.Revit.DB.Category}
:param familyCat: The family category name.
:type familyCat: str
:param mainCatName: A hard-coded Revit category name. Can be the same as familyCat.
:type mainCatName: str
:param docFilePath: The fully qualified family file path.
:type docFilePath: str
:return: A list of list of strings. Each row represents one category.
:rtype: list[list[str]]
'''
data = []
for key in dic:
row = [str(docFilePath).encode('utf-8'),
familyCat.encode('utf-8'),
mainCatName.encode('utf-8'),
key.encode('utf-8'),
str(dic[key].Id)]
# get elements
elements = GetElementsByCategory (doc, dic[key])
# get properties
dicMaterial = GetCategoryMaterial(dic[key])
row.append(str(dicMaterial[PROPERTY_MATERIAL_NAME]).encode('utf-8'))
row.append(str(dicMaterial[PROPERTY_MATERIAL_ID]).encode('utf-8'))
# line pattern
dicPattern = rPat.GetLinePatternFromCategory(dic[key], doc)
row.append(str(dicPattern[rPat.PROPERTY_PATTERN_NAME]).encode('utf-8'))
row.append(str(dicPattern[rPat.PROPERTY_PATTERN_ID]).encode('utf-8'))
# line weights
dicLineWeights = GetCategoryLineWeights(dic[key])
row.append(str(dicLineWeights[PROPERTY_LINE_WEIGHT_PROJECTION_NAME]).encode('utf-8'))
row.append(str(dicLineWeights[PROPERTY_LINE_WEIGHT_CUT_NAME]).encode('utf-8'))
# category colour
dicColour = GetCategoryColour(dic[key])
row.append(str(dicColour[PROPERTY_LINE_COLOUR_RED_NAME]).encode('utf-8'))
row.append(str(dicColour[PROPERTY_LINE_COLOUR_GREEN_NAME]).encode('utf-8'))
row.append(str(dicColour[PROPERTY_LINE_COLOUR_BLUE_NAME]).encode('utf-8'))
# elements
row.append(str(len(elements[CATEGORY_GRAPHIC_STYLE_3D])).encode('utf-8'))
row.append(str(len(elements[CATEGORY_GRAPHIC_STYLE_PROJECTION])).encode('utf-8'))
row.append(str(len(elements[CATEGORY_GRAPHIC_STYLE_CUT])).encode('utf-8'))
data.append(row)
return data
def GetReportData(doc, revitFilePath):
'''
Reports all categories, their properties and all elements belonging to them.
:param doc: Current Revit family document.
:type doc: Autodesk.Revit.DB.Document
:param revitFilePath: The fully qualified family file path.
:type revitFilePath: str
:return: A list of list of strings. Each row represents one category.
:rtype: list[list[str]]
'''
data = []
# get all sub categories in family and associates elements;
subCats = GetMainSubCategories(doc)
familyCat = GetFamilyCategory(doc)
otherCats = GetOtherSubCategories(doc)
familyCatName = list(familyCat.keys())[0]
# build output
data = BuildReportDataByCategory(doc, familyCat, familyCatName, familyCatName, revitFilePath)
data = data + BuildReportDataByCategory(doc, subCats, familyCatName, familyCatName, revitFilePath)
for cat in otherCats:
data = data + BuildReportDataByCategory(doc, otherCats[cat], familyCatName, cat, revitFilePath)
return data
# ---------------------------------------------------------------------------
# File: dr14tmeter/dynamic_range_meter.py (from dr14-t.meter-1.0.16)
# ---------------------------------------------------------------------------
import os
import threading
import sys
import codecs
import tempfile
import multiprocessing as mp
from ctypes import Structure, c_double, c_wchar_p, c_int
from dr14tmeter.compute_dr14 import compute_dr14
from dr14tmeter.compute_drv import compute_DRV
from dr14tmeter.compute_dr import *
from dr14tmeter.audio_track import *
from dr14tmeter.table import *
from dr14tmeter.dr_histogram import *
from dr14tmeter.lev_histogram import *
from dr14tmeter.spectrogram import *
from dr14tmeter.compressor import DynCompressor
from dr14tmeter.wav_write import wav_write
from dr14tmeter.read_metadata import RetirveMetadata
from dr14tmeter.audio_decoder import AudioDecoder
from dr14tmeter.duration import StructDuration
from dr14tmeter.write_dr import WriteDr, WriteDrExtended
import dr14tmeter.dr14_global as dr14
from dr14tmeter.out_messages import print_msg, print_out, dr14_log_debug
class SharedDrResObj(Structure):
_fields_ = [('file_name', c_wchar_p), ('dr14', c_double), ('dB_peak', c_double), ('dB_rms', c_double), ('duration', c_double)]
class DynamicRangeMeter:
def __init__( self ):
self.res_list = []
self.dir_name = ''
self.dr14 = 0
self.meta_data = RetirveMetadata()
self.compute_dr = ComputeDR14()
def scan_file( self , file_name):
at = AudioTrack()
duration = StructDuration()
if at.open( file_name ):
self.__compute_and_append( at , file_name )
return 1
else:
return 0
def scan_dir( self , dir_name ):
if not os.path.isdir(dir_name) :
return 0
dir_list = sorted( os.listdir( dir_name ) )
self.dir_name = dir_name
self.dr14 = 0
duration = StructDuration()
at = AudioTrack()
for file_name in dir_list:
full_file = os.path.join( dir_name , file_name )
#print_msg( full_file )
if at.open( full_file ):
self.__compute_and_append( at , file_name )
self.meta_data.scan_dir( dir_name )
if len( self.res_list ) > 0:
self.dr14 = int( round( self.dr14 / len( self.res_list ) ) )
return len( self.res_list )
else:
return 0
def __compute_and_append( self , at , file_name ):
duration = StructDuration()
#( dr14, dB_peak, dB_rms ) = self.compute_dr.compute( at.Y , at.Fs )
( dr14, dB_peak, dB_rms ) = compute_dr14( at.Y , at.Fs , duration )
self.dr14 = self.dr14 + dr14
res = { 'file_name': file_name , 'dr14': dr14 , 'dB_peak': dB_peak , 'dB_rms': dB_rms , 'duration':duration.to_str() }
self.res_list.append(res)
print_msg( file_name + ": \t DR " + str( int(dr14) ) )
def fwrite_dr( self , file_name , tm , ext_table=False , std_out=False , append=False , dr_database=True ):
if ext_table :
wr = WriteDrExtended()
else :
wr = WriteDr()
wr.set_dr_database( dr_database )
self.table_txt = wr.write_dr( self , tm )
if std_out:
print_out( self.table_txt )
return
if append :
file_mode = "a"
else :
file_mode = "w"
        try:
            out_file = codecs.open( file_name , file_mode , "utf-8-sig" )
        except Exception:
            print_msg( "File opening error [%s]: %s" % ( file_name , sys.exc_info()[0] ) )
            return False
out_file.write( self.table_txt )
out_file.close()
return True
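    # Illustrative usage sketch (paths are hypothetical; TextTable is assumed
    # to be one of the table formatters from dr14tmeter.table):
    #
    #   dr_meter = DynamicRangeMeter()
    #   if dr_meter.scan_dir("/music/some_album") > 0:
    #       dr_meter.fwrite_dr("/music/some_album/dr14.txt", TextTable())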
def scan_mp( self , dir_name="" , thread_cnt=2 , files_list=[] ):
if sys.version_info[0] == 2 :
dir_name = dir_name.decode('utf-8')
self.dr14 = 0
if files_list == [] :
if not os.path.isdir(dir_name) :
return 0
dir_list = sorted( os.listdir( dir_name ) )
self.dir_name = dir_name
files_list = None
else:
dir_list = sorted( files_list )
ad = AudioDecoder()
jobs = []
for file_name in dir_list:
( fn , ext ) = os.path.splitext( file_name )
if ext in ad.formats:
jobs.append( file_name )
res_array=[SharedDrResObj() for i in range( len(jobs) )]
for i in range( len(jobs) ) :
res_array[i].file_name = jobs[i]
res_array[i].dr14 = dr14.min_dr()
lock_j = mp.Lock()
lock_res_list = mp.Lock()
        threads = [None for i in range(thread_cnt)]  # placeholders for the worker Process objects
#job_free = [0]
job_free = mp.Value( 'i' , 0 )
res_array_sh = mp.Array( SharedDrResObj , res_array )
for t in range( thread_cnt ):
threads[t] = mp.Process( target=self.run_mp , args=( dir_name , lock_j , lock_res_list , job_free , res_array_sh ) )
for t in range( thread_cnt ):
threads[t].start()
for t in range( thread_cnt ):
threads[t].join()
succ = 0
#empty_res = { 'file_name': '' , 'dr14': dr14.min_dr() , 'dB_peak': -100 , 'dB_rms': -100 , 'duration':"0:0" }
self.res_list = [] # [empty_res for i in range( len(jobs) )]
#i = 0
dur = StructDuration()
for res in res_array_sh:
self.res_list.append( { 'file_name': res.file_name ,
'dr14': res.dr14 ,
'dB_peak': res.dB_peak ,
'dB_rms': res.dB_rms ,
'duration': dur.float_to_str( res.duration ) } )
# i = i + 1
for d in self.res_list:
if d['dr14'] > dr14.min_dr():
self.dr14 = self.dr14 + d['dr14']
succ = succ + 1
self.meta_data.scan_dir( dir_name , files_list )
if len( self.res_list ) > 0 and succ > 0 :
self.dr14 = int( round( self.dr14 / succ ) )
return succ
else:
return 0
def run_mp( self , dir_name , lock_j , lock_res_list , job_free , res_array_sh ):
at = AudioTrack()
duration = StructDuration()
#print_msg("start .... ")
while True:
            #Acquire the next free job
lock_j.acquire()
if job_free.value >= len(res_array_sh):
lock_j.release()
return
curr_job = job_free.value
file_name = res_array_sh[curr_job].file_name
job_free.value = job_free.value + 1
lock_j.release()
full_file = os.path.join( dir_name , file_name )
#print ( full_file )
if at.open( full_file ):
( dr14, dB_peak, dB_rms ) = compute_dr14( at.Y , at.Fs , duration )
lock_res_list.acquire()
print_msg( file_name + ": \t DR " + str( int(dr14) ) )
#res_list[curr_job] = { 'file_name': file_name , 'dr14': dr14 , 'dB_peak': dB_peak , 'dB_rms': dB_rms , 'duration':duration.to_str() }
res_array_sh[curr_job].dr14 = dr14
res_array_sh[curr_job].dB_peak = dB_peak
res_array_sh[curr_job].dB_rms = dB_rms
res_array_sh[curr_job].duration = duration.to_float()
lock_res_list.release()
else:
print_msg( "- fail - " + full_file ) | PypiClean |
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/addons/management/commands/list_addons.py |
from textwrap import wrap
from weblate.addons.events import EVENT_NAMES
from weblate.addons.models import ADDONS, Addon
from weblate.trans.models import Component, Project
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "List installed add-ons"
@staticmethod
def get_help_text(field, name):
result = []
if field.help_text:
result.append(str(field.help_text))
choices = getattr(field, "choices", None)
if choices and name not in ("component", "engines", "file_format"):
if result:
result.append("")
result.append("Available choices:")
for value, description in choices:
result.append("")
result.append(f"``{value}`` -- {description}".replace("\\", "\\\\"))
return result
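    # Sketch of what get_help_text yields for a field with choices (values are
    # hypothetical): ["Some help text.", "", "Available choices:", "",
    # "``mo`` -- gettext MO file", "", "``po`` -- gettext PO file"]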
def handle(self, *args, **options):
"""List installed add-ons."""
fake_addon = Addon(component=Component(project=Project(pk=-1), pk=-1))
for _unused, obj in sorted(ADDONS.items()):
self.stdout.write(f".. _addon-{obj.name}:")
self.stdout.write("\n")
self.stdout.write(obj.verbose)
self.stdout.write("-" * len(obj.verbose))
self.stdout.write("\n")
self.stdout.write(f":Add-on ID: ``{obj.name}``")
if obj.settings_form:
form = obj(fake_addon).get_settings_form(None)
table = [
(f"``{name}``", str(field.label), self.get_help_text(field, name))
for name, field in form.fields.items()
]
prefix = ":Configuration: "
name_width = max(len(name) for name, _label, _help_text in table)
label_width = max(len(label) for _name, label, _help_text in table)
help_text_width = max(
max(len(line) for line in help_text) if help_text else 0
for _name, _label, help_text in table
)
name_row = "-" * (name_width + 2)
label_row = "-" * (label_width + 2)
help_text_row = "-" * (help_text_width + 2)
for name, label, help_text in table:
if not prefix.isspace():
self.stdout.write(
f"{prefix}+{name_row}+{label_row}+{help_text_row}+"
)
prefix = " "
if not help_text:
line = ""
self.stdout.write(
f"{prefix}| {name:<{name_width}s} | {label:<{label_width}s} | {line:<{help_text_width}s} |"
)
for pos, line in enumerate(help_text):
if pos > 0:
name = label = ""
self.stdout.write(
f"{prefix}| {name:<{name_width}s} | {label:<{label_width}s} | {line:<{help_text_width}s} |"
)
self.stdout.write(
f"{prefix}+{name_row}+{label_row}+{help_text_row}+"
)
else:
self.stdout.write(":Configuration: `This add-on has no configuration.`")
events = ", ".join(EVENT_NAMES[event] for event in obj.events)
self.stdout.write(f":Triggers: {events}")
self.stdout.write("\n")
self.stdout.write("\n".join(wrap(obj.description, 79)))
self.stdout.write("\n") | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/code_generation/AttributeCodes.py | from nuitka import Options
from .CodeHelpers import (
decideConversionCheckNeeded,
generateChildExpressionsCode,
generateExpressionCode,
withObjectCodeTemporaryAssignment,
)
from .ErrorCodes import getErrorExitBoolCode, getErrorExitCode, getReleaseCode
from .PythonAPICodes import (
generateCAPIObjectCode,
generateCAPIObjectCode0,
makeArgDescFromExpression,
)
def generateAssignmentAttributeCode(statement, emit, context):
lookup_source = statement.subnode_expression
attribute_name = statement.getAttributeName()
value = statement.subnode_source
value_name = context.allocateTempName("assattr_value")
generateExpressionCode(
to_name=value_name, expression=value, emit=emit, context=context
)
target_name = context.allocateTempName("assattr_target")
generateExpressionCode(
to_name=target_name, expression=lookup_source, emit=emit, context=context
)
with context.withCurrentSourceCodeReference(
value.getSourceReference()
if Options.is_full_compat
else statement.getSourceReference()
):
if attribute_name == "__dict__":
getAttributeAssignmentDictSlotCode(
target_name=target_name,
value_name=value_name,
emit=emit,
context=context,
)
elif attribute_name == "__class__":
getAttributeAssignmentClassSlotCode(
target_name=target_name,
value_name=value_name,
emit=emit,
context=context,
)
else:
getAttributeAssignmentCode(
target_name=target_name,
value_name=value_name,
attribute_name=context.getConstantCode(constant=attribute_name),
emit=emit,
context=context,
)
def generateDelAttributeCode(statement, emit, context):
target_name = context.allocateTempName("attrdel_target")
generateExpressionCode(
to_name=target_name,
expression=statement.subnode_expression,
emit=emit,
context=context,
)
with context.withCurrentSourceCodeReference(
statement.subnode_expression.getSourceReference()
if Options.is_full_compat
else statement.getSourceReference()
):
getAttributeDelCode(
target_name=target_name,
attribute_name=context.getConstantCode(
constant=statement.getAttributeName()
),
emit=emit,
context=context,
)
def getAttributeLookupCode(
to_name, source_name, attribute_name, needs_check, emit, context
):
if attribute_name == "__dict__":
emit("%s = LOOKUP_ATTRIBUTE_DICT_SLOT(tstate, %s);" % (to_name, source_name))
elif attribute_name == "__class__":
emit("%s = LOOKUP_ATTRIBUTE_CLASS_SLOT(tstate, %s);" % (to_name, source_name))
else:
emit(
"%s = LOOKUP_ATTRIBUTE(tstate, %s, %s);"
% (to_name, source_name, context.getConstantCode(attribute_name))
)
getErrorExitCode(
check_name=to_name,
release_name=source_name,
needs_check=needs_check,
emit=emit,
context=context,
)
context.addCleanupTempName(to_name)
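# For illustration, the C emitted by getAttributeLookupCode looks roughly like
# this (temporary and constant names are hypothetical, chosen by the code
# generation context):
#
#   tmp_value = LOOKUP_ATTRIBUTE(tstate, tmp_source, mod_consts[42]);
#   if (tmp_value == NULL) goto try_except_handler_1;  /* error exit */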
def generateAttributeLookupCode(to_name, expression, emit, context):
(source_name,) = generateChildExpressionsCode(
expression=expression,
emit=emit,
context=context,
)
attribute_name = expression.getAttributeName()
with withObjectCodeTemporaryAssignment(
to_name, "attribute_value", expression, emit, context
) as value_name:
getAttributeLookupCode(
to_name=value_name,
source_name=source_name,
attribute_name=attribute_name,
needs_check=expression.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=BaseException, attribute_name=attribute_name
),
emit=emit,
context=context,
)
def getAttributeAssignmentCode(target_name, attribute_name, value_name, emit, context):
res_name = context.getBoolResName()
emit(
"%s = SET_ATTRIBUTE(tstate, %s, %s, %s);"
% (res_name, target_name, attribute_name, value_name)
)
getErrorExitBoolCode(
condition="%s == false" % res_name,
release_names=(value_name, target_name, attribute_name),
emit=emit,
context=context,
)
def getAttributeAssignmentDictSlotCode(target_name, value_name, emit, context):
"""Code for special case target.__dict__ = value"""
res_name = context.getBoolResName()
emit(
"%s = SET_ATTRIBUTE_DICT_SLOT(tstate, %s, %s);"
% (res_name, target_name, value_name)
)
getErrorExitBoolCode(
condition="%s == false" % res_name,
release_names=(value_name, target_name),
emit=emit,
context=context,
)
def getAttributeAssignmentClassSlotCode(target_name, value_name, emit, context):
"""Get code for special case target.__class__ = value"""
res_name = context.getBoolResName()
emit(
"%s = SET_ATTRIBUTE_CLASS_SLOT(tstate, %s, %s);"
% (res_name, target_name, value_name)
)
getErrorExitBoolCode(
condition="%s == false" % res_name,
release_names=(value_name, target_name),
emit=emit,
context=context,
)
def getAttributeDelCode(target_name, attribute_name, emit, context):
res_name = context.getIntResName()
emit("%s = PyObject_DelAttr(%s, %s);" % (res_name, target_name, attribute_name))
getErrorExitBoolCode(
condition="%s == -1" % res_name,
release_names=(target_name, attribute_name),
emit=emit,
context=context,
)
def generateAttributeLookupSpecialCode(to_name, expression, emit, context):
(source_name,) = generateChildExpressionsCode(
expression=expression, emit=emit, context=context
)
attribute_name = expression.getAttributeName()
getAttributeLookupSpecialCode(
to_name=to_name,
source_name=source_name,
attr_name=context.getConstantCode(constant=attribute_name),
needs_check=expression.subnode_expression.mayRaiseExceptionAttributeLookupSpecial(
exception_type=BaseException, attribute_name=attribute_name
),
emit=emit,
context=context,
)
def getAttributeLookupSpecialCode(
to_name, source_name, attr_name, needs_check, emit, context
):
emit("%s = LOOKUP_SPECIAL(tstate, %s, %s);" % (to_name, source_name, attr_name))
getErrorExitCode(
check_name=to_name,
release_names=(source_name, attr_name),
emit=emit,
needs_check=needs_check,
context=context,
)
context.addCleanupTempName(to_name)
def generateBuiltinHasattrCode(to_name, expression, emit, context):
source_name, attr_name = generateChildExpressionsCode(
expression=expression, emit=emit, context=context
)
res_name = context.getIntResName()
emit(
"%s = BUILTIN_HASATTR_BOOL(tstate, %s, %s);"
% (res_name, source_name, attr_name)
)
getErrorExitBoolCode(
condition="%s == -1" % res_name,
release_names=(source_name, attr_name),
needs_check=expression.mayRaiseException(BaseException),
emit=emit,
context=context,
)
to_name.getCType().emitAssignmentCodeFromBoolCondition(
to_name=to_name, condition="%s != 0" % res_name, emit=emit
)
def generateAttributeCheckCode(to_name, expression, emit, context):
(source_name,) = generateChildExpressionsCode(
expression=expression, emit=emit, context=context
)
if expression.mayRaiseExceptionOperation():
res_name = context.getIntResName()
emit(
"%s = HAS_ATTR_BOOL2(tstate, %s, %s);"
% (
res_name,
source_name,
context.getConstantCode(constant=expression.getAttributeName()),
)
)
getErrorExitBoolCode(
condition="%s == -1" % res_name,
release_name=source_name,
emit=emit,
context=context,
)
to_name.getCType().emitAssignmentCodeFromBoolCondition(
to_name=to_name, condition="%s != 0" % res_name, emit=emit
)
else:
res_name = context.getBoolResName()
emit(
"%s = HAS_ATTR_BOOL(tstate, %s, %s);"
% (
res_name,
source_name,
context.getConstantCode(constant=expression.getAttributeName()),
)
)
getReleaseCode(release_name=source_name, emit=emit, context=context)
to_name.getCType().emitAssignmentCodeFromBoolCondition(
to_name=to_name, condition=res_name, emit=emit
)
def generateBuiltinGetattrCode(to_name, expression, emit, context):
generateCAPIObjectCode(
to_name=to_name,
capi="BUILTIN_GETATTR",
tstate=True,
arg_desc=makeArgDescFromExpression(expression),
may_raise=expression.mayRaiseException(BaseException),
conversion_check=decideConversionCheckNeeded(to_name, expression),
source_ref=expression.getCompatibleSourceReference(),
none_null=True,
emit=emit,
context=context,
)
def generateBuiltinSetattrCode(to_name, expression, emit, context):
generateCAPIObjectCode0(
to_name=to_name,
capi="BUILTIN_SETATTR",
tstate=False,
arg_desc=makeArgDescFromExpression(expression),
may_raise=expression.mayRaiseException(BaseException),
conversion_check=decideConversionCheckNeeded(to_name, expression),
source_ref=expression.getCompatibleSourceReference(),
emit=emit,
context=context,
) | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/lang/bn.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['bn']={"dir":"ltr","editor":"Rich Text Editor","common":{"editorHelp":"Press ALT 0 for help","browseServer":"ব্রাউজ সার্ভার","url":"URL","protocol":"প্রোটোকল","upload":"আপলোড","uploadSubmit":"ইহাকে সার্ভারে প্রেরন কর","image":"ছবির লেবেল যুক্ত কর","flash":"ফ্লাশ লেবেল যুক্ত কর","form":"ফর্ম","checkbox":"চেক বাক্স","radio":"রেডিও বাটন","textField":"টেক্সট ফীল্ড","textarea":"টেক্সট এরিয়া","hiddenField":"গুপ্ত ফীল্ড","button":"বাটন","select":"বাছাই ফীল্ড","imageButton":"ছবির বাটন","notSet":"<সেট নেই>","id":"আইডি","name":"নাম","langDir":"ভাষা লেখার দিক","langDirLtr":"বাম থেকে ডান (LTR)","langDirRtl":"ডান থেকে বাম (RTL)","langCode":"ভাষা কোড","longDescr":"URL এর লম্বা বর্ণনা","cssClass":"স্টাইল-শীট ক্লাস","advisoryTitle":"পরামর্শ শীর্ষক","cssStyle":"স্টাইল","ok":"ওকে","cancel":"বাতিল","close":"Close","preview":"প্রিভিউ","resize":"Resize","generalTab":"General","advancedTab":"এডভান্সড","validateNumberFailed":"This value is not a number.","confirmNewPage":"Any unsaved changes to this content will be lost. Are you sure you want to load new page?","confirmCancel":"Some of the options have been changed. Are you sure to close the dialog?","options":"Options","target":"টার্গেট","targetNew":"New Window (_blank)","targetTop":"Topmost Window (_top)","targetSelf":"Same Window (_self)","targetParent":"Parent Window (_parent)","langDirLTR":"বাম থেকে ডান (LTR)","langDirRTL":"ডান থেকে বাম (RTL)","styles":"স্টাইল","cssClasses":"স্টাইল-শীট ক্লাস","width":"প্রস্থ","height":"দৈর্ঘ্য","align":"এলাইন","alignLeft":"বামে","alignRight":"ডানে","alignCenter":"মাঝখানে","alignTop":"উপর","alignMiddle":"মধ্য","alignBottom":"নীচে","invalidValue":"Invalid value.","invalidHeight":"Height must be a number.","invalidWidth":"Width must be a number.","invalidCssLength":"Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).","invalidHtmlLength":"Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).","invalidInlineStyle":"Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.","cssLengthTooltip":"Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).","unavailable":"%1<span class=\"cke_accessibility\">, unavailable</span>"},"about":{"copy":"Copyright © $1. 
All rights reserved.","dlgTitle":"About CKEditor","help":"Check $1 for help.","moreInfo":"For licensing information please visit our web site:","title":"About CKEditor","userGuide":"CKEditor User's Guide"},"basicstyles":{"bold":"বোল্ড","italic":"ইটালিক","strike":"স্ট্রাইক থ্রু","subscript":"অধোলেখ","superscript":"অভিলেখ","underline":"আন্ডারলাইন"},"bidi":{"ltr":"Text direction from left to right","rtl":"Text direction from right to left"},"blockquote":{"toolbar":"Block Quote"},"clipboard":{"copy":"কপি","copyError":"আপনার ব্রাউজারের সুরক্ষা সেটিংস এডিটরকে অটোমেটিক কপি করার অনুমতি দেয়নি। দয়া করে এই কাজের জন্য কিবোর্ড ব্যবহার করুন (Ctrl/Cmd+C)।","cut":"কাট","cutError":"আপনার ব্রাউজারের সুরক্ষা সেটিংস এডিটরকে অটোমেটিক কাট করার অনুমতি দেয়নি। দয়া করে এই কাজের জন্য কিবোর্ড ব্যবহার করুন (Ctrl/Cmd+X)।","paste":"পেস্ট","pasteArea":"Paste Area","pasteMsg":"অনুগ্রহ করে নীচের বাক্সে কিবোর্ড ব্যবহার করে (<STRONG>Ctrl/Cmd+V</STRONG>) পেস্ট করুন এবং <STRONG>OK</STRONG> চাপ দিন","securityMsg":"Because of your browser security settings, the editor is not able to access your clipboard data directly. You are required to paste it again in this window.","title":"পেস্ট"},"colorbutton":{"auto":"অটোমেটিক","bgColorTitle":"বেকগ্রাউন্ড রং","colors":{"000":"Black","800000":"Maroon","8B4513":"Saddle Brown","2F4F4F":"Dark Slate Gray","008080":"Teal","000080":"Navy","4B0082":"Indigo","696969":"Dark Gray","B22222":"Fire Brick","A52A2A":"Brown","DAA520":"Golden Rod","006400":"Dark Green","40E0D0":"Turquoise","0000CD":"Medium Blue","800080":"Purple","808080":"Gray","F00":"Red","FF8C00":"Dark Orange","FFD700":"Gold","008000":"Green","0FF":"Cyan","00F":"Blue","EE82EE":"Violet","A9A9A9":"Dim Gray","FFA07A":"Light Salmon","FFA500":"Orange","FFFF00":"Yellow","00FF00":"Lime","AFEEEE":"Pale Turquoise","ADD8E6":"Light Blue","DDA0DD":"Plum","D3D3D3":"Light Grey","FFF0F5":"Lavender Blush","FAEBD7":"Antique White","FFFFE0":"Light Yellow","F0FFF0":"Honeydew","F0FFFF":"Azure","F0F8FF":"Alice Blue","E6E6FA":"Lavender","FFF":"White"},"more":"আরও রং...","panelTitle":"Colors","textColorTitle":"টেক্স্ট রং"},"colordialog":{"clear":"Clear","highlight":"Highlight","options":"Color Options","selected":"Selected Color","title":"Select color"},"templates":{"button":"টেমপ্লেট","emptyListMsg":"(কোন টেমপ্লেট ডিফাইন করা নেই)","insertOption":"Replace actual contents","options":"Template Options","selectPromptMsg":"অনুগ্রহ করে এডিটরে ওপেন করার জন্য টেমপ্লেট বাছাই করুন<br>(আসল কনটেন্ট হারিয়ে যাবে):","title":"কনটেন্ট টেমপ্লেট"},"contextmenu":{"options":"Context Menu Options"},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Advisory Title","cssClassInputLabel":"Stylesheet Classes","edit":"Edit Div","inlineStyleInputLabel":"Inline Style","langDirLTRLabel":"Left to Right (LTR)","langDirLabel":"Language Direction","langDirRTLLabel":"Right to Left (RTL)","languageCodeInputLabel":" Language Code","remove":"Remove Div","styleSelectLabel":"Style","title":"Create Div Container","toolbar":"Create Div Container"},"toolbar":{"toolbarCollapse":"Collapse Toolbar","toolbarExpand":"Expand Toolbar","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"Editor toolbars"},"elementspath":{"eleLabel":"Elements path","eleTitle":"%1 element"},"find":{"find":"খোজো","findOptions":"Find Options","findWhat":"যা খুঁজতে হবে:","matchCase":"কেস মিলাও","matchCyclic":"Match 
cyclic","matchWord":"পুরা শব্দ মেলাও","notFoundMsg":"আপনার উল্লেখিত টেকস্ট পাওয়া যায়নি","replace":"রিপ্লেস","replaceAll":"সব বদলে দাও","replaceSuccessMsg":"%1 occurrence(s) replaced.","replaceWith":"যার সাথে বদলাতে হবে:","title":"Find and Replace"},"fakeobjects":{"anchor":"Anchor","flash":"Flash Animation","hiddenfield":"Hidden Field","iframe":"IFrame","unknown":"Unknown Object"},"flash":{"access":"Script Access","accessAlways":"Always","accessNever":"Never","accessSameDomain":"Same domain","alignAbsBottom":"Abs নীচে","alignAbsMiddle":"Abs উপর","alignBaseline":"মূল রেখা","alignTextTop":"টেক্সট উপর","bgcolor":"বেকগ্রাউন্ড রং","chkFull":"Allow Fullscreen","chkLoop":"লূপ","chkMenu":"ফ্ল্যাশ মেনু এনাবল কর","chkPlay":"অটো প্লে","flashvars":"Variables for Flash","hSpace":"হরাইজন্টাল স্পেস","properties":"ফ্লাশ প্রোপার্টি","propertiesTab":"Properties","quality":"Quality","qualityAutoHigh":"Auto High","qualityAutoLow":"Auto Low","qualityBest":"Best","qualityHigh":"High","qualityLow":"Low","qualityMedium":"Medium","scale":"স্কেল","scaleAll":"সব দেখাও","scaleFit":"নিখুঁত ফিট","scaleNoBorder":"কোনো বর্ডার নেই","title":"ফ্ল্যাশ প্রোপার্টি","vSpace":"ভার্টিকেল স্পেস","validateHSpace":"HSpace must be a number.","validateSrc":"অনুগ্রহ করে URL লিংক টাইপ করুন","validateVSpace":"VSpace must be a number.","windowMode":"Window mode","windowModeOpaque":"Opaque","windowModeTransparent":"Transparent","windowModeWindow":"Window"},"font":{"fontSize":{"label":"সাইজ","voiceLabel":"Font Size","panelTitle":"সাইজ"},"label":"ফন্ট","panelTitle":"ফন্ট","voiceLabel":"ফন্ট"},"forms":{"button":{"title":"বাটন প্রোপার্টি","text":"টেক্সট (ভ্যালু)","type":"প্রকার","typeBtn":"Button","typeSbm":"Submit","typeRst":"Reset"},"checkboxAndRadio":{"checkboxTitle":"চেক বক্স প্রোপার্টি","radioTitle":"রেডিও বাটন প্রোপার্টি","value":"ভ্যালু","selected":"সিলেক্টেড"},"form":{"title":"ফর্ম প্রোপার্টি","menu":"ফর্ম প্রোপার্টি","action":"একশ্যন","method":"পদ্ধতি","encoding":"Encoding"},"hidden":{"title":"গুপ্ত ফীল্ড প্রোপার্টি","name":"নাম","value":"ভ্যালু"},"select":{"title":"বাছাই ফীল্ড প্রোপার্টি","selectInfo":"তথ্য","opAvail":"অন্যান্য বিকল্প","value":"ভ্যালু","size":"সাইজ","lines":"লাইন সমূহ","chkMulti":"একাধিক সিলেকশন এলাউ কর","opText":"টেক্সট","opValue":"ভ্যালু","btnAdd":"যুক্ত","btnModify":"বদলে দাও","btnUp":"উপর","btnDown":"নীচে","btnSetValue":"বাছাই করা ভ্যালু হিসেবে সেট কর","btnDelete":"ডিলীট"},"textarea":{"title":"টেক্সট এরিয়া প্রোপার্টি","cols":"কলাম","rows":"রো"},"textfield":{"title":"টেক্সট ফীল্ড প্রোপার্টি","name":"নাম","value":"ভ্যালু","charWidth":"ক্যারেক্টার প্রশস্ততা","maxChars":"সর্বাধিক ক্যারেক্টার","type":"টাইপ","typeText":"টেক্সট","typePass":"পাসওয়ার্ড","typeEmail":"Email","typeSearch":"Search","typeTel":"Telephone Number","typeUrl":"URL"}},"format":{"label":"ফন্ট ফরমেট","panelTitle":"ফন্ট ফরমেট","tag_address":"ঠিকানা","tag_div":"শীর্ষক (DIV)","tag_h1":"শীর্ষক ১","tag_h2":"শীর্ষক ২","tag_h3":"শীর্ষক ৩","tag_h4":"শীর্ষক ৪","tag_h5":"শীর্ষক ৫","tag_h6":"শীর্ষক ৬","tag_p":"সাধারণ","tag_pre":"ফর্মেটেড"},"horizontalrule":{"toolbar":"রেখা যুক্ত কর"},"iframe":{"border":"Show frame border","noUrl":"Please type the iframe URL","scrolling":"Enable scrollbars","title":"IFrame Properties","toolbar":"IFrame"},"image":{"alertUrl":"অনুগ্রহক করে ছবির URL টাইপ করুন","alt":"বিকল্প টেক্সট","border":"বর্ডার","btnUpload":"ইহাকে সার্ভারে প্রেরন কর","button2Img":"Do you want to transform the selected image button on a simple image?","hSpace":"হরাইজন্টাল স্পেস","img2Button":"Do you want to transform the selected image on a image 
button?","infoTab":"ছবির তথ্য","linkTab":"লিংক","lockRatio":"অনুপাত লক কর","menu":"ছবির প্রোপার্টি","resetSize":"সাইজ পূর্বাবস্থায় ফিরিয়ে দাও","title":"ছবির প্রোপার্টি","titleButton":"ছবি বাটন প্রোপার্টি","upload":"আপলোড","urlMissing":"Image source URL is missing.","vSpace":"ভার্টিকেল স্পেস","validateBorder":"Border must be a whole number.","validateHSpace":"HSpace must be a whole number.","validateVSpace":"VSpace must be a whole number."},"indent":{"indent":"ইনডেন্ট বাড়াও","outdent":"ইনডেন্ট কমাও"},"smiley":{"options":"Smiley Options","title":"স্মাইলী যুক্ত কর","toolbar":"স্মাইলী"},"justify":{"block":"ব্লক জাস্টিফাই","center":"মাঝ বরাবর ঘেষা","left":"বা দিকে ঘেঁষা","right":"ডান দিকে ঘেঁষা"},"link":{"acccessKey":"এক্সেস কী","advanced":"এডভান্সড","advisoryContentType":"পরামর্শ কন্টেন্টের প্রকার","advisoryTitle":"পরামর্শ শীর্ষক","anchor":{"toolbar":"নোঙ্গর","menu":"নোঙর প্রোপার্টি","title":"নোঙর প্রোপার্টি","name":"নোঙরের নাম","errorName":"নোঙরের নাম টাইপ করুন","remove":"Remove Anchor"},"anchorId":"নোঙরের আইডি দিয়ে","anchorName":"নোঙরের নাম দিয়ে","charset":"লিংক রিসোর্স ক্যারেক্টর সেট","cssClasses":"স্টাইল-শীট ক্লাস","emailAddress":"ইমেইল ঠিকানা","emailBody":"মেসেজের দেহ","emailSubject":"মেসেজের বিষয়","id":"আইডি","info":"লিংক তথ্য","langCode":"ভাষা লেখার দিক","langDir":"ভাষা লেখার দিক","langDirLTR":"বাম থেকে ডান (LTR)","langDirRTL":"ডান থেকে বাম (RTL)","menu":"লিংক সম্পাদন","name":"নাম","noAnchors":"(No anchors available in the document)","noEmail":"অনুগ্রহ করে ইমেইল এড্রেস টাইপ করুন","noUrl":"অনুগ্রহ করে URL লিংক টাইপ করুন","other":"<other>","popupDependent":"ডিপেন্ডেন্ট (Netscape)","popupFeatures":"পপআপ উইন্ডো ফীচার সমূহ","popupFullScreen":"পূর্ণ পর্দা জুড়ে (IE)","popupLeft":"বামের পজিশন","popupLocationBar":"লোকেশন বার","popupMenuBar":"মেন্যু বার","popupResizable":"Resizable","popupScrollBars":"স্ক্রল বার","popupStatusBar":"স্ট্যাটাস বার","popupToolbar":"টুল বার","popupTop":"ডানের পজিশন","rel":"Relationship","selectAnchor":"নোঙর বাছাই","styles":"স্টাইল","tabIndex":"ট্যাব ইন্ডেক্স","target":"টার্গেট","targetFrame":"<ফ্রেম>","targetFrameName":"টার্গেট ফ্রেমের নাম","targetPopup":"<পপআপ উইন্ডো>","targetPopupName":"পপআপ উইন্ডোর নাম","title":"লিংক","toAnchor":"এই পেজে নোঙর কর","toEmail":"ইমেইল","toUrl":"URL","toolbar":"লিংক যুক্ত কর","type":"লিংক প্রকার","unlink":"লিংক সরাও","upload":"আপলোড"},"list":{"bulletedlist":"বুলেট লিস্ট লেবেল","numberedlist":"সাংখ্যিক লিস্টের লেবেল"},"liststyle":{"armenian":"Armenian numbering","bulletedTitle":"Bulleted List Properties","circle":"Circle","decimal":"Decimal (1, 2, 3, etc.)","decimalLeadingZero":"Decimal leading zero (01, 02, 03, etc.)","disc":"Disc","georgian":"Georgian numbering (an, ban, gan, etc.)","lowerAlpha":"Lower Alpha (a, b, c, d, e, etc.)","lowerGreek":"Lower Greek (alpha, beta, gamma, etc.)","lowerRoman":"Lower Roman (i, ii, iii, iv, v, etc.)","none":"None","notset":"<not set>","numberedTitle":"Numbered List Properties","square":"Square","start":"Start","type":"Type","upperAlpha":"Upper Alpha (A, B, C, D, E, etc.)","upperRoman":"Upper Roman (I, II, III, IV, V, etc.)","validateStartNumber":"List start number must be a whole number."},"magicline":{"title":"Insert paragraph here"},"maximize":{"maximize":"Maximize","minimize":"Minimize"},"newpage":{"toolbar":"নতুন পেজ"},"pagebreak":{"alt":"Page Break","toolbar":"পেজ ব্রেক"},"pastetext":{"button":"সাদা টেক্সট হিসেবে পেস্ট কর","title":"সাদা টেক্সট হিসেবে পেস্ট কর"},"pastefromword":{"confirmCleanup":"The text you want to paste seems to be copied from Word. 
Do you want to clean it before pasting?","error":"It was not possible to clean up the pasted data due to an internal error","title":"পেস্ট (শব্দ)","toolbar":"পেস্ট (শব্দ)"},"preview":{"preview":"প্রিভিউ"},"print":{"toolbar":"প্রিন্ট"},"removeformat":{"toolbar":"ফরমেট সরাও"},"save":{"toolbar":"সংরক্ষন কর"},"selectall":{"toolbar":"সব সিলেক্ট কর"},"showblocks":{"toolbar":"Show Blocks"},"sourcearea":{"toolbar":"সোর্স"},"specialchar":{"options":"Special Character Options","title":"বিশেষ ক্যারেক্টার বাছাই কর","toolbar":"বিশেষ অক্ষর যুক্ত কর"},"scayt":{"about":"About SCAYT","aboutTab":"About","addWord":"Add Word","allCaps":"Ignore All-Caps Words","dic_create":"Create","dic_delete":"Delete","dic_field_name":"Dictionary name","dic_info":"Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type its name and click the Restore button.","dic_rename":"Rename","dic_restore":"Restore","dictionariesTab":"Dictionaries","disable":"Disable SCAYT","emptyDic":"Dictionary name should not be empty.","enable":"Enable SCAYT","ignore":"Ignore","ignoreAll":"Ignore All","ignoreDomainNames":"Ignore Domain Names","langs":"Languages","languagesTab":"Languages","mixedCase":"Ignore Words with Mixed Case","mixedWithDigits":"Ignore Words with Numbers","moreSuggestions":"More suggestions","opera_title":"Not supported by Opera","options":"Options","optionsTab":"Options","title":"Spell Check As You Type","toggle":"Toggle SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"স্টাইল","panelTitle":"Formatting Styles","panelTitle1":"Block Styles","panelTitle2":"Inline Styles","panelTitle3":"Object Styles"},"table":{"border":"বর্ডার সাইজ","caption":"শীর্ষক","cell":{"menu":"সেল","insertBefore":"Insert Cell Before","insertAfter":"Insert Cell After","deleteCell":"সেল মুছে দাও","merge":"সেল জোড়া দাও","mergeRight":"Merge Right","mergeDown":"Merge Down","splitHorizontal":"Split Cell Horizontally","splitVertical":"Split Cell Vertically","title":"Cell Properties","cellType":"Cell Type","rowSpan":"Rows Span","colSpan":"Columns Span","wordWrap":"Word Wrap","hAlign":"Horizontal Alignment","vAlign":"Vertical Alignment","alignBaseline":"Baseline","bgColor":"Background Color","borderColor":"Border Color","data":"Data","header":"Header","yes":"Yes","no":"No","invalidWidth":"Cell width must be a number.","invalidHeight":"Cell height must be a number.","invalidRowSpan":"Rows span must be a whole number.","invalidColSpan":"Columns span must be a whole number.","chooseColor":"Choose"},"cellPad":"সেল প্যাডিং","cellSpace":"সেল স্পেস","column":{"menu":"কলাম","insertBefore":"Insert Column Before","insertAfter":"Insert Column After","deleteColumn":"কলাম মুছে দাও"},"columns":"কলাম","deleteTable":"টেবিল ডিলীট কর","headers":"Headers","headersBoth":"Both","headersColumn":"First column","headersNone":"None","headersRow":"First Row","invalidBorder":"Border size must be a number.","invalidCellPadding":"Cell padding must be a positive number.","invalidCellSpacing":"Cell spacing must be a positive number.","invalidCols":"Number of columns must be a number greater than 0.","invalidHeight":"Table height must be a number.","invalidRows":"Number of rows must be a number greater than 0.","invalidWidth":"Table width must be a number.","menu":"টেবিল 
প্রোপার্টি","row":{"menu":"রো","insertBefore":"Insert Row Before","insertAfter":"Insert Row After","deleteRow":"রো মুছে দাও"},"rows":"রো","summary":"সারাংশ","title":"টেবিল প্রোপার্টি","toolbar":"টেবিলের লেবেল যুক্ত কর","widthPc":"শতকরা","widthPx":"পিক্সেল","widthUnit":"width unit"},"undo":{"redo":"রি-ডু","undo":"আনডু"},"wsc":{"btnIgnore":"ইগনোর কর","btnIgnoreAll":"সব ইগনোর কর","btnReplace":"বদলে দাও","btnReplaceAll":"সব বদলে দাও","btnUndo":"আন্ডু","changeTo":"এতে বদলাও","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"বানান পরীক্ষক ইনস্টল করা নেই। আপনি কি এখনই এটা ডাউনলোড করতে চান?","manyChanges":"বানান পরীক্ষা শেষ: %1 গুলো শব্দ বদলে গ্যাছে","noChanges":"বানান পরীক্ষা শেষ: কোন শব্দ পরিবর্তন করা হয়নি","noMispell":"বানান পরীক্ষা শেষ: কোন ভুল বানান পাওয়া যায়নি","noSuggestions":"- কোন সাজেশন নেই -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"শব্দকোষে নেই","oneChange":"বানান পরীক্ষা শেষ: একটি মাত্র শব্দ পরিবর্তন করা হয়েছে","progress":"বানান পরীক্ষা চলছে...","title":"Spell Check","toolbar":"বানান চেক"}}; | PypiClean |
/MSM_PELE-1.1.1-py3-none-any.whl/Helpers/system_prep.py | import os
import subprocess
import argparse
import MSM_PELE.Helpers.pele_env as pl
import MSM_PELE.constants as cs
class SystemBuilder(pl.EnviroBuilder):
def __init__(self, receptor, ligand, residue, pele_dir):
self.receptor = receptor
self.ligand = ligand
self.residue = residue
self.pele_dir = pele_dir
self.system = None if self.ligand else self.receptor
@classmethod
def build_system(cls, receptor, ligand, residue, pele_dir, output=False):
print(ligand)
SPYTHON = os.path.join(cs.SCHRODINGER, "utilities/python")
if ligand:
system = cls(receptor, ligand, residue, pele_dir)
system.lig_ref = os.path.join(pele_dir, "ligand.pdb")
subprocess.call("{} {} {} {} --mae".format(SPYTHON, __file__, system.ligand, system.lig_ref).split())
system.system = system.build_complex()
else:
system = cls(receptor, ligand, residue, pele_dir)
system.receptor, system.lig_ref = system.retrieve_receptor(output=output)
subprocess.call("{} {} {} {}".format(SPYTHON, __file__, system.lig_ref, pele_dir).split())
system.lig = "{}.mae".format(residue)
system.residue = residue
return system
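    # Illustrative usage sketch (file names are hypothetical):
    #
    #   system = SystemBuilder.build_system(
    #       "receptor.pdb", "ligand.mae", "LIG", pele_dir)
    #   print(system.system)  # path of the combined complex pdb
    #
    # Passing ligand=None instead extracts residue "LIG" from the receptor pdb.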
def build_complex(self):
"""
From the receptor and ligand in pdb build
another pdb with the whole complex
"""
complex_content = []
name = os.path.basename(os.path.splitext(self.receptor)[0])
self.complex = os.path.join(self.pele_dir, "{}_complex.pdb".format(name))
with open(self.receptor, 'r') as pdb_file:
receptor_text = [line for line in pdb_file if line.startswith("ATOM") or line.startswith("HETATM") or line.startswith("TER")]
with open(self.lig_ref, 'r') as pdb_file:
ligand_text = [line for line in pdb_file if line.startswith("HETATM")]
if not receptor_text or not ligand_text:
raise ValueError("The ligand_pdb was not properly created check your mae file")
complex_content.extend(receptor_text + ["TER\n"] + ligand_text + ["END"])
with open(self.complex, 'w') as fout:
fout.write("".join(complex_content))
return self.complex
def convert_mae(self):
"""
        Description: from each structure, write
        a .mae file of the ligand in the receptor.
        Output:
            structure_mae: path of the written .mae file
            res: residue name
"""
        from schrodinger import structure as st  # local import, as in the module-level helpers below
        for structure in st.StructureReader(self.lig_ref):
for residue in structure.residue:
res = residue.pdbres.strip()
str_name = "{}".format(res)
                try:
                    structure.write(str_name + ".mae")
                except ValueError:
                    # fall back to the bare residue name and retry the write
                    str_name = "{}".format(res)
                    structure.write(str_name + ".mae")
                structure_mae = "{}.mae".format(str_name)
return structure_mae, res
def retrieve_receptor(self, output=False):
"""
        Extracts the receptor and the ligand (self.residue) from the complex pdb (self.receptor).
        :param output: optional output path for the extracted ligand pdb
        :return: (receptor text, ligand pdb path)
"""
ligand = output if output else os.path.join(self.pele_dir, "ligand.pdb")
with open(self.receptor, 'r') as pdb_file:
receptor_text = [line for line in pdb_file if line.startswith("ATOM")]
with open(self.receptor, 'r') as pdb_file:
ligand_text = [line for line in pdb_file if line[17:20].strip() == self.residue]
if not receptor_text or not ligand_text:
raise ValueError("Something went wrong when extracting the ligand. Check residue&Chain on input")
with open(ligand, "w") as fout:
fout.write("".join(ligand_text))
return "".join(receptor_text), ligand
def convert_pdb(mae_file, output_dir):
from schrodinger import structure as st
for structure in st.StructureReader(mae_file):
structure.write(output_dir)
def convert_mae(pdb):
"""
    Description: from each structure, write
    a .mae file of the ligand in the receptor.
    Output:
        structure_mae: path of the written .mae file
        res: residue name
"""
from schrodinger import structure as st
for structure in st.StructureReader(pdb):
for residue in structure.residue:
res = residue.pdbres.strip()
str_name = "{}".format(res)
            try:
                structure.write(str_name + ".mae")
            except ValueError:
                # fall back to the bare residue name and retry the write
                str_name = "{}".format(res)
                structure.write(str_name + ".mae")
            structure_mae = "{}.mae".format(str_name)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("ligand", help="ligand input file to convert")
parser.add_argument("output_dir", help="output directory to dump the converted file")
parser.add_argument("--mae", action="store_true", help="Whether to convert to mae (--mae) or pdb (not --mae)")
args = parser.parse_args()
return args.ligand, args.output_dir, args.mae
if __name__ == "__main__":
input_file, output_dir, ligand_mae = parse_args()
print(input_file)
if ligand_mae:
convert_pdb(input_file, output_dir)
else:
convert_mae(input_file) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/editor/plugins/ShowBlockNodes.js.uncompressed.js | define("dojox/editor/plugins/ShowBlockNodes", [
"dojo",
"dijit",
"dojox",
"dijit/_editor/_Plugin",
"dijit/form/Button",
"dijit/form/ToggleButton",
"dojo/_base/connect",
"dojo/_base/declare",
"dojo/i18n",
"dojo/i18n!dojox/editor/plugins/nls/ShowBlockNodes"
], function(dojo, dijit, dojox) {
dojo.declare("dojox.editor.plugins.ShowBlockNodes",dijit._editor._Plugin,{
// summary:
	//		This plugin provides ShowBlockNodes capability to the editor. When
// clicked, the document in the editor will apply a class to specific
// block nodes to make them visible in the layout. This info is not
// exposed/extracted when the editor value is obtained, it is purely for help
// while working on the page.
// useDefaultCommand [protected] boolean
// Over-ride indicating that the command processing is done all by this plugin.
useDefaultCommand: false,
// iconClassPrefix: [const] String
// The CSS class name for the button node is formed from `iconClassPrefix` and `command`
iconClassPrefix: "dijitAdditionalEditorIcon",
// _styled [private] boolean
// Flag indicating the document has had the style updates applied.
_styled: false,
_initButton: function(){
// summary:
// Over-ride for creation of the preview button.
var strings = dojo.i18n.getLocalization("dojox.editor.plugins", "ShowBlockNodes");
this.button = new dijit.form.ToggleButton({
label: strings["showBlockNodes"],
showLabel: false,
iconClass: this.iconClassPrefix + " " + this.iconClassPrefix + "ShowBlockNodes",
tabIndex: "-1",
onChange: dojo.hitch(this, "_showBlocks")
});
this.editor.addKeyHandler(dojo.keys.F9, true, true, dojo.hitch(this, this.toggle));
},
updateState: function(){
// summary:
// Over-ride for button state control for disabled to work.
this.button.set("disabled", this.get("disabled"));
},
setEditor: function(editor){
// summary:
// Over-ride for the setting of the editor.
// editor: Object
// The editor to configure for this plugin to use.
this.editor = editor;
this._initButton();
},
toggle: function(){
// summary:
// Function to allow programmatic toggling of the view.
this.button.set("checked", !this.button.get("checked"));
},
_showBlocks: function(show){
// summary:
// Function to trigger printing of the editor document
// tags:
// private
var doc = this.editor.document;
if(!this._styled){
try{
//Attempt to inject our specialized style rules for doing this.
this._styled = true;
var style = "";
var blocks = ["div", "p", "ul", "ol", "table", "h1",
"h2", "h3", "h4", "h5", "h6", "pre", "dir", "center",
"blockquote", "form", "fieldset", "address", "object",
"pre", "hr", "ins", "noscript", "li", "map", "button",
"dd", "dt"];
var template = "@media screen {\n" +
"\t.editorShowBlocks {TAG} {\n" +
"\t\tbackground-image: url({MODURL}/images/blockelems/{TAG}.gif);\n" +
"\t\tbackground-repeat: no-repeat;\n" +
"\t\tbackground-position: top left;\n" +
"\t\tborder-width: 1px;\n" +
"\t\tborder-style: dashed;\n" +
"\t\tborder-color: #D0D0D0;\n" +
"\t\tpadding-top: 15px;\n" +
"\t\tpadding-left: 15px;\n" +
"\t}\n" +
"}\n";
dojo.forEach(blocks, function(tag){
style += template.replace(/\{TAG\}/gi, tag);
});
//Finally associate in the image locations based off the module url.
var modurl = dojo.moduleUrl(dojox._scopeName, "editor/plugins/resources").toString();
if(!(modurl.match(/^https?:\/\//i)) &&
!(modurl.match(/^file:\/\//i))){
// We have to root it to the page location on webkit for some nutball reason.
// Probably has to do with how iframe was loaded.
var bUrl;
if(modurl.charAt(0) === "/"){
//Absolute path on the server, so lets handle...
var proto = dojo.doc.location.protocol;
var hostn = dojo.doc.location.host;
bUrl = proto + "//" + hostn;
}else{
bUrl = this._calcBaseUrl(dojo.global.location.href);
}
if(bUrl[bUrl.length - 1] !== "/" && modurl.charAt(0) !== "/"){
bUrl += "/";
}
modurl = bUrl + modurl;
}
// Update all the urls.
style = style.replace(/\{MODURL\}/gi, modurl);
if(!dojo.isIE){
var sNode = doc.createElement("style");
sNode.appendChild(doc.createTextNode(style));
doc.getElementsByTagName("head")[0].appendChild(sNode);
}else{
var ss = doc.createStyleSheet("");
ss.cssText = style;
}
}catch(e){
console.warn(e);
}
}
// Apply/remove the classes based on state.
if(show){
dojo.addClass(this.editor.editNode, "editorShowBlocks");
}else{
dojo.removeClass(this.editor.editNode, "editorShowBlocks");
}
},
_calcBaseUrl: function(fullUrl) {
// summary:
// Internal function used to figure out the full root url (no relatives)
// for loading images in the styles in the iframe.
// fullUrl: String
// The full url to tear down to the base.
// tags:
// private
var baseUrl = null;
if (fullUrl !== null) {
// Check to see if we need to strip off any query parameters from the Url.
var index = fullUrl.indexOf("?");
if (index != -1) {
fullUrl = fullUrl.substring(0,index);
}
// Now we need to trim if necessary. If it ends in /, then we don't
// have a filename to trim off so we can return.
index = fullUrl.lastIndexOf("/");
if (index > 0 && index < fullUrl.length) {
baseUrl = fullUrl.substring(0,index);
}else{
baseUrl = fullUrl;
}
}
return baseUrl; //String
}
});
// Register this plugin.
dojo.subscribe(dijit._scopeName + ".Editor.getPlugin",null,function(o){
if(o.plugin){ return; }
var name = o.args.name.toLowerCase();
if(name === "showblocknodes"){
o.plugin = new dojox.editor.plugins.ShowBlockNodes();
}
});
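	// Usage sketch (assumes a standard dijit Editor page; the plugin name is
	// the lower-cased key matched in the subscriber above):
	//
	//   var editor = new dijit.Editor(
	//       {extraPlugins: ["showblocknodes"]}, dojo.byId("editorNode"));
	//
	// Ctrl+Shift+F9 also toggles the view, via the key handler in _initButton.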
return dojox.editor.plugins.ShowBlockNodes;
}); | PypiClean |
/ConferenceCorpus-0.1.1.tar.gz/ConferenceCorpus-0.1.1/corpus/datasources/confref.py | import html
import re
import os
import json
from corpus.event import Event,EventSeries,EventManager,EventSeriesManager
from corpus.eventcorpus import EventDataSourceConfig,EventDataSource
from lodstorage.storageconfig import StorageConfig
from lodstorage.sql import SQLDB
from ptp.ordinal import Ordinal
class Confref(EventDataSource):
'''
ConfRef platform
'''
sourceConfig=EventDataSourceConfig(lookupId="confref",name="confref.org",url="http://portal.confref.org",title="ConfRef",tableSuffix="confref",locationAttribute="location")
def __init__(self):
'''
construct me
'''
super().__init__(ConfrefEventManager(),ConfrefEventSeriesManager(),Confref.sourceConfig)
@staticmethod
def htmlUnEscapeDict(htmlDict:dict):
'''
perform html unescaping on the given dict
Args:
htmlDict(dict): the dictionary to unescape
'''
for key in htmlDict:
value=htmlDict[key]
if value is not None and type(value) is str:
value=html.unescape(value)
htmlDict[key]=value
class ConfrefEvent(Event):
'''
a scientific event derived from Confref
'''
@staticmethod
def postProcessLodRecord(rawEvent:dict):
'''
fix the given raw Event
Args:
rawEvent(dict): the raw event record to fix
'''
Confref.htmlUnEscapeDict(rawEvent)
eventId=rawEvent.pop('id')
# rename number to ordinal
if 'number' in rawEvent:
rawEvent['ordinal']=rawEvent.pop('number')
# handle area and confSeries dicts
_area=rawEvent.pop('area')
if isinstance(_area,dict):
Confref.htmlUnEscapeDict(_area)
rawEvent["area"]=_area["value"]
pass
_confSeries=rawEvent.pop('confSeries')
if isinstance(_confSeries,dict):
# dict:
# {'id': 'btw',
# 'issn': None,
# 'eissn': None,
# 'dblpId': 'https://dblp.org/db/conf/btw/',
# 'name': 'Datenbanksysteme für Business, Technologie und Web Datenbanksysteme in Büro, Technik und Wissenschaft',
# 'description': None
# }
#
Confref.htmlUnEscapeDict(_confSeries)
dblpSeriesId=_confSeries["dblpId"]
if dblpSeriesId is not None:
m=re.match("https://dblp.org/db/(.*)/",dblpSeriesId)
if m:
dblpSeriesId=m.group(1)
rawEvent['dblpSeriesId']=dblpSeriesId
rawEvent['seriesId']=_confSeries["id"]
rawEvent['seriesTitle']=_confSeries["name"]
rawEvent['seriesIssn']=_confSeries["issn"]
rawEvent['seriesEissn']=_confSeries["eissn"]
rawEvent['eventId']=eventId
rawEvent['url']=f'http://portal.confref.org/list/{eventId}'
rawEvent['title']=rawEvent.pop('name')
rawEvent["source"]="confref"
location=None
if "city" in rawEvent and "country" in rawEvent:
location=f"""{rawEvent["city"]},{rawEvent["country"]}"""
rawEvent["location"]=location
Ordinal.addParsedOrdinal(rawEvent)
pass
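    # Sketch of the flattening performed above (record abbreviated, values
    # hypothetical): {"id": "btw2017", "name": "...", "area": {"value": "..."},
    # "confSeries": {"id": "btw", "dblpId": "https://dblp.org/db/conf/btw/", ...}}
    # becomes a flat record with eventId, title, area, seriesId,
    # dblpSeriesId ("conf/btw"), url, location and a parsed ordinal added.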
def fromDict(self,rawEvent:dict):
'''
get me from the given dict
'''
super().fromDict(rawEvent)
class ConfrefEventSeries(Event):
'''
a scientific event series derived from Confref
'''
class ConfrefEventManager(EventManager):
'''
    ConfRef event manager
'''
def __init__(self, config: StorageConfig = None):
'''
Constructor
'''
super().__init__(name="ConfrefEvents", sourceConfig=Confref.sourceConfig, clazz=ConfrefEvent, config=config)
def configure(self):
'''
configure me
'''
# nothing to do - there is a get ListOfDicts below
def getListOfDicts(self):
'''
get my content from the json file
'''
cachePath=self.config.getCachePath()
jsondir=f"{cachePath}/confref"
if not os.path.exists(jsondir):
os.makedirs(jsondir)
self.jsonFilePath=f"{jsondir}/confref-conferences.json"
with open(self.jsonFilePath) as jsonFile:
rawEvents=json.load(jsonFile)
lod=[]
for rawEvent in rawEvents:
lod.append(rawEvent)
self.postProcessLodRecords(lod)
return lod
class ConfrefEventSeriesManager(EventSeriesManager):
'''
Confref event series handling
'''
def __init__(self,config:StorageConfig=None):
'''
Constructor
'''
super().__init__(name="ConfrefEventSeries", sourceConfig=Confref.sourceConfig,clazz=ConfrefEventSeries,config=config)
def configure(self):
'''
configure me
'''
# nothing to do getListOfDicts is defined
def getListOfDicts(self):
'''
get my data
'''
query="""select dblpSeriesId as eventSeriesId,acronym,seriesTitle as title,count(*) as count,min(year) as minYear,max(year) as maxYear
from event_confref
where dblpSeriesId is not Null
group by dblpSeriesId"""
sqlDB=SQLDB(self.getCacheFile())
listOfDicts=sqlDB.query(query)
self.setAllAttr(listOfDicts, "source", "confref")
self.postProcessLodRecords(listOfDicts)
return listOfDicts | PypiClean |
/MTGProxyPrinter-0.25.0.tar.gz/MTGProxyPrinter-0.25.0/mtg_proxy_printer/document_controller/page_actions.py |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import functools
import typing
if typing.TYPE_CHECKING:
from mtg_proxy_printer.model.document import Document
from mtg_proxy_printer.model.document_page import Page
from ._interface import DocumentAction, IllegalStateError, Self
from mtg_proxy_printer.logger import get_logger
logger = get_logger(__name__)
del get_logger
__all__ = [
"ActionNewPage",
"ActionRemovePage",
]
class ActionNewPage(DocumentAction):
"""
Insert count new, empty pages at the given index. Positions are clamped into the range [0, page_count].
If given None for the position, append the page to the document end instead. Page count defaults to 1.
"""
COMPARISON_ATTRIBUTES = ["position", "count"]
def __init__(self, position: int = None, *, count: int = 1):
self.position = position
self.count = count
def apply(self, document: "Document") -> Self:
self.position = document.rowCount() if self.position is None \
else max(0, min(self.position, document.rowCount()))
document.beginInsertRows(document.INVALID_INDEX, self.position, self.position+self.count-1)
if self.position == document.rowCount():
for _ in range(self.count):
new_page = Page()
document.pages.append(new_page)
document.page_index_cache[id(new_page)] = len(document.pages) - 1
else:
for _ in range(self.count):
document.pages.insert(self.position, Page())
document.recreate_page_index_cache()
document.endInsertRows()
return super().apply(document)
def undo(self, document: "Document") -> Self:
if self.position is None:
raise IllegalStateError("Page position not set")
ActionRemovePage(self.position, self.count).apply(document)
return self
@functools.cached_property
def as_str(self):
if self.count == 1:
return f"Add page {self.position+1}"
return f"Add pages {self.position+1}-{self.position+self.count}"
class ActionRemovePage(DocumentAction):
"""
Delete count pages starting at the given index.
If position is None, start deleting at the current page instead.
"""
COMPARISON_ATTRIBUTES = ["position", "count", "removed_all_pages", "currently_edited_page", "removed_pages"]
def __init__(self, position: int = None, count: int = 1):
self.position = position
self.count = count
self.removed_pages: typing.List[Page] = []
self.currently_edited_page = None # Set, if the currently edited page is removed
self.removed_all_pages: bool = False
def apply(self, document: "Document") -> Self:
self.position = first_index = self.position if self.position is not None \
else document.find_page_list_index(document.currently_edited_page)
last_index = first_index + self.count - 1
logger.debug(f"Removing pages {first_index} to {last_index}. {document.rowCount()=}")
self.removed_pages[:] = document.pages[first_index:last_index+1]
# Note: Can not use "currently_edited_page in removed_pages", because the in operator does not check for
# object identity, which is required here.
currently_edited_page_removed = \
first_index <= document.find_page_list_index(document.currently_edited_page) <= last_index
if currently_edited_page_removed:
self.currently_edited_page = document.currently_edited_page
document.beginRemoveRows(document.INVALID_INDEX, first_index, last_index)
del document.pages[first_index:last_index+1]
document.recreate_page_index_cache()
document.endRemoveRows()
if not document.pages:
self.removed_all_pages = True
ActionNewPage().apply(document)
document.set_currently_edited_page(document.pages[0])
elif currently_edited_page_removed:
newly_selected_page = min(first_index, document.rowCount()-1)
logger.debug(f"Currently edited page is removed, switching to page {newly_selected_page}")
# Since the page list is non-empty, there is always a page to select.
# Choose the first after the removed range or the last, whichever comes first.
document.set_currently_edited_page(document.pages[newly_selected_page])
return self
def undo(self, document: "Document") -> Self:
start = self.position
if start is None:
raise IllegalStateError("Cannot undo page removal without location to restore")
end = start + len(self.removed_pages) - 1
document.beginInsertRows(document.INVALID_INDEX, start, end)
if start == document.rowCount():
self._append_pages(document, start)
else:
self._insert_pages(document, start)
document.endInsertRows()
if self.currently_edited_page is not None:
document.set_currently_edited_page(self.currently_edited_page)
if self.removed_all_pages:
# The Action replaced the whole document with an empty page during apply().
# To undo the creation of the empty replacement page, delete the now obsolete page
page_to_remove = end + 1
document.beginRemoveRows(document.INVALID_INDEX, page_to_remove, page_to_remove)
del document.page_index_cache[id(document.pages[page_to_remove])]
del document.pages[page_to_remove]
document.endRemoveRows()
# Clear state gathered during apply()
self.removed_pages.clear()
self.currently_edited_page = None
self.removed_all_pages = False
return self
def _append_pages(self, document: "Document", start: int):
document.pages += self.removed_pages
document.page_index_cache.update(
(id(page), index) for index, page in enumerate(self.removed_pages, start=start)
)
def _insert_pages(self, document: "Document", start: int):
for index, page in enumerate(self.removed_pages, start=start):
document.pages.insert(index, page)
document.recreate_page_index_cache()
@functools.cached_property
def as_str(self):
cards_removed = sum(map(len, self.removed_pages))
if self.count == 1:
return f"Remove page {self.position+1} containing {cards_removed} cards"
return f"Remove pages {self.position+1}-{self.position+self.count} containing {cards_removed} cards total" | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py | _base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) | PypiClean |
/GloboNetworkAPI-0.9.6.tar.gz/GloboNetworkAPI-0.9.6/networkapiclient/ApiObjectType.py | from networkapiclient.ApiGenericClient import ApiGenericClient
from networkapiclient.utils import build_uri_with_ids
class ApiObjectType(ApiGenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(ApiObjectType, self).__init__(
networkapi_url,
user,
password,
user_ldap
)
def search(self, **kwargs):
"""
Method to search object types based on extends search.
:param search: Dict containing QuerySets to find object types.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing object types
"""
return super(ApiObjectType, self).get(self.prepare_url('api/v3/object-type/',
kwargs))
def get(self, ids, **kwargs):
"""
Method to get object types by their ids
:param ids: List containing identifiers of object types
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing object types
"""
url = build_uri_with_ids('api/v3/object-type/%s/', ids)
return super(ApiObjectType, self).get(self.prepare_url(url, kwargs)) | PypiClean |
/HallPy_Teach-0.1.7.tar.gz/HallPy_Teach-0.1.7/src/HallPy_Teach/__init__.py | import re
import pyvisa
from .experiments import curieWeiss, hallEffect
from IPython.core.display import display
from IPython.display import clear_output
import ipywidgets as widgets
from .constants import supportedInstruments, serialRegex
from .helper import reconnectInstructions, getInstTypeCount, filterArrByKey
allExperiments = [
curieWeiss,
hallEffect,
]
def initInstruments(inGui: bool = False):
"""Initializing and recognising connected equipment.
    This function performs the setup for any of the experiments that use HallPy_Teach. It recognises the connected
    instruments and provides them in the form of `inst` objects. It also classifies the equipment by its use, based
    on manufacturer and model. Equipment is queried using the pyvisa library (`inst.query("*IDN?")`).
    The list of supported instruments is in the constants module (mentioned in the See Also section).
Parameters
----------
inGui: bool, default=False
Bool to check if gui is being used (if using Setup() the whole experiment setup process is done via GUI)
See Also
--------
    + constants.supportedInstruments : Used to classify instruments
+ Setup() : Used to use library with GUI in Jupyter Notebook / Lab
Returns
-------
list[object]
Array of objects containing information about the connected instruments
Examples
--------
Example of 2 found instruments:
[
{
'inst': USBInstrument, #PyVisa Object: to be used to communicate with instrument eg.:
multimeter['inst'].query('*IDN?')
'name': 'KEITHLEY INSTRUMENTS INC.,MODEL 2110,8014885,02.03-03-20', #String: Name of instrument from
inst.query('*IDN?')
'resName': 'USB0::0x5E6::0x2110::8014885::INSTR', #String: Name of instrument USB resource
            'type': 'Multimeter' #String: Type of instrument. Other types: 'LCR Meter', 'Power Supply'
},
{
'inst': SerialInstrument, #PyVisa Object
'name': 'B&K Precision ,891,468L20200...', #String
'resName': 'ASLR::INSTR', #String
'type': 'LCR Meter' #String
}
]
"""
rm = pyvisa.ResourceManager()
resList = rm.list_resources()
instruments = []
# Looping through all connected USB devices to look for usable instruments
for res in resList:
try:
# Initiating communication with instrument
instResource = rm.open_resource(res)
# Getting instrument name - if successful, it is supported by PyVisa and is an Instrument not just
# another USB device
name = instResource.query("*IDN?")
# Creating the instrument object to be used in the rest of the library
inst = {
"inst": instResource,
"name": name,
"resName": res
}
# Defining instrument type (see supported instruments in hp.constants.supportedInstruments)
for instrumentType in supportedInstruments.keys():
for supportedInstrumentName in supportedInstruments[instrumentType]:
if supportedInstrumentName in name:
inst['type'] = instrumentType
# Defining instrument type as Unknown if instrument cannot be classified
if len(inst.keys()) == 3:
inst["type"] = "Unknown"
# Adding instrument to the list of all instruments usable by HallPy_Teach_uofgPhys
instruments.append(inst)
# Error indicates that the USB device is incompatible with PyVisa
except pyvisa.VisaIOError:
pass
finally:
pass
# Getting instrument count by instrument type
instTypeCount = getInstTypeCount(instruments)
# Raising error if no instruments are connected.
if all(instrumentCount == 0 for instrumentCount in instTypeCount.values()):
print("\x1b[;43m No instruments could be recognised / contacted \x1b[m")
print('')
reconnectInstructions(inGui)
raise Exception("No instruments could be recognised / contacted")
else:
# Showing connected instruments to user
countStr = ""
for instrumentType in instTypeCount.keys():
if instTypeCount[instrumentType] != 0:
countStr = countStr + str(instTypeCount[instrumentType]) + " " + instrumentType + "(s) "
print(countStr)
print('')
reconnectInstructions(inGui)
# Returning array of instruments : See documentation at the start of the function.
return instruments
# noinspection PyUnusedLocal
class Setup:
"""Setting up instruments with GUI in jupyter python.
    This class uses initInstruments() and the individual experiments' setup functions to set up the instruments for
    performing the selected experiment. Subsequently, the user will have to pass the classInstance.expInsts object to
    the doExperiment() function to perform the given experiment.
See Also
--------
+ initInstruments() : Setup class uses this function to find all connected instruments
+ hallEffect.doExperiment() : Used after Setup() class initiation - Example doExperiment() function
+ hallEffect.setup() : Used in the Setup() class to set up selected experiment from the GUI
Notes
-------
    Use classInstanceName.expInsts in the doExperiment() function to perform the given experiment.
Example
------
In jupyter python:
>>> import HallPy_Teach as Teach
>>> exp = Teach.Setup()
>>> data = exp.doExperiment(exp.expInsts)
Same as doing the following:
>>> import HallPy_Teach.experiments.hallEffect as Experiment
>>> import HallPy_Teach as Teach
>>> insts = Teach.initInstruments()
>>> expInsts = Experiment.setup(insts)
>>> data = Experiment.doExperiment(expInsts)
"""
def __init__(self, btn=None):
# Getting all experiments in the library
expChoices = []
for experiment in allExperiments:
expChoices.append((experiment.expName, experiment))
# Setting up UI buttons and dropdowns for later use
self.restartSetupBtn = widgets.Button(
description="Restart Setup",
icon="play",
disabled=True
)
self.pickExpDropdown = widgets.Dropdown(options=expChoices, disabled=False)
self.submitBtn = widgets.Button(description="Setup Experiment", icon="flask")
self.submitBtn.on_click(self.handle_pickExpSubmit)
# Objects and functions to be used after class instance is set up
self.expInsts = None
self.doExperiment = None
clear_output()
self.instruments = initInstruments(inGui=True)
# Getting user input for experiment choice
print(" ")
print("Choose experiment to perform")
# noinspection PyTypeChecker
display(widgets.VBox([self.pickExpDropdown, self.submitBtn]))
# Getting serial assignment : what instrument is performing what function based on requiredInstruments object
# defined in the experiment file
def getUserSerialAssignment(self, expSetupFunc, expReq, availableInsts, expName):
serials = {}
serialDropdownsByType = {}
assignSerialsBtn = widgets.Button(
description="Assign Instruments",
icon="tachometer"
)
for instType in expReq.keys():
serialDropdownsByType[instType] = {}
if len(expReq[instType]) > 1:
print("Assign", instType + "(s)")
availableSerials = []
for inst in filterArrByKey(availableInsts, "type", instType):
regex = ""
for instPartialName in serialRegex.keys():
if instPartialName in inst["name"]:
regex = serialRegex[instPartialName]
if regex == "":
raise Exception("Regular expression not defined for given instrument")
serial = re.search(regex, inst["name"]).group()
availableSerials.append((serial, serial))
for neededInst in expReq[instType]:
instSerialDropdown = widgets.Dropdown(
description=neededInst["purpose"],
options=availableSerials
)
serialDropdownsByType[instType][neededInst["var"]] = instSerialDropdown
# noinspection PyTypeChecker
display(widgets.VBox(list(serialDropdownsByType[instType].values())))
def handle_submitSerials(assignSerialsButton):
for dropdownInstType in serialDropdownsByType.keys():
for instNeededVar in serialDropdownsByType[dropdownInstType].keys():
serials[instNeededVar] = serialDropdownsByType[dropdownInstType][instNeededVar].value
doExecAssignment = True
for singleSerial in serials.values():
if list(serials.values()).count(singleSerial) > 1:
print("\x1b[;43m You cannot pick the same device for more than one purpose \x1b[m ")
doExecAssignment = False
break
if doExecAssignment:
clear_output()
self.expInsts = self.assignInstsAndSetupExp(
expSetupFunc=expSetupFunc,
expReq=expReq,
availableInsts=availableInsts,
expName=expName,
pickedSerials=serials
)
return self.expInsts
assignSerialsBtn.on_click(handle_submitSerials)
display(assignSerialsBtn)
# performing the experiment.setup() function for selected experiment.
def assignInstsAndSetupExp(self, expSetupFunc, expReq, availableInsts, expName, pickedSerials=None):
if pickedSerials is None:
pickedSerials = {}
try:
# If serials are assigned, setting up experiment instruments with serials
if len(pickedSerials.keys()) > 0:
expInsts = expSetupFunc(instruments=availableInsts, serials=pickedSerials, inGui=True)
else:
expInsts = expSetupFunc(instruments=availableInsts, inGui=True)
return expInsts
except Exception as errMsg:
errMsg = str(errMsg).lower()
# if experiment requires multiple of one type of instrument getting serial assignment from user
if "missing serial" in errMsg:
self.getUserSerialAssignment(
expSetupFunc=expSetupFunc,
expReq=expReq,
availableInsts=availableInsts,
expName=expName
)
# Checking if error is for missing required instruments.
elif "connected" in errMsg:
print('')
print("All instruments required for", expName)
for reqInstType in expReq.keys():
for inst in expReq[reqInstType]:
print(" -", reqInstType, "for", inst['purpose'], "measurement")
print('')
reconnectInstructions(inGui=True)
self.restartSetupBtn.disabled = False
self.restartSetupBtn.on_click(Setup)
# noinspection PyTypeChecker
display(widgets.VBox([self.restartSetupBtn]))
# Raising all other errors
else:
raise
# Submit handler after picking experiment.
def handle_pickExpSubmit(self, submitBtnAfterClick=None):
clear_output()
expSetupFunc = self.pickExpDropdown.value.setup
expReq = self.pickExpDropdown.value.requiredEquipment
self.doExperiment = self.pickExpDropdown.value.doExperiment
expName = self.pickExpDropdown.label
self.pickExpDropdown.close = True
submitBtnAfterClick.close = True
try:
self.expInsts = self.assignInstsAndSetupExp(
expName=expName,
expSetupFunc=expSetupFunc,
expReq=expReq,
availableInsts=self.instruments
)
return self.expInsts
except Exception as errMsg:
self.restartSetupBtn.on_click(Setup)
self.restartSetupBtn.disabled = False
print(errMsg)
# noinspection PyTypeChecker
display(widgets.VBox([self.restartSetupBtn])) | PypiClean |
/CHSPy-1.2.2.tar.gz/CHSPy-1.2.2/docs/index.rst | CHSPy (Cubic Hermite Splines for Python)
========================================
This module provides Python tools for `cubic Hermite splines <https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_ with one argument (time) and multiple values (:math:`ℝ→ℝ^n`).
It was branched off from `JiTCDDE <http://github.com/neurophysik/jitcdde>`_, which uses it for representing the past of a delay differential equation.
CHSPy is not optimised for efficiency; however, it should be fairly efficient for high-dimensionally valued splines.
Each spline (`CubicHermiteSpline`) is stored as a series of *anchors* (using the `Anchor` class) each of which contains:
* a time point (`time`),
* an :math:`n`-dimensional state (`state`),
* an :math:`n`-dimensional temporal derivative (`diff`).
Between such anchors, the spline is uniquely described by a polynomial of third degree. In other words, the spline is a piecewise cubic Hermite interpolant of its anchors.
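For orientation, here is a minimal sketch of constructing and evaluating such a spline by hand (the class and method names are those documented in the command reference below; the anchor values are merely illustrative)::
    from chspy import CubicHermiteSpline
    spline = CubicHermiteSpline(n=3)  # three-dimensionally valued
    spline.add((0, [1.0, 3.0, 2.0], [0.0, 1.0, 0.0]))  # anchor: time, state, diff
    spline.add((1, [3.0, 1.0, 1.0], [0.0, 0.0, 0.0]))
    spline.add((4, [0.0, 2.0, 1.0], [0.0, 1.0, 0.0]))
    print(spline.get_state([2.5]))  # interpolated state between anchors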
Example
-------
The following example implements a simple three-dimensional spline with three anchors (at :math:`t=0`, :math:`t=1`, and :math:`t=4`) and plots it.
.. plot:: ../examples/simple_example.py
:include-source:
The markers depict the anchors.
Note how the slope at the anchors is zero for Components 0 and 2 (which is how we defined the spline), while it isn’t for Component 1.
Command Reference
-----------------
.. automodule:: _chspy
:members:
| PypiClean |
/GoogleAppEnginePipeline-1.9.22.1.tar.gz/GoogleAppEnginePipeline-1.9.22.1/pipeline/ui/jquery.json.min.js | (function($){$.toJSON=function(o)
{if(typeof(JSON)=='object'&&JSON.stringify)
return JSON.stringify(o);var type=typeof(o);if(o===null)
return"null";if(type=="undefined")
return undefined;if(type=="number"||type=="boolean")
return o+"";if(type=="string")
return $.quoteString(o);if(type=='object')
{if(typeof o.toJSON=="function")
return $.toJSON(o.toJSON());if(o.constructor===Date)
{var month=o.getUTCMonth()+1;if(month<10)month='0'+month;var day=o.getUTCDate();if(day<10)day='0'+day;var year=o.getUTCFullYear();var hours=o.getUTCHours();if(hours<10)hours='0'+hours;var minutes=o.getUTCMinutes();if(minutes<10)minutes='0'+minutes;var seconds=o.getUTCSeconds();if(seconds<10)seconds='0'+seconds;var milli=o.getUTCMilliseconds();if(milli<100)milli='0'+milli;if(milli<10)milli='0'+milli;return'"'+year+'-'+month+'-'+day+'T'+
hours+':'+minutes+':'+seconds+'.'+milli+'Z"';}
if(o.constructor===Array)
{var ret=[];for(var i=0;i<o.length;i++)
ret.push($.toJSON(o[i])||"null");return"["+ret.join(",")+"]";}
var pairs=[];for(var k in o){var name;var type=typeof k;if(type=="number")
name='"'+k+'"';else if(type=="string")
name=$.quoteString(k);else
continue;if(typeof o[k]=="function")
continue;var val=$.toJSON(o[k]);pairs.push(name+":"+val);}
return"{"+pairs.join(", ")+"}";}};$.evalJSON=function(src)
{if(typeof(JSON)=='object'&&JSON.parse)
return JSON.parse(src);return eval("("+src+")");};$.secureEvalJSON=function(src)
{if(typeof(JSON)=='object'&&JSON.parse)
return JSON.parse(src);var filtered=src;filtered=filtered.replace(/\\["\\\/bfnrtu]/g,'@');filtered=filtered.replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,']');filtered=filtered.replace(/(?:^|:|,)(?:\s*\[)+/g,'');if(/^[\],:{}\s]*$/.test(filtered))
return eval("("+src+")");else
throw new SyntaxError("Error parsing JSON, source is not valid.");};$.quoteString=function(string)
{if(string.match(_escapeable))
{return'"'+string.replace(_escapeable,function(a)
{var c=_meta[a];if(typeof c==='string')return c;c=a.charCodeAt();return'\\u00'+Math.floor(c/16).toString(16)+(c%16).toString(16);})+'"';}
return'"'+string+'"';};var _escapeable=/["\\\x00-\x1f\x7f-\x9f]/g;var _meta={'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','"':'\\"','\\':'\\\\'};})(jQuery); | PypiClean |
/Ardy-0.0.6.tar.gz/Ardy-0.0.6/ardy/config/__init__.py | from __future__ import unicode_literals, print_function
import json
import os
from abc import ABCMeta
from ardy.config.exceptions import ArdyRequiredKeyError
from ardy.core.exceptions import ArdyLambdaNotExistsError
from ardy.core.exceptions import ArdyNoFileError, ArdyNoDirError, ArdyEnvironmentNotExistsError
from ardy.utils.log import logger
class BaseConfig(dict):
environment = None
__metaclass__ = ABCMeta
def __init__(self, *args, **kwargs):
super(BaseConfig, self).__init__(**kwargs)
def __getattr__(self, name, *args, **kwargs):
return self[name]
def __getitem__(self, key):
val = dict.__getitem__(self, key)
return val
def __setitem__(self, key, val):
dict.__setitem__(self, key, val)
def __repr__(self):
dictrepr = dict.__repr__(self)
return '%s(%s)' % (type(self).__name__, dictrepr)
def set_environment(self, environment=False):
logger.debug("Setting environment {}".format(environment))
self.environment = environment
def get_environment(self):
return self.environment
def update(self, *args, **kwargs):
for k, v in dict(*args, **kwargs).items():
self[k] = v
class GlobalConfig(BaseConfig):
"""Create the configuration needed to deploy a group of AWS lambda functions
"""
_DEFAULT_CONFIG_FILE_NAME = "config.json"
_REQUIRED_LAMBDAS_KEY = "lambdas"
_REQUIRED_KEYS = ("lambdas",)
deploy_environments = False
project_dir = ""
project_filename = ""
def __init__(self, *args, **kwargs):
super(GlobalConfig, self).__init__(*args, **kwargs)
self.set_projectdir(kwargs.get("path", False))
self.set_project_config_filename(kwargs.get("filename", False))
self._set_conf_from_file(environment=kwargs.get("environment", False))
for key in self._REQUIRED_KEYS:
if key not in self.keys():
raise ArdyRequiredKeyError("{} is required to create the configuration".format(key))
def set_projectdir(self, path=False):
self.project_dir = os.path.abspath(path or os.getcwd())
if self.project_dir and os.path.isdir(self.project_dir):
return True
raise ArdyNoDirError("Folder {} not exist".format(self.project_dir))
def get_projectdir(self):
return self.project_dir
def set_project_config_filename(self, filename=False):
self.project_filename = filename or self._DEFAULT_CONFIG_FILE_NAME
def get_project_config_filename(self):
return self.project_filename
def _get_config_file(self):
return os.path.join(self.get_projectdir(), self.get_project_config_filename())
def _set_conf_from_file(self, config_file=None, environment=False):
if not config_file:
config_file = self._get_config_file()
if os.path.isfile(config_file):
logger.debug("Loading configuration from file {}".format(config_file))
with open(config_file) as data_file:
config_dict = json.load(data_file)
self._set_conf_from_dict(config_dict=config_dict, environment=environment)
else:
raise ArdyNoFileError("File {} not exist".format(config_file))
def set_environment(self, environment=False):
if environment and environment not in self["deploy"]["deploy_environments"]:
raise ArdyEnvironmentNotExistsError("Environment {} not exists".format(environment))
self.environment = environment
def reload_conf(self):
self._set_conf_from_file()
def _set_conf_from_dict(self, config_dict, environment=False):
for key in config_dict:
self[key] = config_dict[key]
if environment:
self.set_environment(environment=environment)
self[self._REQUIRED_LAMBDAS_KEY] = [
LambdaConfig(awslambda, self.get_globals(), environment=self.get_environment()) for awslambda in
config_dict[self._REQUIRED_LAMBDAS_KEY]
]
def get_globals(self):
return {k: v for k, v in self.items() if k != "lambdas"}
def get_lambdas(self):
for awslambda in self["lambdas"]:
yield awslambda
def get_lambda_by_name(self, name):
for i in self.get_lambdas():
if i["FunctionName"] == name:
return i
raise ArdyLambdaNotExistsError("Lambda function {} not exist.".format(name))
def print_config(self):
print(json.dumps(self, indent=2))
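# Example usage of GlobalConfig (a sketch; the project path, file name and
# environment are illustrative and must match the project's config.json):
#     config = GlobalConfig(path='/path/to/project', filename='config.json', environment='staging')
#     for awslambda in config.get_lambdas():
#         print(awslambda['FunctionName'])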
class LambdaConfig(BaseConfig):
_DEPLOY_KEYS_BLACKLIST = ["path", "version", "filename", "aws_credentials", "deploy", "triggers",
"deploy_environments", "requirements", "environment", "lambdas_to_deploy",
"FunctionNameOrigin"]
def __init__(self, *args, **kwargs):
super(LambdaConfig, self).__init__(*args, **kwargs)
self.set_environment(kwargs.get("environment", False))
self._set_conf_from_dict(args[0], args[1])
environment_config = args[0].get("deploy_environments", {})
self["FunctionNameOrigin"] = self["FunctionName"]
if self.get_environment() and environment_config:
self._set_conf_from_dict(environment_config[self.get_environment()], self)
def _set_conf_from_dict(self, lambda_config, global_config):
aux_dict = self.merge_dicts(global_config, lambda_config)
for key in aux_dict:
self[key] = aux_dict[key]
def get_deploy_conf(self):
return {k: v for k, v in self.items() if k not in self._DEPLOY_KEYS_BLACKLIST}
def merge_dicts(self, x, y):
"""
if sys.version_info >= (3,5):
return {**x, **y}
else:
"""
z = x.copy()
z.update(y)
return z
class ConfigMixin(object):
def __init__(self, *args, **kwargs):
super(ConfigMixin, self).__init__()
logger.debug("[{}] loading config...".format(self.__class__, ))
self.config = GlobalConfig(*args, **kwargs) | PypiClean |
/Mopidy-Mopify-1.7.3.tar.gz/Mopidy-Mopify-1.7.3/mopidy_mopify/static/debug/src/app/search/search.controller.js | 'use strict';
// Declare app level module which depends on views, and components
angular.module('mopify.search', [
'spotify',
'ngRoute',
'cfp.hotkeys',
'mopify.services.spotifylogin',
'mopify.services.mopidy',
'mopify.services.station',
'mopify.services.util',
'mopify.widgets.directive.playlist',
'mopify.widgets.directive.album',
'mopify.widgets.directive.artist',
'mopify.widgets.directive.track',
'mopify.widgets.directive.focusme'
]).config([
'$routeProvider',
function ($routeProvider) {
$routeProvider.when('/search', {
templateUrl: 'search/search.tmpl.html',
controller: 'SearchController',
reloadOnSearch: false
});
}
]).controller('SearchController', [
'$rootScope',
'$scope',
'$routeParams',
'$route',
'$timeout',
'$location',
'Spotify',
'SpotifyLogin',
'mopidyservice',
'stationservice',
'util',
'Settings',
'PlaylistManager',
function SearchController($rootScope, $scope, $routeParams, $route, $timeout, $location, Spotify, SpotifyLogin, mopidyservice, stationservice, util, Settings, PlaylistManager) {
$scope.$watch(function () {
return $routeParams.query;
}, function (val) {
$scope.query = val;
$scope.typing();
});
var typingTimeout = null;
// Define empty result scope
$scope.results = {
artists: [],
tracks: [],
albums: [],
playlists: []
};
$scope.searchLimits = {
artists: 8,
albums: 8,
tracks: 15,
playlists: 8
};
$scope.topresult = {};
$scope.loading = true;
// Keep track of previous query
var previousQuery = $routeParams.query || '';
/**
* Event listener for typing
* @param {object} event
* @return {void}
*/
$scope.typing = function (event) {
// Close the search overlay on ESC press
if (event != null && event.keyCode === 27)
$scope.closeSearch();
if ($scope.query.trim().length === 0 || $scope.query === previousQuery)
return;
// Set previous query
previousQuery = $scope.query;
// Set loading
$scope.loading = true;
// Clear previous timeout
$timeout.cancel(typingTimeout);
// Set timeout before performing search
typingTimeout = $timeout(function () {
// Set search param
$location.search('query', $scope.query);
if ($scope.query.trim().length > 1)
$scope.performSearch();
}, 1000);
};
/**
* Close the search overlay
* @return {void}
*/
$scope.closeSearch = function () {
$location.url($routeParams.refer || '/');
};
/*
* Perform a search with the current query
*/
$scope.performSearch = function performSearch() {
var searchableItems = !SpotifyLogin.connected ? 'album,artist' : 'album,artist,playlist';
var resultsloaded = 0;
Spotify.search($scope.query, searchableItems, {
market: Settings.get('country', 'US'),
limit: '50'
}).then(function (response) {
var data = response.data;
// Perform local search and put at beginning of playlist array
var localLists = PlaylistManager.search($scope.query);
if (data.playlists === undefined) {
data.playlists = { items: [] };
}
data.playlists.items = localLists.concat(data.playlists.items);
$scope.results.artists = data.artists;
$scope.results.albums = data.albums;
$scope.results.playlists = data.playlists;
// The search request only returns limited information about an album
// so lets get some more information
Spotify.getAlbums(_.map(data.albums.items.slice(0, 20), function (album) {
return album.id;
})).then(function (response) {
angular.extend($scope.results.albums.items, response.data.albums);
});
resultsloaded++;
if (resultsloaded == 2)
getTopMatchingResult($scope.query, $scope.results);
});
mopidyservice.search($scope.query).then(function (data) {
// Check if tracks are available
if (data.length > 0 && data[0].tracks != null) {
$scope.results.tracks = data[0].tracks.splice(0, 100);
}
// Check if all data is loaded and if it is; calculate the topresult
resultsloaded++;
if (resultsloaded == 2)
getTopMatchingResult($scope.query, $scope.results);
});
};
// Run on load
$scope.$on('mopidy:state:online', function () {
typingTimeout = $timeout(function () {
if ($scope.query.trim().length > 1)
$scope.performSearch();
}, 250);
});
if (mopidyservice.isConnected) {
typingTimeout = $timeout(function () {
if ($scope.query.trim().length > 1)
$scope.performSearch();
}, 250);
}
/**
* Play the songs that are given in the topresult
*/
$scope.playTopItem = function () {
mopidyservice.lookup($scope.topresult.item.uri).then(function (response) {
var tracks = response[$scope.topresult.item.uri];
mopidyservice.playTrack(tracks[0], tracks.splice(0, 10));
});
};
/**
* Start a station from the top result
*/
$scope.startTopItemStation = function () {
stationservice.startFromSpotifyUri($scope.topresult.item.uri);
};
/**
* Toggle the number of results that should be shown
* @param {string} item category: artists, albums, tracks, playlists
         * @return {void}
*/
$scope.searchLimitsToggle = function (item) {
if ($scope.searchLimits[item] == 50)
$scope.searchLimits[item] = item != 'tracks' ? 8 : 15;
else
$scope.searchLimits[item] = 50;
};
/**
         * Get the top matching results from the given batch
* @param {string} search The search string to check against
* @param {object} results All the results from spotify and mopidy
*/
function getTopMatchingResult(search, results) {
var bestmatch = null;
var resultitem = {};
var items = [];
// Override results with angular copy of results
results = angular.copy(results);
// Loop through all results and create an array with all items
_.each(results, function (result, key) {
if (result != null) {
// Get correct items array
if (result.items) {
items.push({
type: key,
items: result.items
});
} else {
items.push({
type: key,
items: result
});
}
}
});
// Check each item with the query using the levenshtein algorithme
_.each(items, function (collection) {
_.each(collection.items, function (item) {
var stringtocheck = item.name.toLowerCase();
                    var distance = levenshteinDistance(search.toLowerCase(), stringtocheck);
// Check with previous bestmatch and update if needed
if (bestmatch === null || bestmatch > distance) {
bestmatch = distance;
resultitem = {
item: item,
type: collection.type
};
}
});
});
if (resultitem.item != null) {
// Genereate the link
if (resultitem.type === 'artists')
resultitem.link = '#/music/artist/' + resultitem.item.uri;
else
resultitem.link = '#/music/tracklist/' + resultitem.item.uri;
}
// Set topresult and stop loading
$scope.loading = false;
$scope.topresult = resultitem;
}
/**
* Compute the edit distance between the two given strings
* @param {string} a
* @param {string} b
* @return {int} the number that represents the distance
*/
function levenshteinDistance(a, b) {
if (a.length === 0)
return b.length;
if (b.length === 0)
return a.length;
var matrix = [];
// increment along the first column of each row
var i;
for (i = 0; i <= b.length; i++) {
matrix[i] = [i];
}
// increment each column in the first row
var j;
for (j = 0; j <= a.length; j++) {
matrix[0][j] = j;
}
// Fill in the rest of the matrix
for (i = 1; i <= b.length; i++) {
for (j = 1; j <= a.length; j++) {
if (b.charAt(i - 1) == a.charAt(j - 1)) {
matrix[i][j] = matrix[i - 1][j - 1];
} else {
                        matrix[i][j] = Math.min(matrix[i - 1][j - 1] + 1, Math.min(matrix[i][j - 1] + 1, matrix[i - 1][j] + 1)); // substitution, insertion, deletion
}
}
}
return matrix[b.length][a.length];
}
}
]).controller('SearchMenuController', [
'$scope',
'$rootScope',
'$routeParams',
'$route',
'$location',
'hotkeys',
function SearchMenuController($scope, $rootScope, $routeParams, $route, $location, hotkeys) {
var previous = '';
// Send the user to the search page when he starts typing
$scope.typing = function () {
if ($scope.query === undefined)
return;
if ($scope.query.trim().length > 0 && $scope.query !== previous) {
var refer;
if ($location.url().indexOf('/search') > -1)
refer = $routeParams.refer;
else
refer = $location.url();
$location.url('/search?query=' + $scope.query + '&refer=' + refer);
}
previous = $scope.query;
};
$scope.query = $routeParams.query;
// Add search hotkey
hotkeys.add({
combo: 'ctrl+f',
description: 'Search',
callback: function (event, hotkey) {
event.preventDefault();
$rootScope.focussearch = true;
}
});
$scope.$watch(function () {
return $routeParams.query;
}, function (val) {
$scope.query = val;
});
}
]); | PypiClean |
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/kitchen_sink/static/ks/ckeditor/lang/gl.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['gl']={"editor":"Editor de texto mellorado","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"Prema ALT 0 para obter axuda","browseServer":"Examinar o servidor","url":"URL","protocol":"Protocolo","upload":"Enviar","uploadSubmit":"Enviar ao servidor","image":"Imaxe","flash":"Flash","form":"Formulario","checkbox":"Caixa de selección","radio":"Botón de opción","textField":"Campo de texto","textarea":"Área de texto","hiddenField":"Campo agochado","button":"Botón","select":"Campo de selección","imageButton":"Botón de imaxe","notSet":"<sen estabelecer>","id":"ID","name":"Nome","langDir":"Dirección de escritura do idioma","langDirLtr":"Esquerda a dereita (LTR)","langDirRtl":"Dereita a esquerda (RTL)","langCode":"Código do idioma","longDescr":"Descrición completa do URL","cssClass":"Clases da folla de estilos","advisoryTitle":"Título","cssStyle":"Estilo","ok":"Aceptar","cancel":"Cancelar","close":"Pechar","preview":"Vista previa","resize":"Redimensionar","generalTab":"Xeral","advancedTab":"Avanzado","validateNumberFailed":"Este valor non é un número.","confirmNewPage":"Calquera cambio que non gardara neste contido perderase.\r\nConfirma que quere cargar unha páxina nova?","confirmCancel":"Algunhas das opcións foron cambiadas.\r\nConfirma que quere pechar o diálogo?","options":"Opcións","target":"Destino","targetNew":"Nova xanela (_blank)","targetTop":"Xanela principal (_top)","targetSelf":"Mesma xanela (_self)","targetParent":"Xanela superior (_parent)","langDirLTR":"Esquerda a dereita (LTR)","langDirRTL":"Dereita a esquerda (RTL)","styles":"Estilo","cssClasses":"Clases da folla de estilos","width":"Largo","height":"Alto","align":"Aliñamento","alignLeft":"Esquerda","alignRight":"Dereita","alignCenter":"Centro","alignTop":"Arriba","alignMiddle":"Centro","alignBottom":"Abaixo","invalidValue":"Valor incorrecto.","invalidHeight":"O alto debe ser un número.","invalidWidth":"O largo debe ser un número.","invalidCssLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida CSS correcta (px, %, in, cm, mm, em, ex, pt, ou pc).","invalidHtmlLength":"O valor especificado para o campo «%1» debe ser un número positivo con ou sen unha unidade de medida HTML correcta (px ou %).","invalidInlineStyle":"O valor especificado no estilo en liña debe consistir nunha ou máis tuplas co formato «nome : valor», separadas por punto e coma.","cssLengthTooltip":"Escriba un número para o valor en píxeles ou un número cunha unidade CSS correcta (px, %, in, cm, mm, em, ex, pt, ou pc).","unavailable":"%1<span class=\"cke_accessibility\">, non dispoñíbel</span>"},"about":{"copy":"Copyright © $1. Todos os dereitos reservados.","dlgTitle":"Sobre o CKEditor","help":"Consulte $1 para obter axuda.","moreInfo":"Para obter información sobre a licenza, visite o noso sitio web:","title":"Sobre o CKEditor","userGuide":"Guía do usuario do CKEditor"},"basicstyles":{"bold":"Negra","italic":"Cursiva","strike":"Riscado","subscript":"Subíndice","superscript":"Superíndice","underline":"Subliñado"},"bidi":{"ltr":"Dirección do texto de esquerda a dereita","rtl":"Dirección do texto de dereita a esquerda"},"blockquote":{"toolbar":"Cita"},"clipboard":{"copy":"Copiar","copyError":"Os axustes de seguranza do seu navegador non permiten que o editor realice automaticamente as tarefas de copia. Use o teclado para iso (Ctrl/Cmd+C).","cut":"Cortar","cutError":"Os axustes de seguranza do seu navegador non permiten que o editor realice automaticamente as tarefas de corte. 
Use o teclado para iso (Ctrl/Cmd+X).","paste":"Pegar","pasteArea":"Zona de pegado","pasteMsg":"Pegue dentro do seguinte cadro usando o teclado (<STRONG>Ctrl/Cmd+V</STRONG>) e prema en Aceptar","securityMsg":"Por mor da configuración de seguranza do seu navegador, o editor non ten acceso ao portapapeis. É necesario pegalo novamente nesta xanela.","title":"Pegar"},"colorbutton":{"auto":"Automático","bgColorTitle":"Cor do fondo","colors":{"000":"Negro","800000":"Marrón escuro","8B4513":"Ocre","2F4F4F":"Pizarra escuro","008080":"Verde azulado","000080":"Azul mariño","4B0082":"Índigo","696969":"Gris escuro","B22222":"Ladrillo","A52A2A":"Marrón","DAA520":"Dourado escuro","006400":"Verde escuro","40E0D0":"Turquesa","0000CD":"Azul medio","800080":"Púrpura","808080":"Gris","F00":"Vermello","FF8C00":"Laranxa escuro","FFD700":"Dourado","008000":"Verde","0FF":"Cian","00F":"Azul","EE82EE":"Violeta","A9A9A9":"Gris medio","FFA07A":"Salmón claro","FFA500":"Laranxa","FFFF00":"Amarelo","00FF00":"Lima","AFEEEE":"Turquesa pálido","ADD8E6":"Azul claro","DDA0DD":"Violeta pálido","D3D3D3":"Verde claro","FFF0F5":"Lavanda vermello","FAEBD7":"Branco antigo","FFFFE0":"Amarelo claro","F0FFF0":"Mel","F0FFFF":"Azul celeste","F0F8FF":"Azul pálido","E6E6FA":"Lavanda","FFF":"Branco"},"more":"Máis cores...","panelTitle":"Cores","textColorTitle":"Cor do texto"},"colordialog":{"clear":"Limpar","highlight":"Resaltar","options":"Opcións de cor","selected":"Cor seleccionado","title":"Seleccione unha cor"},"templates":{"button":"Modelos","emptyListMsg":"(Non hai modelos definidos)","insertOption":"Substituír o contido actual","options":"Opcións de modelos","selectPromptMsg":"Seleccione o modelo a abrir no editor","title":"Modelos de contido"},"contextmenu":{"options":"Opcións do menú contextual"},"div":{"IdInputLabel":"ID","advisoryTitleInputLabel":"Título","cssClassInputLabel":"Clases da folla de estilos","edit":"Editar Div","inlineStyleInputLabel":"Estilo de liña","langDirLTRLabel":"Esquerda a dereita (LTR)","langDirLabel":"Dirección de escritura do idioma","langDirRTLLabel":"Dereita a esquerda (RTL)","languageCodeInputLabel":"Código do idioma","remove":"Retirar Div","styleSelectLabel":"Estilo","title":"Crear un contedor Div","toolbar":"Crear un contedor Div"},"toolbar":{"toolbarCollapse":"Contraer a barra de ferramentas","toolbarExpand":"Expandir a barra de ferramentas","toolbarGroups":{"document":"Documento","clipboard":"Portapapeis/desfacer","editing":"Edición","forms":"Formularios","basicstyles":"Estilos básicos","paragraph":"Paragrafo","links":"Ligazóns","insert":"Inserir","styles":"Estilos","colors":"Cores","tools":"Ferramentas"},"toolbars":"Barras de ferramentas do editor"},"elementspath":{"eleLabel":"Ruta dos elementos","eleTitle":"Elemento %1"},"find":{"find":"Buscar","findOptions":"Buscar opcións","findWhat":"Texto a buscar:","matchCase":"Coincidir Mai./min.","matchCyclic":"Coincidencia cíclica","matchWord":"Coincidencia coa palabra completa","notFoundMsg":"Non se atopou o texto indicado.","replace":"Substituir","replaceAll":"Substituír todo","replaceSuccessMsg":"%1 concorrencia(s) substituída(s).","replaceWith":"Substituír con:","title":"Buscar e substituír"},"fakeobjects":{"anchor":"Ancoraxe","flash":"Animación «Flash»","hiddenfield":"Campo agochado","iframe":"IFrame","unknown":"Obxecto descoñecido"},"flash":{"access":"Acceso de scripts","accessAlways":"Sempre","accessNever":"Nunca","accessSameDomain":"Mesmo dominio","alignAbsBottom":"Abs Inferior","alignAbsMiddle":"Abs centro","alignBaseline":"Liña de 
base","alignTextTop":"Tope do texto","bgcolor":"Cor do fondo","chkFull":"Permitir pantalla completa","chkLoop":"Repetir","chkMenu":"Activar o menú do «Flash»","chkPlay":"Reprodución auomática","flashvars":"Opcións do «Flash»","hSpace":"Esp. Horiz.","properties":"Propiedades do «Flash»","propertiesTab":"Propiedades","quality":"Calidade","qualityAutoHigh":"Alta, automática","qualityAutoLow":"Baixa, automática","qualityBest":"A mellor","qualityHigh":"Alta","qualityLow":"Baixa","qualityMedium":"Media","scale":"Escalar","scaleAll":"Amosar todo","scaleFit":"Encaixar axustando","scaleNoBorder":"Sen bordo","title":"Propiedades do «Flash»","vSpace":"Esp.Vert.","validateHSpace":"O espazado horizontal debe ser un número.","validateSrc":"O URL non pode estar baleiro.","validateVSpace":"O espazado vertical debe ser un número.","windowMode":"Modo da xanela","windowModeOpaque":"Opaca","windowModeTransparent":"Transparente","windowModeWindow":"Xanela"},"font":{"fontSize":{"label":"Tamaño","voiceLabel":"Tamaño da letra","panelTitle":"Tamaño da letra"},"label":"Tipo de letra","panelTitle":"Nome do tipo de letra","voiceLabel":"Tipo de letra"},"forms":{"button":{"title":"Propiedades do botón","text":"Texto (Valor)","type":"Tipo","typeBtn":"Botón","typeSbm":"Enviar","typeRst":"Restabelever"},"checkboxAndRadio":{"checkboxTitle":"Propiedades da caixa de selección","radioTitle":"Propiedades do botón de opción","value":"Valor","selected":"Seleccionado"},"form":{"title":"Propiedades do formulario","menu":"Propiedades do formulario","action":"Acción","method":"Método","encoding":"Codificación"},"hidden":{"title":"Propiedades do campo agochado","name":"Nome","value":"Valor"},"select":{"title":"Propiedades do campo de selección","selectInfo":"Información","opAvail":"Opcións dispoñíbeis","value":"Valor","size":"Tamaño","lines":"liñas","chkMulti":"Permitir múltiplas seleccións","opText":"Texto","opValue":"Valor","btnAdd":"Engadir","btnModify":"Modificar","btnUp":"Subir","btnDown":"Baixar","btnSetValue":"Estabelecer como valor seleccionado","btnDelete":"Eliminar"},"textarea":{"title":"Propiedades da área de texto","cols":"Columnas","rows":"Filas"},"textfield":{"title":"Propiedades do campo de texto","name":"Nome","value":"Valor","charWidth":"Largo do carácter","maxChars":"Núm. 
máximo de caracteres","type":"Tipo","typeText":"Texto","typePass":"Contrasinal","typeEmail":"Correo","typeSearch":"Buscar","typeTel":"Número de teléfono","typeUrl":"URL"}},"format":{"label":"Formato","panelTitle":"Formato do parágrafo","tag_address":"Enderezo","tag_div":"Normal (DIV)","tag_h1":"Enacabezado 1","tag_h2":"Encabezado 2","tag_h3":"Encabezado 3","tag_h4":"Encabezado 4","tag_h5":"Encabezado 5","tag_h6":"Encabezado 6","tag_p":"Normal","tag_pre":"Formatado"},"horizontalrule":{"toolbar":"Inserir unha liña horizontal"},"iframe":{"border":"Amosar o bordo do marco","noUrl":"Escriba o enderezo do iframe","scrolling":"Activar as barras de desprazamento","title":"Propiedades do iFrame","toolbar":"IFrame"},"image":{"alertUrl":"Escriba o URL da imaxe","alt":"Texto alternativo","border":"Bordo","btnUpload":"Enviar ao servidor","button2Img":"Quere converter o botón da imaxe seleccionada nunha imaxe sinxela?","hSpace":"Esp.Horiz.","img2Button":"Quere converter a imaxe seleccionada nun botón de imaxe?","infoTab":"Información da imaxe","linkTab":"Ligazón","lockRatio":"Proporcional","menu":"Propiedades da imaxe","resetSize":"Tamaño orixinal","title":"Propiedades da imaxe","titleButton":"Propiedades do botón de imaxe","upload":"Cargar","urlMissing":"Non se atopa o URL da imaxe.","vSpace":"Esp.Vert.","validateBorder":"O bordo debe ser un número.","validateHSpace":"O espazado horizontal debe ser un número.","validateVSpace":"O espazado vertical debe ser un número."},"indent":{"indent":"Aumentar a sangría","outdent":"Reducir a sangría"},"smiley":{"options":"Opcións de emoticonas","title":"Inserir unha emoticona","toolbar":"Emoticona"},"justify":{"block":"Xustificado","center":"Centrado","left":"Aliñar á esquerda","right":"Aliñar á dereita"},"link":{"acccessKey":"Chave de acceso","advanced":"Avanzado","advisoryContentType":"Tipo de contido informativo","advisoryTitle":"Título","anchor":{"toolbar":"Ancoraxe","menu":"Editar a ancoraxe","title":"Propiedades da ancoraxe","name":"Nome da ancoraxe","errorName":"Escriba o nome da ancoraxe","remove":"Retirar a ancoraxe"},"anchorId":"Polo ID do elemento","anchorName":"Polo nome da ancoraxe","charset":"Codificación do recurso ligado","cssClasses":"Clases da folla de estilos","emailAddress":"Enderezo de correo","emailBody":"Corpo da mensaxe","emailSubject":"Asunto da mensaxe","id":"ID","info":"Información da ligazón","langCode":"Código do idioma","langDir":"Dirección de escritura do idioma","langDirLTR":"Esquerda a dereita (LTR)","langDirRTL":"Dereita a esquerda (RTL)","menu":"Editar a ligazón","name":"Nome","noAnchors":"(Non hai ancoraxes dispoñíbeis no documento)","noEmail":"Escriba o enderezo de correo","noUrl":"Escriba a ligazón URL","other":"<outro>","popupDependent":"Dependente (Netscape)","popupFeatures":"Características da xanela emerxente","popupFullScreen":"Pantalla completa (IE)","popupLeft":"Posición esquerda","popupLocationBar":"Barra de localización","popupMenuBar":"Barra do menú","popupResizable":"Redimensionábel","popupScrollBars":"Barras de desprazamento","popupStatusBar":"Barra de estado","popupToolbar":"Barra de ferramentas","popupTop":"Posición superior","rel":"Relación","selectAnchor":"Seleccionar unha ancoraxe","styles":"Estilo","tabIndex":"Índice de tabulación","target":"Destino","targetFrame":"<marco>","targetFrameName":"Nome do marco de destino","targetPopup":"<xanela emerxente>","targetPopupName":"Nome da xanela emerxente","title":"Ligazón","toAnchor":"Ligar coa ancoraxe no 
testo","toEmail":"Correo","toUrl":"URL","toolbar":"Ligazón","type":"Tipo de ligazón","unlink":"Eliminar a ligazón","upload":"Enviar"},"list":{"bulletedlist":"Inserir/retirar lista viñeteada","numberedlist":"Inserir/retirar lista numerada"},"liststyle":{"armenian":"Numeración armenia","bulletedTitle":"Propiedades da lista viñeteada","circle":"Circulo","decimal":"Decimal (1, 2, 3, etc.)","decimalLeadingZero":"Decimal con cero á esquerda (01, 02, 03, etc.)","disc":"Disc","georgian":"Numeración xeorxiana (an, ban, gan, etc.)","lowerAlpha":"Alfabeto en minúsculas (a, b, c, d, e, etc.)","lowerGreek":"Grego en minúsculas (alpha, beta, gamma, etc.)","lowerRoman":"Números romanos en minúsculas (i, ii, iii, iv, v, etc.)","none":"Ningún","notset":"<sen estabelecer>","numberedTitle":"Propiedades da lista numerada","square":"Cadrado","start":"Inicio","type":"Tipo","upperAlpha":"Alfabeto en maiúsculas (A, B, C, D, E, etc.)","upperRoman":"Números romanos en maiúsculas (I, II, III, IV, V, etc.)","validateStartNumber":"O número de inicio da lista debe ser un número enteiro."},"magicline":{"title":"Inserir aquí o parágrafo"},"maximize":{"maximize":"Maximizar","minimize":"Minimizar"},"newpage":{"toolbar":"Páxina nova"},"pagebreak":{"alt":"Quebra de páxina","toolbar":"Inserir quebra de páxina"},"pastetext":{"button":"Pegar como texto simple","title":"Pegar como texto simple"},"pastefromword":{"confirmCleanup":"O texto que quere pegar semella ser copiado desde o Word. Quere depuralo antes de pegalo?","error":"Non foi posíbel depurar os datos pegados por mor dun erro interno","title":"Pegar desde Word","toolbar":"Pegar desde Word"},"preview":{"preview":"Vista previa"},"print":{"toolbar":"Imprimir"},"removeformat":{"toolbar":"Retirar o formato"},"save":{"toolbar":"Gardar"},"selectall":{"toolbar":"Seleccionar todo"},"showblocks":{"toolbar":"Amosar os bloques"},"sourcearea":{"toolbar":"Orixe"},"specialchar":{"options":"Opcións de caracteres especiais","title":"Seleccione un carácter especial","toolbar":"Inserir un carácter especial"},"scayt":{"about":"About SCAYT","aboutTab":"About","addWord":"Add Word","allCaps":"Ignore All-Caps Words","dic_create":"Create","dic_delete":"Delete","dic_field_name":"Dictionary name","dic_info":"Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. 
If you already have a stored dictionary, please type its name and click the Restore button.","dic_rename":"Rename","dic_restore":"Restore","dictionariesTab":"Dictionaries","disable":"Disable SCAYT","emptyDic":"Dictionary name should not be empty.","enable":"Enable SCAYT","ignore":"Ignore","ignoreAll":"Ignore All","ignoreDomainNames":"Ignore Domain Names","langs":"Languages","languagesTab":"Languages","mixedCase":"Ignore Words with Mixed Case","mixedWithDigits":"Ignore Words with Numbers","moreSuggestions":"More suggestions","opera_title":"Not supported by Opera","options":"Options","optionsTab":"Options","title":"Spell Check As You Type","toggle":"Toggle SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"Estilos","panelTitle":"Estilos de formatando","panelTitle1":"Estilos de bloque","panelTitle2":"Estilos de carácter","panelTitle3":"Estilos de obxecto"},"table":{"border":"Tamaño do bordo","caption":"Título","cell":{"menu":"Cela","insertBefore":"Inserir a cela á esquerda","insertAfter":"Inserir a cela á dereita","deleteCell":"Eliminar celas","merge":"Combinar celas","mergeRight":"Combinar á dereita","mergeDown":"Combinar cara abaixo","splitHorizontal":"Dividir a cela en horizontal","splitVertical":"Dividir a cela en vertical","title":"Propiedades da cela","cellType":"Tipo de cela","rowSpan":"Expandir filas","colSpan":"Expandir columnas","wordWrap":"Axustar ao contido","hAlign":"Aliñación horizontal","vAlign":"Aliñación vertical","alignBaseline":"Liña de base","bgColor":"Cor do fondo","borderColor":"Cor do bordo","data":"Datos","header":"Cabeceira","yes":"Si","no":"Non","invalidWidth":"O largo da cela debe ser un número.","invalidHeight":"O alto da cela debe ser un número.","invalidRowSpan":"A expansión de filas debe ser un número enteiro.","invalidColSpan":"A expansión de columnas debe ser un número enteiro.","chooseColor":"Escoller"},"cellPad":"Marxe interior da cela","cellSpace":"Marxe entre celas","column":{"menu":"Columna","insertBefore":"Inserir a columna á esquerda","insertAfter":"Inserir a columna á dereita","deleteColumn":"Borrar Columnas"},"columns":"Columnas","deleteTable":"Borrar Táboa","headers":"Cabeceiras","headersBoth":"Ambas","headersColumn":"Primeira columna","headersNone":"Ningún","headersRow":"Primeira fila","invalidBorder":"O tamaño do bordo debe ser un número.","invalidCellPadding":"A marxe interior debe ser un número positivo.","invalidCellSpacing":"A marxe entre celas debe ser un número positivo.","invalidCols":"O número de columnas debe ser un número maior que 0.","invalidHeight":"O alto da táboa debe ser un número.","invalidRows":"O número de filas debe ser un número maior que 0","invalidWidth":"O largo da táboa debe ser un número.","menu":"Propiedades da táboa","row":{"menu":"Fila","insertBefore":"Inserir a fila por riba","insertAfter":"Inserir a fila por baixo","deleteRow":"Eliminar filas"},"rows":"Filas","summary":"Resumo","title":"Propiedades da táboa","toolbar":"Taboa","widthPc":"porcentaxe","widthPx":"píxeles","widthUnit":"unidade do largo"},"undo":{"redo":"Refacer","undo":"Desfacer"},"wsc":{"btnIgnore":"Ignorar","btnIgnoreAll":"Ignorar Todas","btnReplace":"Substituir","btnReplaceAll":"Substituir Todas","btnUndo":"Desfacer","changeTo":"Cambiar a","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"O corrector ortográfico non está instalado. 
¿Quere descargalo agora?","manyChanges":"Corrección ortográfica rematada: %1 verbas substituidas","noChanges":"Corrección ortográfica rematada: Non se substituiu nengunha verba","noMispell":"Corrección ortográfica rematada: Non se atoparon erros","noSuggestions":"- Sen candidatos -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"Non está no diccionario","oneChange":"Corrección ortográfica rematada: Unha verba substituida","progress":"Corrección ortográfica en progreso...","title":"Spell Check","toolbar":"Corrección Ortográfica"}}; | PypiClean |
/GBT_parser-1.0.3-py3-none-any.whl/cantools/subparsers/decode.py | import argparse
import logging
import sys
from argparse_addons import Integer
from .. import database, logreader
from .__utils__ import format_message_by_frame_id
logging.basicConfig(level=logging.WARNING)
def _do_decode(args):
dbase = database.load_file(args.database,
encoding=args.encoding,
frame_id_mask=args.frame_id_mask,
prune_choices=args.prune,
strict=not args.no_strict)
decode_choices = not args.no_decode_choices
decode_containers = not args.no_decode_containers
parser = logreader.Parser(sys.stdin)
for line, frame in parser.iterlines(keep_unknowns=True):
if frame is not None:
line += ' ::'
line += format_message_by_frame_id(dbase,
frame.frame_id,
frame.data,
decode_choices,
args.single_line,
decode_containers)
print(line)
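# Typical invocation, piping candump output into the decoder (a sketch; the
# `cantools` entry point, CAN interface and database file name are assumed):
#     candump can0 | cantools decode system.dbc
#     candump can0 | cantools decode --single-line --prune system.dbc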
def add_subparser(subparsers):
decode_parser = subparsers.add_parser(
'decode',
description=('Decode "candump" CAN frames read from standard input '
'and print them in a human readable format.'),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
decode_parser.add_argument(
'-c', '--no-decode-choices',
action='store_true',
help='Do not convert scaled values to choice strings.')
decode_parser.add_argument(
'-t', '--no-decode-containers',
action='store_true',
help='Do not decode container messages.')
decode_parser.add_argument(
'-s', '--single-line',
action='store_true',
help='Print the decoded message on a single line.')
decode_parser.add_argument(
'-e', '--encoding',
help='File encoding.')
decode_parser.add_argument(
'--prune',
action='store_true',
help='Try to shorten the names of named signal choices.')
decode_parser.add_argument(
'--no-strict',
action='store_true',
help='Skip database consistency checks.')
decode_parser.add_argument(
'-m', '--frame-id-mask',
type=Integer(0),
help=('Only compare selected frame id bits to find the message in the '
'database. By default the candump and database frame ids must '
'be equal for a match.'))
decode_parser.add_argument(
'database',
help='Database file.')
decode_parser.set_defaults(func=_do_decode) | PypiClean |
/AWSScout2-3.2.1.tar.gz/AWSScout2-3.2.1/README.rst | ##########
AWS Scout2
##########
.. image:: https://travis-ci.org/nccgroup/Scout2.svg?branch=master
:target: https://travis-ci.org/nccgroup/Scout2
.. image:: https://coveralls.io/repos/github/nccgroup/Scout2/badge.svg?branch=master
:target: https://coveralls.io/github/nccgroup/Scout2
.. image:: https://badge.fury.io/py/AWSScout2.svg
:target: https://badge.fury.io/py/AWSScout2
:align: right
***********
Description
***********
Scout2 is a security tool that lets AWS administrators assess their
environment's security posture. Using the AWS API, Scout2 gathers configuration
data for manual inspection and highlights high-risk areas automatically. Rather
than poring through dozens of pages on the web, Scout2 supplies a clear view of
the attack surface automatically.
**Note:** Scout2 is stable and actively maintained, but a number of features and
internals may change. As such, please bear with us as we find time to work on,
and improve, the tool. Feel free to report a bug with details (*e.g.* console
output using the "--debug" argument), request a new feature, or send a pull
request.
************
Installation
************
Install via `pip`_:
::
$ pip install awsscout2
Install from source:
::
$ git clone https://github.com/nccgroup/Scout2
$ cd Scout2
$ pip install -r requirements.txt
$ python setup.py install
************
Requirements
************
Computing resources
-------------------
Scout2 is a multi-threaded tool that fetches and stores your AWS account's configuration settings in memory during
runtime. It is expected that the tool will run with no issues on any modern laptop or equivalent VM.
**Running Scout2 in a VM with limited computing resources, such as a t2.micro instance, is not supported and will
likely result in the process being killed.**
Python
------
Scout2 is written in Python and supports the following versions:
* 2.7
* 3.3
* 3.4
* 3.5
* 3.6
AWS Credentials
---------------
To run Scout2, you will need valid AWS credentials (*e.g* Access Key ID and Secret Access Key).
The role, or user account, associated with these credentials requires read-only access for all resources in a number of
services, including but not limited to CloudTrail, EC2, IAM, RDS, Redshift, and S3.
The following AWS Managed Policies can be attached to the principal in order to grant necessary permissions:
* ReadOnlyAccess
* SecurityAudit
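For example, the policies can be attached to an audit user with the AWS CLI (the user name below is illustrative):
::
    $ aws iam attach-user-policy --user-name scout2-audit --policy-arn arn:aws:iam::aws:policy/ReadOnlyAccess
    $ aws iam attach-user-policy --user-name scout2-audit --policy-arn arn:aws:iam::aws:policy/SecurityAudit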
Compliance with AWS' Acceptable Use Policy
------------------------------------------
Use of Scout2 does not require AWS users to complete and submit the AWS
Vulnerability / Penetration Testing Request Form. Scout2 only performs AWS API
calls to fetch configuration data and identify security gaps, which is not
considered security scanning as it does not impact AWS' network and
applications.
Usage
-----
After performing a number of AWS API calls, Scout2 will create a local HTML report and open it in the default browser.
Using a computer already configured to use the AWS CLI, boto3, or another AWS SDK, you may use Scout2 using the
following command:
::
$ Scout2
**Note:** EC2 instances with an IAM role fit in this category.
If multiple profiles are configured in your .aws/credentials and .aws/config files, you may specify which credentials
to use with the following command:
::
$ Scout2 --profile <PROFILE_NAME>
If you have a CSV file containing the API access key ID and secret, you may run Scout2 with the following command:
::
$ Scout2 --csv-credentials <CREDENTIALS.CSV>
**********************
Advanced documentation
**********************
The following command will provide the list of available command line options:
::
$ Scout2 --help
For further details, checkout our Wiki pages at https://github.com/nccgroup/Scout2/wiki.
*******
License
*******
GPLv2: See LICENSE.
.. _pip: https://pip.pypa.io/en/stable/index.html
| PypiClean |
/BespON-0.6.0.tar.gz/BespON-0.6.0/bespon/astnodes.py | # pylint: disable=C0301
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import collections
import itertools
from . import grammar
from . import load_types
from . import erring
OPEN_INDENTATION_LIST = grammar.LIT_GRAMMAR['open_indentation_list']
INLINE_ELEMENT_SEPARATOR = grammar.LIT_GRAMMAR['inline_element_separator']
PATH_SEPARATOR = grammar.LIT_GRAMMAR['path_separator']
INDENT = grammar.LIT_GRAMMAR['indent']
LINE_TERMINATOR_ASCII_OR_EMPTY_SET = set(grammar.LIT_GRAMMAR['line_terminator_ascii_seq'] + ('',))
LINE_TERMINATOR_UNICODE_OR_EMPTY_SET = set(grammar.LIT_GRAMMAR['line_terminator_unicode_seq'] + ('',))
# Key path nodes need to process their raw content into individual scalars.
# This involves checking scalar values for reserved words. All permutations
# of reserved words are generated and put in a set for this purpose. This
# avoids the overhead of using a regex. The float reserved words are excluded
# from valid values, to be consistent with numeric float values being
# excluded from key paths.
_reserved_words = [grammar.LIT_GRAMMAR[k] for k in ('none_type', 'bool_true', 'bool_false', 'infinity_word', 'not_a_number_word')]
_reserved_word_patterns = set([''.join(perm) for word in _reserved_words for perm in itertools.product(*zip(word.lower(), word.upper()))])
_key_path_reserved_word_vals = {grammar.LIT_GRAMMAR['none_type']: None,
grammar.LIT_GRAMMAR['bool_true']: True,
grammar.LIT_GRAMMAR['bool_false']: False}
_reserved_word_types = {grammar.LIT_GRAMMAR['none_type']: 'none',
grammar.LIT_GRAMMAR['bool_true']: 'bool',
grammar.LIT_GRAMMAR['bool_false']: 'bool'}
_node_common_slots = ['implicit_type', '_state',
'indent', 'at_line_start',
'inline', 'inline_indent',
'first_lineno', 'first_colno',
'last_lineno', 'last_colno',
'_resolved', 'final_val']
_node_data_slots = ['doc_comment', 'tag',
'external_indent',
'external_at_line_start',
'external_first_lineno',
'external_first_colno']
_node_scalar_slots = ['delim', 'block']
_node_collection_slots = ['view',
'nesting_depth', 'parent', 'index',
'key_path_parent', '_key_path_traversable',
'_key_path_scope',
'_open',
'_unresolved_dependency_count',
'extra_dependents']
class SourceNode(object):
'''
The highest-level node in the AST, representing the string, file, or
stream in which data is embedded.
In some cases, it would be possible to collapse the functionality of the
source node and the root node into a single node. The two are separated
because this makes the handling of a tag for the root node more parallel
with normal tags (the tag is external to the node). Having a source
node also makes it convenient to distinguish between where bespon content
begins and ends (source), versus where the actual data begins and ends
(root). For example, there may be comments before or after the data.
'''
__slots__ = (_node_common_slots + ['source_name', 'source_include_depth',
'source_initial_nesting_depth', 'nesting_depth',
'root', 'full_ast'])
def __init__(self, state):
self.implicit_type = 'source'
self.source_name = state.source_name
self.source_include_depth = state.source_include_depth
self.source_initial_nesting_depth = state.source_initial_nesting_depth
self.nesting_depth = state.source_initial_nesting_depth
self.full_ast = state.full_ast
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = self.last_lineno = state.lineno
self.first_colno = self.last_colno = state.colno
self.root = RootNode(self)
self._resolved = False
class RootNode(list):
'''
Lowest level in the AST except for the source node. A list subclass that
must ultimately contain only a single element.
'''
__slots__ = (_node_common_slots + _node_collection_slots +
['source_name', 'doc_comment', 'tag', 'end_tag'])
def __init__(self, source, list=list):
list.__init__(self)
self.implicit_type = 'root'
self.source_name = source.source_name
self.tag = None
self.end_tag = None
self._unresolved_dependency_count = 0
self.nesting_depth = source.source_initial_nesting_depth
self.parent = source
self.key_path_parent = None
self._key_path_scope = None
self._open = True
self._resolved = False
self._state = source._state
self.indent = source.indent
self.at_line_start = source.at_line_start
self.inline = source.inline
self.inline_indent = source.inline_indent
self.first_lineno = source.first_lineno
self.first_colno = source.first_colno
self.last_lineno = source.last_lineno
self.last_colno = source.last_colno
def check_append_scalar_val(self, node, len=len):
if len(self) == 1:
raise erring.ParseError('Only a single scalar or collection object is allowed at root level', node)
if not node.external_indent.startswith(self.indent):
raise erring.IndentationError(node)
if node._resolved:
self.append(node)
self._resolved = True
else:
node.parent = self
node.index = len(self)
self.append(node)
self._unresolved_dependency_count += 1
self.indent = node.external_indent
self.at_line_start = node.external_at_line_start
self.first_lineno = node.external_first_lineno
self.first_colno = node.external_first_colno
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._open = False
def check_append_collection(self, node, len=len):
if len(self) == 1:
raise erring.ParseError('Only a single scalar or collection object is allowed at root level', node)
if not node.external_indent.startswith(self.indent):
raise erring.IndentationError(node)
self.append(node)
node.parent = self
node.index = len(self)
node.nesting_depth = self.nesting_depth + 1
self._unresolved_dependency_count += 1
self.indent = node.external_indent
self.at_line_start = node.external_at_line_start
self.first_lineno = node.external_first_lineno
self.first_colno = node.external_first_colno
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._open = False
def _set_tag_doc_comment_externals(self, state, block=False, len=len):
'''
When there are doc comments or tags, set them and determine external
attributes. This is shared by all AST nodes below root level.
Incorporating it into individual node `__init__()` would be possible,
but would risk logic not staying in sync.
'''
doc_comment_node = state.next_doc_comment
tag_node = state.next_tag
# If there is no tag or doc comment, the external appearance of the object
# is identical to that of the object itself; that case is handled in
# individual `__init__()` since it is simple and avoids overhead.
# Otherwise, the external appearance is based on the doc comment, or the
# tag in its absence. There is no need to perform indentation checks for
# the external appearance, since these will be done during appending to
# the AST. The rules for cases when a doc comment or a tag is not at the
# start of a line in indentation-style syntax will allow for things that
# are less visually pleasing than might be desired. However, the tradeoff
# is that the rules are simple, relatively intuitive, and minimal while
# still preventing ambiguity.
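        # Summary of the precedence implemented below: the external_* attributes
        # come from the doc comment when one exists, otherwise from the tag;
        # a node with neither keeps its own values (handled in `__init__()`).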
if tag_node is None:
self.doc_comment = doc_comment_node
self.tag = None
state.next_doc_comment = None
state.next_cache = False
if doc_comment_node.inline:
if not self.indent.startswith(doc_comment_node.inline_indent):
raise erring.IndentationError(self)
elif doc_comment_node.at_line_start:
if not self.at_line_start:
raise erring.ParseError('In indentation-style syntax, a doc comment that starts at the beginning of a line cannot be followed immediately by the start of another object; a doc comment cannot set the indentation level', doc_comment_node, self)
if doc_comment_node.indent != self.indent:
raise erring.ParseError('Inconsistent indentation between doc comment and object', doc_comment_node, self)
elif self.at_line_start and (len(self.indent) <= len(doc_comment_node.indent) or not self.indent.startswith(doc_comment_node.indent)):
raise erring.IndentationError(self)
self.external_indent = doc_comment_node.indent
self.external_at_line_start = doc_comment_node.at_line_start
self.external_first_lineno = doc_comment_node.first_lineno
self.external_first_colno = doc_comment_node.first_colno
elif self.implicit_type in tag_node.compatible_implicit_types:
if tag_node.block_scalar and not self.block:
raise erring.ParseError('Tag has a "newline" or "indent" argument, but is applied to a scalar with no literal line breaks', tag_node, self)
self.doc_comment = doc_comment_node
self.tag = tag_node
state.next_tag = None
state.next_doc_comment = None
state.next_cache = False
tag_node.parent = self
if not tag_node._resolved:
self._unresolved_dependency_count += 1
if tag_node.label is not None:
state.ast.register_label(self)
if doc_comment_node is None:
if tag_node.external_inline:
if not self.indent.startswith(tag_node.inline_indent):
raise erring.IndentationError(self)
elif tag_node.at_line_start:
if not self.indent.startswith(tag_node.indent):
raise erring.IndentationError(self)
else:
if self.at_line_start and (len(self.indent) <= len(tag_node.indent) or not self.indent.startswith(tag_node.indent)):
raise erring.IndentationError(self)
if self.implicit_type in ('dict', 'list') and not self.inline:
raise erring.ParseError('The tag for an indentation-style collection must be at the start of a line', tag_node)
self.external_indent = tag_node.indent
self.external_at_line_start = tag_node.at_line_start
self.external_first_lineno = tag_node.first_lineno
self.external_first_colno = tag_node.first_colno
else:
if doc_comment_node.inline:
if not tag_node.indent.startswith(doc_comment_node.inline_indent):
raise erring.IndentationError(tag_node)
if not self.indent.startswith(doc_comment_node.inline_indent):
raise erring.IndentationError(self)
elif doc_comment_node.at_line_start:
if not tag_node.at_line_start:
raise erring.ParseError('In indentation-style syntax, a doc comment that starts at the beginning of a line cannot be followed immediately by the start of another object; a doc comment cannot set the indentation level', doc_comment_node, tag_node)
if doc_comment_node.indent != tag_node.indent:
raise erring.ParseError('Inconsistent indentation between doc comment and tag', doc_comment_node, tag_node)
if not self.indent.startswith(tag_node.indent):
raise erring.IndentationError(self)
elif tag_node.at_line_start:
if len(tag_node.indent) <= len(doc_comment_node.indent) or not tag_node.indent.startswith(doc_comment_node.indent):
raise erring.IndentationError(tag_node)
if not self.indent.startswith(tag_node.indent):
raise erring.IndentationError(self)
else:
if self.at_line_start and (len(self.indent) <= len(tag_node.indent) or not self.indent.startswith(tag_node.indent)):
raise erring.IndentationError(self)
if self.implicit_type in ('dict', 'list') and not self.inline:
raise erring.ParseError('The tag for an indentation-style collection must be at the start of a line', tag_node)
self.external_indent = doc_comment_node.indent
self.external_at_line_start = doc_comment_node.at_line_start
self.external_first_lineno = doc_comment_node.first_lineno
self.external_first_colno = doc_comment_node.first_colno
elif not self.inline and 'dict' in tag_node.compatible_implicit_types:
if tag_node.type is None:
raise erring.ParseError('Tag is incompatible with object; tags for dict-like objects in indentation-style syntax require an explicit type', tag_node, self)
state.ast.start_explicit_indentation_dict()
self.doc_comment = None
self.tag = None
self.external_indent = self.indent
self.external_at_line_start = self.at_line_start
self.external_first_lineno = self.first_lineno
self.external_first_colno = self.first_colno
else:
raise erring.ParseError('Tag is incompatible with object', tag_node, self)
class ScalarNode(object):
'''
Scalar object, including quoted (inline, block) and unquoted strings,
none, bool, int, and float. Also used to represent doc comments.
'''
__slots__ = _node_common_slots + _node_data_slots + _node_scalar_slots
def __init__(self, state, first_lineno, first_colno, last_lineno, last_colno,
implicit_type, delim=None, block=False,
set_tag_doc_comment_externals=_set_tag_doc_comment_externals):
self.implicit_type = implicit_type
self.delim = delim
self.block = block
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = first_lineno
self.first_colno = first_colno
self.last_lineno = last_lineno
self.last_colno = last_colno
self._resolved = True
if not state.next_cache:
self.doc_comment = None
self.tag = None
self.external_indent = self.indent
self.external_at_line_start = self.at_line_start
self.external_first_lineno = self.first_lineno
self.external_first_colno = self.first_colno
else:
set_tag_doc_comment_externals(self, state, block)
class FullScalarNode(object):
'''
ScalarNode with extra data for full AST.
'''
__slots__ = (_node_common_slots + _node_data_slots + _node_scalar_slots +
['view', 'index', 'parent',
'continuation_indent', 'raw_val', 'num_base',
'key_path', 'key_path_occurrences',
'assign_key_val_lineno', 'assign_key_val_colno',
'trailing_comment'])
def __init__(self, state, first_lineno, first_colno, last_lineno, last_colno,
implicit_type, delim=None, block=False, num_base=None,
continuation_indent=None,
set_tag_doc_comment_externals=_set_tag_doc_comment_externals):
self.view = None
self.trailing_comment = None
self.num_base = num_base
self.continuation_indent = continuation_indent
self.key_path = None
self.key_path_occurrences = None
self.implicit_type = implicit_type
self.delim = delim
self.block = block
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = first_lineno
self.first_colno = first_colno
self.last_lineno = last_lineno
self.last_colno = last_colno
self._resolved = True
if not state.next_cache:
self.doc_comment = None
self.tag = None
self.external_indent = self.indent
self.external_at_line_start = self.at_line_start
self.external_first_lineno = self.first_lineno
self.external_first_colno = self.first_colno
else:
set_tag_doc_comment_externals(self, state, block)
class CommentNode(object):
'''
Line comment or doc comment.
'''
__slots__ = _node_common_slots + _node_scalar_slots
def __init__(self, state, first_lineno, first_colno, last_lineno, last_colno,
implicit_type, delim=None, block=False):
self.implicit_type = implicit_type
self.delim = delim
self.block = block
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = first_lineno
self.first_colno = first_colno
self.last_lineno = last_lineno
self.last_colno = last_colno
self._resolved = True
class FullCommentNode(object):
'''
CommentNode with extra data for full AST.
'''
__slots__ = (_node_common_slots +
['delim', 'block', 'implicit_type', 'continuation_indent',
'raw_val'])
def __init__(self, state, first_lineno, first_colno, last_lineno, last_colno,
implicit_type, delim=None, block=None, continuation_indent=None):
self.implicit_type = implicit_type
self.delim = delim
self.block = block
self.continuation_indent = continuation_indent
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = first_lineno
self.first_colno = first_colno
self.last_lineno = last_lineno
self.last_colno = last_colno
self._resolved = True
class ListlikeNode(list):
'''
List-like collection.
'''
__slots__ = (_node_common_slots + _node_data_slots +
_node_collection_slots +
['internal_indent', 'start_trailing_comment', 'end_trailing_comment'])
def __init__(self, state_or_scalar_node,
set_tag_doc_comment_externals=_set_tag_doc_comment_externals,
key_path_parent=None, _key_path_traversable=False,
list=list):
list.__init__(self)
self.view = None
self.start_trailing_comment = None
self.end_trailing_comment = None
self.implicit_type = 'list'
self.key_path_parent = key_path_parent
self._key_path_traversable = _key_path_traversable
self._unresolved_dependency_count = 0
self._key_path_scope = None
state = state_or_scalar_node._state
self._state = state
if state_or_scalar_node is state:
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = self.last_lineno = state.lineno
self.first_colno = self.last_colno = state.colno
else:
self.indent = state_or_scalar_node.external_indent
self.at_line_start = state_or_scalar_node.external_at_line_start
self.inline = state_or_scalar_node.inline
self.inline_indent = state_or_scalar_node.inline_indent
self.first_lineno = self.last_lineno = state_or_scalar_node.external_first_lineno
self.first_colno = self.last_colno = state_or_scalar_node.external_first_colno
self.extra_dependents = None
self.internal_indent = None
if _key_path_traversable:
self.doc_comment = None
self.tag = None
elif not state.next_cache:
self.doc_comment = None
self.tag = None
self.external_indent = self.indent
self.external_at_line_start = self.at_line_start
self.external_first_lineno = self.first_lineno
self.external_first_colno = self.first_colno
else:
set_tag_doc_comment_externals(self, state)
self._resolved = False
self._open = False
def check_append_scalar_key(self, node):
raise erring.ParseError('Cannot append a key-value pair directly to a list-like object', node)
def check_append_key_path_scalar_key(self, node):
raise erring.ParseError('Key path is incompatible with previously created list-like object', node, self)
def check_append_scalar_val(self, node, len=len):
if not self._open:
if self.inline:
raise erring.ParseError('Cannot append to a closed list-like object; check for a missing "{0}"'.format(INLINE_ELEMENT_SEPARATOR), node)
else:
raise erring.ParseError('Cannot append to a closed list-like object; check for incorrect indentation or missing "{0}"'.format(OPEN_INDENTATION_LIST), node)
if self.inline:
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
elif node.external_indent != self.internal_indent:
if self.internal_indent is None:
if not self._key_path_traversable and (len(node.external_indent) <= len(self.indent) or not node.external_indent.startswith(self.indent)):
raise erring.IndentationError(node)
self.internal_indent = node.external_indent
else:
raise erring.IndentationError(node)
self.append(node)
if not node._resolved:
node.parent = self
node.index = len(self)
self._unresolved_dependency_count += 1
elif self._state.full_ast:
node.parent = self
node.index = len(self)
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._open = False
def check_append_key_path_scalar_val(self, node, len=len):
self.append(node)
if not node._resolved:
node.parent = self
node.index = len(self)
self._unresolved_dependency_count += 1
elif self._state.full_ast:
node.parent = self
node.index = len(self)
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._open = False
def check_append_collection(self, node, len=len):
if not self._open:
if self.inline:
raise erring.ParseError('Cannot append to a closed list-like object; check for a missing "{0}"'.format(INLINE_ELEMENT_SEPARATOR), node)
else:
if node.implicit_type == 'dict':
                    raise erring.ParseError('Cannot start a new dict-like object in a closed list-like object; check for incorrect indentation or missing "{0}"'.format(OPEN_INDENTATION_LIST), node)
raise erring.ParseError('Cannot append to a closed list-like object; check for incorrect indentation or missing "{0}"'.format(OPEN_INDENTATION_LIST), node)
if self.inline:
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
elif node.external_indent != self.internal_indent:
if self.internal_indent is None:
if not self._key_path_traversable and (len(node.external_indent) <= len(self.indent) or not node.external_indent.startswith(self.indent)):
raise erring.IndentationError(node)
self.internal_indent = node.external_indent
else:
raise erring.IndentationError(node)
        self.append(node)
        node.parent = self
        node.index = len(self)
        node.nesting_depth = self.nesting_depth + 1
        if not node._resolved:
            self._unresolved_dependency_count += 1
        self.last_lineno = node.last_lineno
        self.last_colno = node.last_colno
        self._open = False
def check_append_key_path_collection(self, node, len=len):
        self.append(node)
        node.parent = self
        node.index = len(self)
        node.nesting_depth = self.nesting_depth + 1
        if not node._resolved:
            self._unresolved_dependency_count += 1
        self.last_lineno = node.last_lineno
        self.last_colno = node.last_colno
        self._open = False
class DictlikeNode(collections.OrderedDict):
'''
Dict-like collection.
'''
__slots__ = (_node_common_slots + _node_data_slots +
_node_collection_slots +
['_next_key', '_awaiting_val', 'key_nodes', 'start_trailing_comment', 'end_trailing_comment'])
def __init__(self, state_or_scalar_node,
set_tag_doc_comment_externals=_set_tag_doc_comment_externals,
key_path_parent=None, _key_path_traversable=False,
OrderedDict=collections.OrderedDict):
OrderedDict.__init__(self)
self.view = None
self.start_trailing_comment = None
self.end_trailing_comment = None
self.implicit_type = 'dict'
self.key_path_parent = key_path_parent
self._key_path_traversable = _key_path_traversable
self._unresolved_dependency_count = 0
self._key_path_scope = None
self._awaiting_val = False
self.key_nodes = {}
state = state_or_scalar_node._state
self._state = state
if state_or_scalar_node is state:
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = self.last_lineno = state.lineno
self.first_colno = self.last_colno = state.colno
else:
self.indent = state_or_scalar_node.external_indent
self.at_line_start = state_or_scalar_node.external_at_line_start
self.inline = state_or_scalar_node.inline
self.inline_indent = state_or_scalar_node.inline_indent
self.first_lineno = self.last_lineno = state_or_scalar_node.external_first_lineno
self.first_colno = self.last_colno = state_or_scalar_node.external_first_colno
self.extra_dependents = None
if _key_path_traversable:
self.doc_comment = None
self.tag = None
elif not state.next_cache:
self.doc_comment = None
self.tag = None
self.external_indent = self.indent
self.external_at_line_start = self.at_line_start
self.external_first_lineno = self.first_lineno
self.external_first_colno = self.first_colno
else:
set_tag_doc_comment_externals(self, state)
self._resolved = False
self._open = False
def check_append_scalar_key(self, node):
if self.inline:
if not self._open:
raise erring.ParseError('Cannot add a key to a closed object; perhaps a "{0}" is missing'.format(INLINE_ELEMENT_SEPARATOR), node)
if self._awaiting_val:
raise erring.ParseError('Missing value; cannot add a key until the previous key has been given a value', node, self.key_nodes[self._next_key])
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
else:
            # Indentation-style dict-like objects are always open, so there is no
# test for that. In contrast, indentation-style list-like objects
# must be explicitly opened with `*`.
if self._awaiting_val:
raise erring.ParseError('Missing value; cannot add a key until the previous key has been given a value', node, self.key_nodes[self._next_key])
if not node.external_at_line_start:
raise erring.ParseError('A key must be at the start of the line in indentation-style syntax', node)
if node.external_indent != self.indent:
raise erring.IndentationError(node)
# Set `_open` so that dict-like and list-like objects share a
# common test for completeness.
self._open = True
# No need to check for valid key type; already done at AST level
key = node.final_val
if key in self:
raise erring.ParseError('Duplicate keys are prohibited', node, self.key_nodes[key])
self.key_nodes[key] = node
self._next_key = key
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = True
def check_append_key_path_scalar_key(self, node):
if self._awaiting_val:
raise erring.ParseError('Missing value; cannot add a key until the previous key has been given a value', node, self.key_nodes[self._next_key])
key = node.final_val
if key in self:
raise erring.ParseError('Duplicate keys are prohibited', node, self.key_nodes[key])
self.key_nodes[key] = node
self._next_key = key
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = True
self._open = True
def check_append_scalar_val(self, node, len=len):
if not self._awaiting_val:
raise erring.ParseError('Missing key; cannot add a value until a key has been given', node)
if self.inline:
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
elif node.external_at_line_start:
# Don't need to check indentation when the value starts on the
# same line as the key, because any value that starts on that line
# will be consistent with the key indentation.
if len(node.external_indent) <= len(self.indent) or not node.external_indent.startswith(self.indent):
raise erring.IndentationError(node)
self[self._next_key] = node
if not node._resolved:
node.parent = self
node.index = self._next_key
self._unresolved_dependency_count += 1
elif self._state.full_ast:
node.parent = self
node.index = self._next_key
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = False
self._open = False
def check_append_key_path_scalar_val(self, node):
if not self._awaiting_val:
raise erring.ParseError('Missing key; cannot add a value until a key has been given', node)
self[self._next_key] = node
if not node._resolved:
node.parent = self
node.index = self._next_key
self._unresolved_dependency_count += 1
elif self._state.full_ast:
node.parent = self
node.index = self._next_key
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = False
self._open = False
def check_append_collection(self, node, len=len):
if not self._awaiting_val:
raise erring.ParseError('Missing key; cannot add a value until a key has been given', node)
if self.inline:
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
elif node.external_at_line_start:
if len(node.external_indent) <= len(self.indent) or not node.external_indent.startswith(self.indent):
raise erring.IndentationError(node)
key = self._next_key
self[key] = node
node.parent = self
node.index = key
node.nesting_depth = self.nesting_depth + 1
if not node._resolved:
self._unresolved_dependency_count += 1
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = False
self._open = False
def check_append_key_path_collection(self, node):
if not self._awaiting_val:
raise erring.ParseError('Missing key; cannot add a value until a key has been given', node)
key = self._next_key
self[key] = node
node.parent = self
node.index = key
node.nesting_depth = self.nesting_depth + 1
if not node._resolved:
self._unresolved_dependency_count += 1
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = False
self._open = False
class TagNode(collections.OrderedDict):
'''
Tag for explicit typing, configuring collection types, defining labels,
or setting newlines for string types.
'''
__slots__ = (_node_common_slots +
['external_inline',
'_open', '_unresolved_dependency_count', 'parent',
'_next_key', '_awaiting_val', 'key_nodes',
'type', 'label',
'compatible_implicit_types',
'block_scalar',
'collection_config', 'collection_config_nodes'])
def __init__(self, state, first_lineno, first_colno, external_inline,
set_tag_doc_comment_externals=_set_tag_doc_comment_externals,
OrderedDict=collections.OrderedDict,
default_compatible_implicit_types = load_types.IMPLICIT_TYPES):
OrderedDict.__init__(self)
self.implicit_type = 'tag'
self.type = None
self.compatible_implicit_types = default_compatible_implicit_types
self.label = None
self.block_scalar = False
self.collection_config = False
self.collection_config_nodes = None
self._unresolved_dependency_count = 0
self._open = False
self._awaiting_val = False
self._next_key = None
self.key_nodes = {}
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.external_inline = external_inline
self.first_lineno = first_lineno
self.first_colno = first_colno
self.last_lineno = first_lineno
self.last_colno = first_colno
self._resolved = False
_scalar_compatible_implicit_types = load_types.IMPLICIT_SCALAR_TYPES
_collection_compatible_implicit_types = load_types.IMPLICIT_COLLECTION_TYPES
_dictlike_compatible_implicit_types = set(['dict'])
_listlike_compatible_implicit_types = set(['list'])
_general_keywords = set(['label'])
_block_scalar_keywords = set(['newline', 'indent'])
_collection_keywords = set(['init'])
_dictlike_keywords = set(['recmerge', 'default'])
_listlike_keywords = set(['extend'])
_any_collection_keywords = _collection_keywords | _dictlike_keywords | _listlike_keywords
_keywords = _general_keywords | _block_scalar_keywords | _collection_keywords | _dictlike_keywords | _listlike_keywords
def check_append_scalar_key(self, node, len=len):
if not self._open:
raise erring.ParseError('Cannot add a key to a closed object; perhaps a "{0}" is missing'.format(INLINE_ELEMENT_SEPARATOR), node)
if self._awaiting_val:
raise erring.ParseError('Missing value; cannot add a key until the previous key has been given a value', node, self.key_nodes[self._next_key])
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
if node.delim is not None:
raise erring.ParseError('Only unquoted keys are allowed in tags', node)
key = node.final_val
if key in self:
raise erring.ParseError('Duplicate keys are prohibited', node, self.key_nodes[key])
if key not in self._keywords:
raise erring.ParseError('Invalid tag keyword "{0}"'.format(key), node)
if key in self._block_scalar_keywords:
if 'str' not in self.compatible_implicit_types:
raise erring.ParseError('Tag keyword argument "{0}" is incompatible with tag type'.format(key), node)
self.compatible_implicit_types = self._scalar_compatible_implicit_types
self.block_scalar = True
elif key in self._collection_keywords:
if 'dict' not in self.compatible_implicit_types and 'list' not in self.compatible_implicit_types:
raise erring.ParseError('Tag keyword argument "{0}" is incompatible with type'.format(key), node)
# #### If add copy or deepcopy variants
# if key[:5] == 'deep_':
# other_key = key[5:]
# else:
# other_key = 'deep_' + key
# if other_key in self:
            # raise erring.ParseError('Encountered mutually exclusive collection config settings "{0}" and "{1}"'.format(key, other_key), node, self.key_nodes[other_key])
if len(self.compatible_implicit_types) > 1:
self.compatible_implicit_types = self._collection_compatible_implicit_types
self.collection_config = True
elif key in self._dictlike_keywords:
if 'dict' not in self.compatible_implicit_types:
raise erring.ParseError('Tag keyword argument "{0}" is incompatible with type'.format(key), node)
self.compatible_implicit_types = self._dictlike_compatible_implicit_types
self.collection_config = True
elif key in self._listlike_keywords:
if 'list' not in self.compatible_implicit_types:
raise erring.ParseError('Tag keyword argument "{0}" is incompatible with type'.format(key), node)
self.compatible_implicit_types = self._listlike_compatible_implicit_types
self.collection_config = True
self.key_nodes[key] = node
self._next_key = key
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = True
def check_append_scalar_val(self, node, indent=INDENT,
line_terminator_ascii_or_empty_set=LINE_TERMINATOR_ASCII_OR_EMPTY_SET,
line_terminator_unicode_or_empty_set=LINE_TERMINATOR_UNICODE_OR_EMPTY_SET):
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
if not self._awaiting_val:
data_types = self._state.data_types
if self._open and node.implicit_type == 'str' and node.delim is None and node.final_val in data_types and not self:
self['type'] = node
val = node.final_val
self.type = val
self.compatible_implicit_types = data_types[val].compatible_implicit_types
else:
if not self._open:
raise erring.ParseError('Cannot append to a closed tag; check for a missing "{0}"'.format(INLINE_ELEMENT_SEPARATOR), node)
if node.implicit_type != 'str':
raise erring.ParseError('Unexpected object in tag; check for a missing key', node)
if node.final_val in data_types or node.final_val in self._state.extended_data_types:
if node.final_val not in data_types:
raise erring.ParseError('Type "{0}" is not enabled (extended_data_types=False)'.format(node.final_val), node)
if node.delim is not None:
raise erring.ParseError('Type names must be unquoted', node)
if self:
raise erring.ParseError('Misplaced type; type must be first in a tag', node)
raise erring.ParseError('Missing key or unknown type; cannot add a value until a key has been given', node)
raise erring.ParseError('Missing key or unknown type; cannot add a value until a key has been given', node)
else:
key = self._next_key
if key == 'label':
if node.implicit_type != 'str' or node.delim is not None:
raise erring.ParseError('Label values must be unquoted strings', node)
self[key] = node
self.label = node.final_val
elif key in self._block_scalar_keywords:
if node.implicit_type != 'str' or node.first_lineno != node.last_lineno:
raise erring.ParseError('Keyword argument "{0}" only takes inline string values that are not broken over multiple lines'.format(key), node)
val = node.final_val
if key == 'newline':
if val not in line_terminator_unicode_or_empty_set:
raise erring.ParseError('Invalid value for "newline"; must be a Unicode line termination sequence or the empty string', node)
if self.type is not None and self._state.data_types[self.type].ascii_bytes and val not in line_terminator_ascii_or_empty_set:
raise erring.ParseError('Invalid value for "newline"; must be a Unicode line termination sequence in the ASCII range (or the empty string) for type "{0}"'.format(self.type), node)
self[key] = node
elif key == 'indent':
if val.lstrip(indent) != '':
raise erring.ParseError('Invalid value for "indent"; must be a sequence of spaces and/or tabs', node)
self[key] = node
                else:
                    raise ValueError('Unexpected block scalar keyword "{0}"'.format(key))
elif key in self._any_collection_keywords:
if node.implicit_type != 'alias':
raise erring.ParseError('Collection config requires an alias or list of aliases', node)
node.parent = self
node.index = key
self[key] = node
self._unresolved_dependency_count += 1
            else:
                raise ValueError('Unexpected tag keyword "{0}"'.format(key))
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = False
self._open = False
def check_append_collection(self, node):
if node.implicit_type != 'alias_list':
raise erring.ParseError('Collections are prohibited in tags, except for lists of aliases used in collection config', node)
if not node.indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
if not self._awaiting_val:
raise erring.ParseError('Missing key; cannot add a value until a key has been given', node)
key = self._next_key
if key not in self._collection_keywords:
raise erring.ParseError('Collections are prohibited in tags, except for lists of aliases used in collection config', node)
self[key] = node
node.parent = self
node.index = key
node.nesting_depth = 0
self._unresolved_dependency_count += 1
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._awaiting_val = False
self._open = False
class AliasListNode(list):
'''
List of alias nodes, for collection config in tags.
'''
__slots__ = (_node_common_slots + ['nesting_depth', 'parent', 'index',
'_open', '_unresolved_dependency_count'])
def __init__(self, state, list=list):
list.__init__(self)
self.implicit_type = 'alias_list'
self._state = state
self._unresolved_dependency_count = 0
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = self.last_lineno = state.lineno
self.first_colno = self.last_colno = state.colno
self._resolved = False
self._open = False
def check_append_scalar_key(self, node):
raise erring.ParseError('Cannot append a key-value pair directly to a list-like object', node)
def check_append_scalar_val(self, node,
self_alias=grammar.LIT_GRAMMAR['self_alias'],
len=len):
if not self._open:
raise erring.ParseError('Cannot append to a closed alias list; check for a missing "{0}"'.format(INLINE_ELEMENT_SEPARATOR), node)
if not node.external_indent.startswith(self.inline_indent):
raise erring.IndentationError(node)
if node.implicit_type != 'alias':
raise erring.ParseError('Only aliases are allowed in alias lists', node)
self.append(node)
node.parent = self
node.index = len(self)
self._unresolved_dependency_count += 1
self.last_lineno = node.last_lineno
self.last_colno = node.last_colno
self._open = False
def check_append_collection(self, node, len=len):
raise erring.ParseError('Only aliases are allowed in alias lists; collections are not permitted', node)
class AliasNode(object):
'''
Alias node.
'''
__slots__ = (_node_common_slots + _node_data_slots +
['parent', 'index', 'target_root', 'target_path',
'target_node', 'target_label', 'extra_dependents'])
def __init__(self, state, alias_raw_val, path_separator=PATH_SEPARATOR,
set_tag_doc_comment_externals=_set_tag_doc_comment_externals):
self.implicit_type = 'alias'
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = self.last_lineno = state.lineno
self.first_colno = state.colno
self.last_colno = state.colno + len(alias_raw_val) - 1
self._resolved = False
self.extra_dependents = None
if not state.next_cache:
self.doc_comment = None
self.tag = None
self.external_indent = self.indent
self.external_at_line_start = self.at_line_start
self.external_first_lineno = self.first_lineno
self.external_first_colno = self.first_colno
else:
set_tag_doc_comment_externals(self, state)
alias_path = alias_raw_val[1:].split(path_separator)
self.target_root = None
self.target_node = None
self.target_label = alias_path[0]
if len(alias_path) == 1:
self.target_path = None
else:
self.target_path = alias_path[1:]
class KeyPathNode(list):
'''
Abstract key path.
Used as dict keys or in sections for assigning in nested objects.
'''
    __slots__ = (_node_common_slots + _node_data_slots +
                 ['resolved', 'raw_val',
                  'assign_key_val_lineno', 'assign_key_val_colno'])
def __init__(self, state, key_path_raw_val,
set_tag_doc_comment_externals=_set_tag_doc_comment_externals,
open_indentation_list=OPEN_INDENTATION_LIST,
path_separator=PATH_SEPARATOR,
reserved_word_patterns=_reserved_word_patterns,
key_path_reserved_word_vals=_key_path_reserved_word_vals,
reserved_word_types=_reserved_word_types,
ScalarNode=ScalarNode, FullScalarNode=FullScalarNode,
list=list):
list.__init__(self)
self.implicit_type = 'key_path'
self._state = state
self.indent = state.indent
self.at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = self.last_lineno = state.lineno
self.first_colno = state.colno
self.last_colno = state.colno + len(key_path_raw_val) - 1
self._resolved = False
if not state.next_cache:
self.doc_comment = None
self.tag = None
self.external_indent = self.indent
self.external_at_line_start = self.at_line_start
self.external_first_lineno = self.first_lineno
self.external_first_colno = self.first_colno
else:
# Cached objects that would be invalid for a key path are filtered
# out in decoding, so using the shared externals functions here
# won't introduce any errors and doesn't require any checks
set_tag_doc_comment_externals(self, state)
first_colno = last_colno = self.first_colno
lineno = self.first_lineno
for kp_elem_raw in key_path_raw_val.split(path_separator):
if kp_elem_raw == open_indentation_list:
self.append(kp_elem_raw)
last_colno += 2
first_colno = last_colno
else:
last_colno += len(kp_elem_raw) - 1
                if kp_elem_raw in reserved_word_patterns:
                    try:
                        kp_elem_final = key_path_reserved_word_vals[kp_elem_raw]
                    except KeyError:
                        # `implicit_type` has not been determined yet at this
                        # point; 'str' is only a placeholder for the temporary
                        # node used to report the error.
                        kp_elem_node = ScalarNode(state, lineno, first_colno, lineno, last_colno, 'str')
                        if kp_elem_raw.lower() in reserved_word_types:
                            raise erring.ParseError('Invalid capitalization of reserved word "{0}"'.format(kp_elem_raw.lower()), kp_elem_node)
                        elif kp_elem_raw == kp_elem_raw.lower():
                            raise erring.ParseError('Reserved word "{0}" is not allowed in key paths'.format(kp_elem_raw.lower()), kp_elem_node)
                        else:
                            raise erring.ParseError('Reserved word "{0}" is not allowed in key paths, and has invalid capitalization'.format(kp_elem_raw.lower()), kp_elem_node)
                    implicit_type = reserved_word_types[kp_elem_raw]
else:
kp_elem_final = kp_elem_raw
implicit_type = 'str'
                if not state.full_ast:
                    kp_elem_node = ScalarNode(state, lineno, first_colno, lineno, last_colno, implicit_type)
                else:
                    kp_elem_node = FullScalarNode(state, lineno, first_colno, lineno, last_colno,
                                                  implicit_type)
if state.full_ast:
kp_elem_node.raw_val = kp_elem_raw
kp_elem_node.key_path = self
kp_elem_node.final_val = kp_elem_final
kp_elem_node._resolved = True
self.append(kp_elem_node)
last_colno += 2
first_colno = last_colno
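# Informal sketch of KeyPathNode splitting, assuming PATH_SEPARATOR is '.' and
# OPEN_INDENTATION_LIST is '*' (both are imported constants not shown here):
# a raw key path like 'outer.*.inner' yields the element sequence
# ['outer' (ScalarNode), '*' (raw string), 'inner' (ScalarNode)], with each
# scalar element carrying line/column info for error reporting.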
class SectionNode(object):
'''
Section.
'''
__slots__ = (_node_common_slots + _node_data_slots +
['delim', 'key_path', 'scalar', '_end_delim'])
def __init__(self, state, delim):
self.implicit_type = 'section'
self.delim = delim
self.key_path = None
self.scalar = None
self._end_delim = False
# Sections never have tags or doc comments, and thus technically don't
# require external attributes. However, external attributes are
# created anyway so that there is a common set of attributes for
# creating collections based on scalars, key paths, and sections.
# There is no need for tag or doc comment checks here, because that
# is already done during decoding.
self._state = state
self.indent = self.external_indent = state.indent
self.at_line_start = self.external_at_line_start = state.at_line_start
self.inline = state.inline
self.inline_indent = state.inline_indent
self.first_lineno = self.last_lineno = self.external_first_lineno = state.lineno
self.first_colno = self.last_colno = self.external_first_colno = state.colno
self._resolved = False | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/geo/charting/Feature.js | define("dojox/geo/charting/Feature",["dojo/_base/lang","dojo/_base/declare","dojo/_base/array","dojo/_base/html","dojo/dom","dojo/_base/event","dojox/gfx/fx","dojox/color"],function(_1,_2,_3,_4,_5,_6,fx,_7){
return _2("dojox.geo.charting.Feature",null,{_isZoomIn:false,isSelected:false,markerText:null,constructor:function(_8,_9,_a){
this.id=_9;
this.shape=_8.mapObj.createGroup();
this.parent=_8;
this.mapObj=_8.mapObj;
this._bbox=_a.bbox;
this._center=_a.center;
this._defaultFill=_8.defaultColor;
this._highlightFill=_8.highlightColor;
this._defaultStroke={width:this._normalizeStrokeWeight(0.5),color:"white"};
var _b=(_1.isArray(_a.shape[0]))?_a.shape:[_a.shape];
_3.forEach(_b,function(_c){
this.shape.createPolyline(_c).setStroke(this._defaultStroke);
},this);
this.unsetValue();
},unsetValue:function(){
this.value=null;
this.unsetColor();
},unsetColor:function(){
this._defaultFill=this.parent.defaultColor;
var _d=new _7.Color(this.parent.defaultColor).toHsl();
_d.l=1.2*_d.l;
this._highlightFill=_7.fromHsl(_d);
this._setFillWith(this._defaultFill);
},setValue:function(_e){
this.value=_e;
if(_e==null){
this.unsetValue();
}else{
if(this.parent.series.length!=0){
for(var i=0;i<this.parent.series.length;i++){
var _f=this.parent.series[i];
if((_e>=_f.min)&&(_e<_f.max)){
this._setFillWith(_f.color);
this._defaultFill=_f.color;
var col=new _7.Color(_f.color).toHsv();
col.v=(col.v+20);
this._highlightFill=_7.fromHsv(col);
return;
}
}
this.unsetColor();
}
}
},_setFillWith:function(_10){
var _11=(_1.isArray(this.shape.children))?this.shape.children:[this.shape.children];
_3.forEach(_11,_1.hitch(this,function(_12){
if(this.parent.colorAnimationDuration>0){
var _13=fx.animateFill({shape:_12,color:{start:_12.getFill(),end:_10},duration:this.parent.colorAnimationDuration});
_13.play();
}else{
_12.setFill(_10);
}
}));
},_setStrokeWith:function(_14){
var _15=(_1.isArray(this.shape.children))?this.shape.children:[this.shape.children];
_3.forEach(_15,function(_16){
_16.setStroke({color:_14.color,width:_14.width,join:"round"});
});
},_normalizeStrokeWeight:function(_17){
var _18=this.shape._getRealMatrix()||{xx:1};
return (dojox.gfx.renderer!="vml")?_17/_18.xx:_17;
},_onmouseoverHandler:function(evt){
this.parent.onFeatureOver(this);
this._setFillWith(this._highlightFill);
this.mapObj.marker.show(this.id,evt);
},_onmouseoutHandler:function(){
this._setFillWith(this._defaultFill);
this.mapObj.marker.hide();
_4.style("mapZoomCursor","display","none");
},_onmousemoveHandler:function(evt){
if(this.mapObj.marker._needTooltipRefresh){
this.mapObj.marker.show(this.id,evt);
}
if(this.isSelected){
if(this.parent.enableFeatureZoom){
evt=_6.fix(evt||window.event);
_4.style("mapZoomCursor","left",evt.pageX+12+"px");
_4.style("mapZoomCursor","top",evt.pageY+"px");
_4.byId("mapZoomCursor").className=this._isZoomIn?"mapZoomOut":"mapZoomIn";
_4.style("mapZoomCursor","display","block");
}else{
_4.style("mapZoomCursor","display","none");
}
}
},_onclickHandler:function(evt){
this.parent.onFeatureClick(this);
if(!this.isSelected){
this.parent.deselectAll();
this.select(true);
this._onmousemoveHandler(evt);
}else{
if(this.parent.enableFeatureZoom){
if(this._isZoomIn){
this._zoomOut();
}else{
this._zoomIn();
}
}
}
},select:function(_19){
if(_19){
this.shape.moveToFront();
this._setStrokeWith({color:"black",width:this._normalizeStrokeWeight(2)});
this._setFillWith(this._highlightFill);
this.isSelected=true;
this.parent.selectedFeature=this;
}else{
this._setStrokeWith(this._defaultStroke);
this._setFillWith(this._defaultFill);
this.isSelected=false;
this._isZoomIn=false;
}
},_zoomIn:function(){
var _1a=this.mapObj.marker;
_1a.hide();
this.parent.fitToMapArea(this._bbox,15,true,_1.hitch(this,function(){
this._setStrokeWith({color:"black",width:this._normalizeStrokeWeight(2)});
_1a._needTooltipRefresh=true;
this.parent.onZoomEnd(this);
}));
this._isZoomIn=true;
_5.byId("mapZoomCursor").className="";
},_zoomOut:function(){
var _1b=this.mapObj.marker;
_1b.hide();
this.parent.fitToMapContents(3,true,_1.hitch(this,function(){
this._setStrokeWith({color:"black",width:this._normalizeStrokeWeight(2)});
_1b._needTooltipRefresh=true;
this.parent.onZoomEnd(this);
}));
this._isZoomIn=false;
_5.byId("mapZoomCursor").className="";
},init:function(){
this.shape.id=this.id;
this.tooltip=null;
}});
}); | PypiClean |
/MapProxy-1.16.0.tar.gz/MapProxy-1.16.0/mapproxy/script/conf/utils.py |
from copy import copy
from mapproxy.compat import iteritems
__all__ = ['update_config', 'MapProxyYAMLDumper']
def update_config(conf, overwrites):
wildcard_keys = []
for k, v in iteritems(overwrites):
if k == '__all__':
continue
if k.startswith('___') or k.endswith('___'):
wildcard_keys.append(k)
continue
if k.endswith('__extend__'):
k = k[:-len('__extend__')]
if k not in conf:
conf[k] = v
elif isinstance(v, list):
conf[k].extend(v)
else:
raise ValueError('cannot extend non-list:', v)
elif k not in conf:
conf[k] = copy(v)
else:
if isinstance(conf[k], dict) and isinstance(v, dict):
conf[k] = update_config(conf[k], v)
else:
conf[k] = copy(v)
if '__all__' in overwrites:
v = overwrites['__all__']
for conf_k, conf_v in iteritems(conf):
if isinstance(conf_v, dict):
conf[conf_k] = update_config(conf_v, v)
else:
conf[conf_k] = v
if wildcard_keys:
for key in wildcard_keys:
v = overwrites[key]
if key.startswith('___'):
key = key[3:]
key_check = lambda x: x.endswith(key)
else:
key = key[:-3]
key_check = lambda x: x.startswith(key)
for conf_k, conf_v in iteritems(conf):
if not key_check(conf_k):
continue
if isinstance(conf_v, dict):
conf[conf_k] = update_config(conf_v, v)
else:
conf[conf_k] = v
return conf
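# Informal examples of the overwrite semantics above (illustration only, not
# part of the public MapProxy API):
#   update_config({'a': {'x': 1}}, {'a': {'y': 2}})
#       -> {'a': {'x': 1, 'y': 2}}          # dicts merge recursively
#   update_config({'a': [1]}, {'a__extend__': [2]})
#       -> {'a': [1, 2]}                    # lists extend instead of replace
#   update_config({'img_cache': {}, 'tms': 1}, {'___cache': {'meta': 2}})
#       -> {'img_cache': {'meta': 2}, 'tms': 1}   # '___' prefix matches key suffix
#   update_config({'cache_a': {}, 'x': 1}, {'cache___': {'meta': 2}})
#       -> {'cache_a': {'meta': 2}, 'x': 1}       # '___' suffix matches key prefix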
from yaml.serializer import Serializer
from yaml.nodes import ScalarNode, SequenceNode, MappingNode
from yaml.emitter import Emitter
from yaml.representer import SafeRepresenter
from yaml.resolver import Resolver
class _MixedFlowSortedSerializer(Serializer):
def serialize_node(self, node, parent, index):
# reset any anchors
if parent is None:
for k in self.anchors:
self.anchors[k] = None
self.serialized_nodes = {}
if isinstance(node, SequenceNode) and all(isinstance(item, ScalarNode) for item in node.value):
node.flow_style = True
elif isinstance(node, MappingNode):
node.value.sort(key=lambda x: x[0].value)
return Serializer.serialize_node(self, node, parent, index)
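# Example of the effect (informal): with this serializer, a scalar-only list
# such as [900913, 4326] is emitted inline in flow style as `[900913, 4326]`,
# while mappings and nested sequences keep block style; mapping keys are
# emitted sorted, and anchors/aliases are suppressed by resetting the anchors.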
class _EmptyNoneRepresenter(SafeRepresenter):
    def represent_none(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:null', u'')
_EmptyNoneRepresenter.add_representer(type(None), _EmptyNoneRepresenter.represent_none)
class MapProxyYAMLDumper(Emitter, _MixedFlowSortedSerializer, _EmptyNoneRepresenter, Resolver):
"""
YAML dumper that uses block style by default, except for
node-only sequences. Also sorts dicts by key, prevents `none`
for empty entries and prevents any anchors.
"""
def __init__(self, stream,
default_style=None, default_flow_style=False,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
_EmptyNoneRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
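# Minimal usage sketch (standard PyYAML pattern; ``yaml.dump`` accepts a
# custom Dumper class):
#   import yaml
#   with open('mapproxy.yaml', 'w') as fp:
#       yaml.dump({'services': {'wms': None}, 'srs': ['EPSG:4326', 'EPSG:3857']},
#                 fp, Dumper=MapProxyYAMLDumper, default_flow_style=False)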
from mapproxy.request.base import BaseRequest, url_decode
from mapproxy.client.http import open_url
from mapproxy.compat.modules import urlparse
def wms_capapilities_url(url):
parsed_url = urlparse.urlparse(url)
base_req = BaseRequest(
url=url.split('?', 1)[0],
param=url_decode(parsed_url.query),
)
base_req.params['service'] = 'WMS'
if not base_req.params['version']:
base_req.params['version'] = '1.1.1'
base_req.params['request'] = 'GetCapabilities'
return base_req.complete_url
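# Rough sketch of the expected result (the exact parameter order depends on
# url_decode/complete_url):
#   wms_capapilities_url('http://example.com/service?map=test')
#   -> 'http://example.com/service?map=test&service=WMS&version=1.1.1&request=GetCapabilities'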
def download_capabilities(url):
capabilities_url = wms_capapilities_url(url)
return open_url(capabilities_url) | PypiClean |
/FlyBIDS-0.0.2.tar.gz/FlyBIDS-0.0.2/README.md | # FlyBIDS
Quickly build and inspect Flywheel BIDS data.
# Installation
- Development version:
Clone this repo and install the package with `pip`
```
git clone https://github.com/PennLINC/FlyBIDS.git
cd FlyBIDS
pip install -e .
```
- Stable Releases:
Install from PyPI:
```
pip install FlyBIDS
```
# Examples
```python
>>> from FlyBIDS.BIDSLayout import FlyBIDSLayout
>>> fbl = FlyBIDSLayout('gear_testing', subjects=['sub-1832999514', 'sub-2216595430'])
>>> print(fbl)
FlyBIDS Layout: Project 'gear_testing' | Subjects: 2 | Sessions: 3
>>> as_df = fbl.to_df()
>>> print(as_df)
template Filename \
0 anat_file sub-1832999514_ses-PNC1_T1w.nii.gz
1 func_file sub-1832999514_ses-PNC1_task-rest_acq-singleba...
2 anat_file sub-1832999514_ses-PNC2_T1w.nii.gz
3 acquisition_file sub-1832999514_ses-PNC2_task-idemo.nii.gz
: : : : : : : :
12 NaN False 5ebee86c4425360a219e6670 NaN NaN NaN 2216595430
13 bold False 5ebee86e4425360a219e6672 rest NaN 2216595430
14 NaN False 5ebee8714425360a219e6673 NaN NaN NaN 2216595430
15 phasediff False 5ebee87244253609f99e681a NaN NaN 2216595430
>>> fbl.get_files(RepetitionTime=3)
['sub-1832999514_ses-PNC1_task-rest_acq-singleband_bold.nii.gz',
'sub-1832999514_ses-PNC2_task-idemo.nii.gz',
'sub-1832999514_ses-PNC2_task-rest_acq-singleband_bold.nii.gz',
'sub-1832999514_ses-PNC2_task-frac2back.nii.gz',
'sub-2216595430_ses-PNC1_task-frac2back_run-02.nii.gz',
'sub-2216595430_ses-PNC1_task-frac2back_run-01.nii.gz',
'sub-2216595430_ses-PNC1_task-rest_acq-singleband_bold.nii.gz',
'sub-2216595430_ses-PNC1_task-idemo.nii.gz']
>>> fbl.get_metadata('EchoTime', filename='sub-1832999514_ses-PNC1_task-rest_acq-singleband_bold.nii.gz')
{'EchoTime': [0.032, 0.00351, 0.00269, 0.00527, 0.00667]}
```
| PypiClean |
/AsyncDex-1.1.tar.gz/AsyncDex-1.1/asyncdex/models/chapter.py | import asyncio
import re
from datetime import datetime
from logging import getLogger
from os import makedirs
from os.path import exists, join
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Tuple
from aiohttp import ClientError
from .abc import GenericModelList, Model
from .group import Group
from .mixins import DatetimeMixin
from .user import User
from ..constants import invalid_folder_name_regex, routes
from ..utils import copy_key_to_attribute
logger = getLogger(__name__)
if TYPE_CHECKING:
from .manga import Manga
from ..client import MangadexClient
class Chapter(Model, DatetimeMixin):
"""A :class:`.Model` representing an individual chapter.
.. versionadded:: 0.3
"""
volume: Optional[str]
"""The volume of the chapter. ``None`` if the chapter belongs to no volumes."""
number: Optional[str]
"""The number of the chapter. ``None`` if the chapter is un-numbered (such as in an anthology).
.. note::
A chapter can have a number, a title, or both. If a chapter's number is ``None``, it must have a title.
"""
title: Optional[str]
"""The title of the chapter. ``None`` if the chapter does not have a title.
.. note::
A chapter can have a number, a title, or both. If a chapter's title is ``None``, it must have a number.
"""
language: str
"""The language of the chapter."""
hash: str
"""The chapter's hash."""
page_names: List[str]
"""A list of strings containing the filenames of the pages.
.. seealso:: :attr:`.data_saver_page_names`
"""
data_saver_page_names: List[str]
"""A list of strings containing the filenames of the data saver pages.
.. seealso:: :attr:`.page_names`
"""
publish_time: datetime
"""A :class:`datetime.datetime` representing the time the chapter was published.
.. seealso:: :attr:`.created_at`
.. note::
The datetime is **timezone aware** as it is parsed from an ISO-8601 string.
"""
manga: "Manga"
"""The manga that this chapter belongs to."""
user: User
"""The user that uploaded this chapter."""
groups: GenericModelList[Group]
"""The groups that uploaded this chapter."""
read: bool
"""Whether or not the chapter is read."""
def __init__(
self,
client: "MangadexClient",
*,
id: Optional[str] = None,
version: int = 0,
data: Optional[Dict[str, Any]] = None,
):
self.read = False
super().__init__(client, id=id, version=version, data=data)
@property
def name(self) -> str:
"""Returns a nicely formatted name based on available fields. Includes the volume number, chapter number,
and chapter title if any one or more of them exist.
:return: Formatted name
:rtype: str
"""
if self.number:
constructed = ""
if self.volume:
constructed += f"Volume {self.volume} "
if self.number.isdecimal():
num_rep = float(self.number)
if num_rep.is_integer():
num_rep = int(num_rep)
else:
num_rep = self.number
constructed += f"Chapter {num_rep}"
if self.title:
constructed += f": {self.title}"
return constructed
else:
return self.title
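    # Informal examples of ``name`` (based on the logic above):
    #   volume='2', number='5.5', title='Foo'      -> 'Volume 2 Chapter 5.5: Foo'
    #   volume=None, number='12', title=None       -> 'Chapter 12'
    #   volume=None, number=None, title='Oneshot'  -> 'Oneshot'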
    @property
    def sorting_number(self) -> float:
        """Returns ``-1`` if the chapter's number is missing or is not a plain decimal, otherwise returns the
        chapter's number.
        :return: A number usable for sorting.
        :rtype: float
        """
        return float(self.number) if self.number and self.number.isdecimal() else -1
async def pages(self, *, data_saver: bool = False, ssl_only: bool = False) -> List[str]:
"""Get fully formatted page URLs.
.. note::
The given page URLs are only valid for a short timeframe. These URLs cannot be used for hotlinking.
:param data_saver: Whether or not to return the pages for the data saver URLs. Defaults to ``False``.
:type data_saver: bool
:param ssl_only: Whether or not the given URL has port ``443``. Useful if your firewall blocks outbound
connections to ports that are not port ``443``. Defaults to ``False``.
.. note::
This will lower the pool of available clients and can cause higher latencies.
:type ssl_only: bool
:return: A list of valid URLs in the order of the pages.
:rtype: List[str]
"""
if not hasattr(self, "page_names"):
await self.fetch()
r = await self.client.request(
"GET", routes["md@h"].format(chapterId=self.id), params={"forcePort443": ssl_only}
)
base_url = (await r.json())["baseUrl"]
r.close()
return [
f"{base_url}/{'data-saver' if data_saver else 'data'}/{self.hash}/{filename}"
for filename in (self.data_saver_page_names if data_saver else self.page_names)
]
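    # Usage sketch (inside a coroutine; the returned URLs expire quickly, so
    # fetch the pages soon after):
    #   urls = await chapter.pages(data_saver=True)
    #   first_page_url = urls[0]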
async def download_chapter(
self,
*,
folder_format: str = "{manga}/{chapter_num}{separator}{title}",
file_format: str = "{num}",
as_bytes_list: bool = False,
overwrite: bool = True,
retries: int = 3,
use_data_saver: bool = False,
ssl_only: bool = False,
) -> Optional[List[bytes]]:
"""Download all of the pages of the chapter and either save them locally to the filesystem or return the raw
bytes.
:param folder_format: The format of the folder to create for the chapter. The folder can already be existing.
The default format is ``{manga}/{chapter_num}{separator}{chapter_title}``.
.. note::
Specify ``.`` if you want to save the pages in the current folder.
Available variables:
* ``{manga}``: The name of the manga. If the chapter's manga object does not contain a title object,
it will be fetched.
* ``{chapter_num}``: The number of the chapter, if it exists.
* ``{separator}``: A separator if both the chapter's number and title exists.
* ``{title}``: The title of the chapter, if it exists.
:type folder_format: str
:param file_format: The format of the individual image file names. The default format is ``{num}``.
.. note::
The file extension is applied automatically from the real file name. There is no need to include it.
Available variables:
* ``{num}``: The numbering of the image files starting from 1. This respects the order the images are in
inside of :attr:`.page_names`.
* ``{num0}``: The same as ``{num}`` but starting from 0.
* ``{name}``: The actual filename of the image from :attr:`.page_names`, without the file extension.
:type file_format: str
:param as_bytes_list: Whether or not to return the pages as a list of raw bytes. Setting this parameter to
``True`` will ignore the value of the ``folder_format`` parameter.
:type as_bytes_list: bool
:param overwrite: Whether or not to override existing files with the same name as the page. Defaults to
``True``.
:type overwrite: bool
        :param retries: How many times to retry a chapter if an MD@H node does not let us download the pages.
Defaults to ``3``.
:type retries: int
:param use_data_saver: Whether or not to use the data saver pages or the normal pages. Defaults to ``False``.
:type use_data_saver: bool
:param ssl_only: Whether or not the given URL has port ``443``. Useful if your firewall blocks outbound
connections to ports that are not port ``443``. Defaults to ``False``.
.. note::
This will lower the pool of available clients and can cause higher download times.
:type ssl_only: bool
:raises: :class:`aiohttp.ClientResponseError` if there is an error after all retries are exhausted.
:return: A list of byte strings if ``as_bytes_list`` is ``True`` else None.
:rtype: Optional[List[bytes]]
"""
if not hasattr(self, "page_names"):
await self.fetch()
pages = await self.pages(data_saver=use_data_saver, ssl_only=ssl_only)
try:
items = await asyncio.gather(*[self.client.get_page(url) for url in pages])
except ClientError as e:
if retries > 0:
logger.warning("Retrying download of chapter %s due to %s: %s", self.id, type(e).__name__, e)
return await self.download_chapter(
folder_format=folder_format,
as_bytes_list=as_bytes_list,
overwrite=overwrite,
retries=retries - 1,
use_data_saver=use_data_saver,
ssl_only=ssl_only,
)
else:
raise
else:
byte_list = await asyncio.gather(*[item.read() for item in items])
[item.close() for item in items]
if as_bytes_list:
return byte_list # NOQA: ignore; This is needed because for whatever reason PyCharm cannot guess the
# output of asyncio.gather()
else:
base = ""
if not as_bytes_list:
chapter_num = self.number or ""
separator = " - " if self.number and self.title else ""
title = (
re.sub("_{2,}", "_", invalid_folder_name_regex.sub("_", self.title.strip()))
if self.title
else ""
)
# This replaces invalid characters with underscores then deletes duplicate underscores in a
# series. This
# means that a name of ``ex___ample`` becomes ``ex_ample``.
if not self.manga.titles:
await self.manga.fetch()
manga_title = self.manga.titles[self.language].primary or (
self.manga.titles.first().primary if self.manga.titles else self.manga.id
)
manga_title = re.sub("_{2,}", "_", invalid_folder_name_regex.sub("_", manga_title.strip()))
base = folder_format.format(
manga=manga_title, chapter_num=chapter_num, separator=separator, title=title
)
makedirs(base, exist_ok=True)
for original_file_name, (num, item) in zip(
self.data_saver_page_names if use_data_saver else self.page_names, enumerate(byte_list, start=1)
):
filename = (
file_format.format(num=num, num0=num - 1, name=original_file_name)
+ "."
+ original_file_name.rpartition(".")[-1]
)
full_path = join(base, filename)
                if overwrite or not exists(full_path):
with open(full_path, "wb") as fp:
fp.write(item)
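    # Usage sketch (hypothetical chapter object and template values, not taken
    # from the docs above); with these templates the files land in paths like
    # ``Some_Manga/12 - Some_Title/1.png``:
    #
    #   await chapter.download_chapter(
    #       folder_format="{manga}/{chapter_num}{separator}{title}",
    #       file_format="{num}",
    #   )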
@staticmethod
def _get_number_from_chapter_string(chapter_str: str) -> Tuple[Optional[float], Optional[str]]:
if not chapter_str:
return None, None
elif chapter_str.isdecimal():
return float(chapter_str), None
else:
# Unfortunately for us some people decided to enter in garbage data, which means that we cannot cleanly
# convert to a float. Attempt to try to get something vaguely resembling a number or return a null
# chapter number and set the title as the value for the chapter number.
match = re.search(r"[\d.]+", chapter_str)
return None if not match else float(match.group(0)), chapter_str
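    # Illustrative examples of the parsing above:
    #   ""          -> (None, None)
    #   "12"        -> (12.0, None)
    #   "12.5 (v2)" -> (12.5, "12.5 (v2)")  # non-decimal input keeps the raw string
    #   "Oneshot"   -> (None, "Oneshot")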
def parse(self, data: Dict[str, Any]):
super().parse(data)
if "data" in data and "attributes" in data["data"]:
attributes = data["data"]["attributes"]
copy_key_to_attribute(attributes, "volume", self)
copy_key_to_attribute(attributes, "title", self)
copy_key_to_attribute(attributes, "chapter", self, "number")
copy_key_to_attribute(attributes, "translatedLanguage", self, "language")
copy_key_to_attribute(attributes, "hash", self)
copy_key_to_attribute(attributes, "data", self, "page_names")
copy_key_to_attribute(attributes, "dataSaver", self, "data_saver_page_names")
self._process_times(attributes)
self._parse_relationships(data)
if hasattr(self, "_users"):
self.user = self._users[0]
del self._users
if hasattr(self, "mangas"):
# This is needed to move the list of Mangas created by the parse_relationships function into a
# singular manga, since there can never be >1 manga per chapter.
self.mangas: List[Manga]
self.manga = self.mangas[0]
del self.mangas
def _process_times(self, attributes: Dict[str, str]):
super()._process_times(attributes)
copy_key_to_attribute(
attributes,
"publishAt",
self,
"publish_time",
transformation=lambda attrib: datetime.fromisoformat(attrib) if attrib else attrib,
)
async def fetch(self):
"""Fetch data about the chapter. |permission| ``chapter.view``
:raises: :class:`.InvalidID` if a chapter with the ID does not exist.
"""
await self._fetch("chapter.view", "chapter")
async def load_groups(self):
"""Shortcut method that calls :meth:`.MangadexClient.batch_groups` with the groups that belong to the group.
Roughly equivalent to:
.. code-block:: python
            await client.batch_groups(*chapter.groups)
"""
await self.client.batch_groups(*self.groups)
async def mark_read(self):
"""Mark the chapter as read. |auth|
.. versionadded:: 0.5
"""
self.client.raise_exception_if_not_authenticated("GET", routes["read"])
r = await self.client.request("POST", routes["read"].format(id=self.id))
self.read = True
r.close()
async def mark_unread(self):
"""Mark the chapter as unread. |auth|
.. versionadded:: 0.5
"""
self.client.raise_exception_if_not_authenticated("GET", routes["read"])
r = await self.client.request("DELETE", routes["read"].format(id=self.id))
self.read = False
r.close()
async def toggle_read(self):
"""Toggle a chapter between being read and unread. Requires authentication.
.. versionadded:: 0.5
.. note::
This requires the read status of the chapter to be known. See :meth:`.get_read_status` or
:meth:`.ChapterList.get_read`.
        :raises: :class:`.Unauthorized` if authentication is missing.
"""
if self.read:
await self.mark_unread()
else:
await self.mark_read()
async def get_read(self):
"""Gets whether or not the chapter is read. The read status can then be viewed in :attr:`.read`.
.. versionadded:: 0.5
"""
r = await self.client.request("GET", routes["manga_read"].format(id=self.manga.id))
self.manga._check_404(r)
json = await r.json()
r.close()
        self.read = self.id in json["data"]
# --- Brainfeatures-0.0.4: brainfeatures/data_set/tuh_abnormal.py ---
from collections import OrderedDict
from glob import glob
import logging
import re
import pandas as pd
import numpy as np
from brainfeatures.data_set.abstract_data_set import DataSet
from brainfeatures.utils.file_util import (natural_key,
parse_age_and_gender_from_edf_header,
mne_load_signals_and_fs_from_edf,
property_in_path)
from brainfeatures.preprocessing.rules import reject_too_long_recording
# check whether this can be replaced by natural key
def _session_key(string):
""" sort the file name by session """
p = r'(s\d*)_'
return re.findall(p, string)
def _time_key(file_name):
""" provides a time-based sorting key """
# the splits are specific to tuh abnormal eeg data set
splits = file_name.split('/')
p = r'(\d{4}_\d{2}_\d{2})'
[date] = re.findall(p, splits[-2])
date_id = [int(token) for token in date.split('_')]
recording_id = natural_key(splits[-1])
session_id = _session_key(splits[-2])
return date_id + session_id + recording_id
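# e.g. for ".../s003_2012_04_06/00000768_s003_t000.edf" the key begins with
# [2012, 4, 6, 's003', ...], so recordings sort chronologically per session.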
def read_all_file_names(path, extension, key="time"):
""" read all files with specified extension from given path
:param path: parent directory holding the files directly or in
subdirectories
:param extension: the type of the file, e.g. '.txt' or '.edf'
:param key: the sorting of the files. natural e.g. 1, 2, 12, 21
(machine 1, 12, 2, 21) or by time since this is
important for cv. time is specified in the edf file names
"""
assert key in ["natural", "time"], "unknown sorting key"
file_paths = glob(path + '**/*' + extension, recursive=True)
if key == "time":
sorting_key = _time_key
else:
sorting_key = natural_key
file_names = sorted(file_paths, key=sorting_key)
assert len(file_names) > 0, ("something went wrong. Found no {} files in {}"
.format(extension, path))
return file_names
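# Example (hypothetical path): collect all EDF recordings, sorted by the
# recording time encoded in the TUH directory layout:
#   edf_files = read_all_file_names("/data/tuh_abnormal/", ".edf", key="time")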
class TuhAbnormal(DataSet):
"""tuh abnormal data set. file names are given as"""
# v2.0.0/edf/eval/abnormal/01_tcp_ar/007/00000768/s003_2012_04_06/
# 00000768_s003_t000.edf
def __init__(self, data_path, extension, subset="train", channels=sorted([
'A1', 'A2', 'C3', 'C4', 'CZ', 'F3', 'F4', 'F7', 'F8', 'FP1', 'FP2',
'FZ', 'O1', 'O2', 'P3', 'P4', 'PZ', 'T3', 'T4', 'T5', 'T6']),
key="time", n_recordings=None, target="pathological",
max_recording_mins=None, ch_name_pattern="EEG {}-REF"):
self.max_recording_mins = max_recording_mins
self.ch_name_pattern = ch_name_pattern
self.n_recordings = n_recordings
self.extension = extension
self.data_path = data_path
self.channels = channels
self.target = target
self.subset = subset
self.key = key
self.gender_int_map = {"M": 0, "F": 1}
self.pathologicals = []
self.file_names = []
self.genders = []
self.targets = []
self.sfreqs = []
self.ages = []
assert data_path.endswith("/"), "data path has to end with '/'"
assert extension.startswith("."), "extension has to start with '.'"
if self.subset == "eval":
assert self.max_recording_mins is None, ("do not reject eval "
"recordings")
def load(self):
# read all file names in path with given extension sorted by key
self.file_names = read_all_file_names(
self.data_path, self.extension, self.key)
assert self.subset in self.file_names[0], (
"cannot parse {} from file name {}"
.format(self.subset, self.file_names[0]))
        # prune the file names to the train or eval subset
self.file_names = [file_name for file_name in self.file_names
if self.subset in file_name.split('/')]
n_picked_recs = 0
files_to_delete = []
for file_name in self.file_names:
if self.n_recordings is not None:
if n_picked_recs == self.n_recordings:
break
            # if this is the raw version of the data set, reject too-long recordings
if self.extension == ".edf":
if self.max_recording_mins is not None:
# reject recordings that are too long
rejected, duration = reject_too_long_recording(
file_name, self.max_recording_mins)
if rejected:
files_to_delete.append(file_name)
continue
n_picked_recs += 1
assert self.target in ["pathological", "age", "gender"], (
"unknown target {}".format(self.target))
assert self.extension in [".edf", ".h5"], (
"unknown file format {}".format(self.extension))
if self.extension == ".edf":
# get pathological status, age and gender for edf file
pathological = property_in_path(file_name, "abnormal")
age, gender = parse_age_and_gender_from_edf_header(file_name)
else:
info_df = pd.read_hdf(file_name, key="info")
assert len(info_df) == 1, "too many rows in info df"
info = info_df.iloc[-1].to_dict()
pathological = info["pathological"]
age = info["age"]
gender = info["gender"]
self.sfreqs.append(int(info["sfreq"]))
            # encode gender string as integer
            if gender in self.gender_int_map:
                gender = self.gender_int_map[gender]
            else:
                assert gender in self.gender_int_map.values(), "unknown gender"
targets = {"pathological": pathological, "age": age,
"gender": gender}
self.targets.append(targets[self.target])
self.ages.append(age)
self.genders.append(gender)
self.pathologicals.append(pathological)
if self.max_recording_mins is not None:
# prune list of all file names to n_recordings
for file_name in files_to_delete:
self.file_names.remove(file_name)
if self.n_recordings is not None:
self.file_names = self.file_names[:self.n_recordings]
assert len(self.file_names) == len(self.targets), "lengths differ"
if self.n_recordings is not None:
assert len(self.file_names) == self.n_recordings, (
"less recordings picked than desired")
assert len(np.intersect1d(self.file_names, files_to_delete)) == 0, (
"deleting unwanted file names failed")
def __getitem__(self, index):
file_ = self.file_names[index]
label = self.targets[index]
# raw tuh data
if self.extension == ".edf":
signals, sfreq = mne_load_signals_and_fs_from_edf(
file_, self.channels, self.ch_name_pattern)
# preprocessed tuh data / features
else:
assert self.extension == ".h5", "unknown data format"
signals = pd.read_hdf(file_, key="data")
x_dim, y_dim = signals.shape
if x_dim > y_dim:
signals = signals.T
sfreq = self.sfreqs[index]
return signals, sfreq, label
def __len__(self):
return len(self.file_names)
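# Usage sketch (hypothetical paths and arguments): load the raw training
# subset, rejecting recordings longer than 35 minutes, then read one recording:
#   ds = TuhAbnormal("/data/tuh_abnormal/", ".edf", subset="train",
#                    max_recording_mins=35)
#   ds.load()
#   signals, sfreq, label = ds[0]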
class TuhAbnormalSubset(DataSet):
    def __init__(self, dataset, indices):
        self.file_names = [dataset.file_names[i] for i in indices]
        self.targets = [dataset.targets[i] for i in indices]
        self.target = dataset.target
        self.sfreqs = [dataset.sfreqs[i] for i in indices]
        self.ages = [dataset.ages[i] for i in indices]
        self.genders = [dataset.genders[i] for i in indices]
        self.pathologicals = [dataset.pathologicals[i] for i in indices]
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
file_ = self.file_names[idx]
label = self.targets[idx]
signals = pd.read_hdf(file_, key="data")
x_dim, y_dim = signals.shape
if x_dim > y_dim:
signals = signals.T
sfreq = self.sfreqs[idx]
return signals, sfreq, label
# this function is called once for the devel set and once for the eval set.
# on the second call the feature label already exists, so only the meta
# feature values are appended and the label is not duplicated.
def add_meta_feature(data_set, features, feature_labels):
""" modify the feature vectors of a data set. here, we add additional
meta features age and gender """
features_to_add = OrderedDict([
("age", data_set.ages),
("gender", data_set.genders),
])
target = data_set.target
if target in features_to_add:
features_to_add.pop(target)
logging.info("now adding {} to feature vectors".
format(' and '.join(features_to_add.keys())))
for feature in features_to_add:
feature_label = "meta_" + feature
for i in range(len(features)):
repeated_meta_feature = np.repeat(features_to_add[feature][i],
len(features[i]))
repeated_meta_feature = pd.DataFrame(
repeated_meta_feature.reshape(-1, 1),
columns=[feature_label])
features[i] = pd.concat((features[i], repeated_meta_feature),
axis=1)
        if feature_label not in feature_labels:
            feature_labels.append(feature_label)
    return features, feature_labels
# --- ActiveReign-1.0.5: ar3/ops/shell/__init__.py ---
import os
from ar3.core.smb import SmbCon
from ar3.servers.smb import SMBServer
from ar3.core.connector import Connector
from ar3.helpers.misc import gen_random_string
from ar3.ops.enum.host_enum import code_execution
class AR3Shell(Connector):
def __init__(self, args, db_obj, config_obj, loggers):
Connector.__init__(self, args, loggers, args.target)
self.output = []
self.pwd_list = ['C:', 'Windows', 'System32']
self.pwd = '\\'.join(self.pwd_list)
self.exec_method = args.exec_method
self.sharename = args.fileless_sharename
self.db = db_obj
self.config_obj = config_obj
try:
# Setup Smb Connection
self.logger.status('Initiating remote connection')
self.smbcon = SmbCon(self.args, loggers, self.host, self.db)
self.smbcon.create_smb_con()
# Execute command to verify permissions
self.cmd_execution('ECHO %USERDOMAIN%\%USERNAME%')
self.logger.success('Starting emulated shell (Host: {}) (User: {}) (Method: {}) (Fileless: {})'.format(self.host, self.output[0].strip(), self.exec_method, str(args.fileless)))
self.logger.warning("This is a limited shell and requires full paths for file interactions\n")
except Exception as e:
self.logger.fail("Error Starting Shell: {}".format(str(e)))
exit(1)
def help(self):
print("""
help - show this menu
exit - Close shell
Navigation:
pwd - Show PWD
dir - List PWD
cd - Change directory
File Interactions:
type [remote_file] - Show file contents (Full Path Required)
download [remote_file] [location] - Download remote file (Full Path Required)
upload [local_file] [location] - Upload local file (Full Path Required)
delete [remote_file] - Delete remote file (Full Path Required)
Commands:
[cmd] - Execute remote cmd
""")
def cd(self, cmd):
if cmd.startswith('cd'):
try:
cd_path = cmd.split(' ')[1]
cd_split = cd_path.replace("\\", "/").split("/") # Input formatting
cd_split = [x for x in cd_split if x] # Remove blanks
if cd_path == "/" or cd_path == "\\":
self.pwd_list = ['C:']
# Dir up
elif cd_split[0] == "..":
self.pwd_list.pop(-1)
cd_split.pop(cd_split.index(".."))
# new dir
elif cd_path.startswith(("/", "\\")):
self.pwd_list = ['C:']
self.pwd_list = self.pwd_list + cd_split
except:
                self.logger.fail('Unable to change directories')
def dir(self, cmd):
if cmd == "dir":
return self.cmd_execution("dir {}".format(self.pwd))
else:
return self.cmd_execution(cmd)
def download(self, cmd):
try:
val = cmd.split(" ")
self.smbcon.downloadFile(val[1], val[2])
self.logger.success("Download Complete: {}".format(val[2]))
except Exception as e:
if str(e) == "list index out of range":
self.logger.fail('Not enough values to unpack, see -h for more')
else:
self.logger.fail("Download Failed: {}".format(str(e)))
def upload(self, cmd):
try:
val = cmd.split(" ")
self.smbcon.uploadFile(val[1], val[2])
self.logger.success("Upload Complete: {}".format(val[2]))
except Exception as e:
if str(e) == "list index out of range":
self.logger.fail('Not enough values to unpack, see -h for more')
else:
self.logger.fail("Upload Failed: {}".format(str(e)))
def delete(self, cmd):
try:
val = cmd.split(" ")
self.smbcon.deleteFile(val[1])
self.logger.success("Download Complete: {}".format(val[1]))
except Exception as e:
if str(e) == "list index out of range":
self.logger.fail('Not enough values to unpack, see -h for more')
else:
self.logger.fail("Deletion Failed: {}".format(str(e)))
def cmd_execution(self, cmd):
resp = code_execution(self.smbcon, self.args, self.host, self.loggers, self.config_obj, cmd, return_data=True)
self.output = resp.splitlines()
def cmdloop(self):
while True:
try:
# init prompt
self.output = []
self.pwd = '\\'.join(self.pwd_list)
cmd = input("{}> ".format(self.pwd))
cmd = cmd.lstrip().rstrip()
self.logger.debug("User cmd ::: \'{}\'".format(cmd))
# Handle CMD input
if cmd == "help":
self.help()
elif cmd == 'exit':
try:
self.smbcon.close()
except:
pass
return True
elif cmd.startswith('cd'):
self.cd(cmd)
elif cmd.startswith('dir'):
self.dir(cmd)
elif cmd.startswith('download'):
self.download(cmd)
elif cmd.startswith('upload'):
self.upload(cmd)
elif cmd.startswith('delete'):
self.delete(cmd)
elif cmd == 'pwd':
self.logger.output(self.pwd)
else:
                    self.cmd_execution(cmd)
# Show cmd Output
for result in self.output:
self.logger.output(result)
except KeyboardInterrupt:
try:
self.smbcon.close()
except:
pass
return True
except Exception as e:
self.logger.debug(str(e))
def main(args, config_obj, db_obj, loggers):
shell = None
smb_srv_obj = None
try:
# Init smb server
if args.fileless:
# Start smbserver
setattr(args, 'fileless_sharename', 'TEMP-{}$'.format(gen_random_string()))
smb_srv_obj = SMBServer(loggers['console'], args.fileless_sharename, verbose=args.debug)
smb_srv_obj.start()
# Enter CMD Loop
shell = AR3Shell(args, db_obj, config_obj, loggers)
shell.cmdloop()
# Close smbserver & exit
if args.fileless:
smb_srv_obj.cleanup_server()
smb_srv_obj.server = None
os._exit(0)
except KeyboardInterrupt:
# Cleanup and close
if shell:
shell.smbcon.close()
if smb_srv_obj:
smb_srv_obj.cleanup_server()
        return
# --- Nuitka-1.8: nuitka/nodes/ImportHardNodes.py ---
""" Nodes representing more trusted imports. """
from nuitka.importing.Importing import locateModule, makeModuleUsageAttempt
from nuitka.utils.ModuleNames import ModuleName
from .ExpressionBases import ExpressionBase
class ExpressionImportHardBase(ExpressionBase):
# Base classes can be abstract, pylint: disable=abstract-method
#
__slots__ = ("module_name", "finding", "module_kind", "module_filename")
def __init__(self, module_name, source_ref):
ExpressionBase.__init__(self, source_ref)
self.module_name = ModuleName(module_name)
self.finding = None
self.module_filename = None
(
_module_name,
self.module_filename,
self.module_kind,
self.finding,
) = locateModule(
module_name=self.module_name,
parent_package=None,
level=0,
)
# Expect to find them and to match the name of course.
assert self.finding != "not-found", self.module_name
assert _module_name == self.module_name, (self.module_name, _module_name)
def getModuleUsageAttempt(self):
return makeModuleUsageAttempt(
module_name=self.module_name,
filename=self.module_filename,
module_kind=self.module_kind,
finding=self.finding,
level=0,
source_ref=self.source_ref,
reason="import",
)
class ExpressionImportModuleNameHardBase(ExpressionImportHardBase):
"""Hard import names base class."""
# Base classes can be abstract, pylint: disable=I0021,abstract-method
__slots__ = ("import_name", "finding", "module_filename", "module_guaranteed")
def __init__(self, module_name, import_name, module_guaranteed, source_ref):
ExpressionImportHardBase.__init__(
self, module_name=module_name, source_ref=source_ref
)
self.import_name = import_name
self.module_guaranteed = module_guaranteed
def getDetails(self):
return {
"module_name": self.module_name,
"import_name": self.import_name,
"module_guaranteed": self.module_guaranteed,
}
# Derived ones have the same interface.
@staticmethod
def isExpressionImportModuleNameHard():
return True
@staticmethod
def hasVeryTrustedValue():
return True
def finalize(self):
del self.parent
def getModuleName(self):
return self.module_name
def getImportName(self):
return self.import_name
class ExpressionImportModuleNameHardMaybeExists(ExpressionImportModuleNameHardBase):
"""Hard coded import names, e.g. of "site.something"
    These are created for attributes of hard imported modules that are not
    known to exist or not.
"""
kind = "EXPRESSION_IMPORT_MODULE_NAME_HARD_MAYBE_EXISTS"
def computeExpressionRaw(self, trace_collection):
trace_collection.onExceptionRaiseExit(AttributeError)
# Trace the module usage attempt.
trace_collection.onModuleUsageAttempt(self.getModuleUsageAttempt())
return self, None, None
@staticmethod
def mayHaveSideEffects():
return True
@staticmethod
def mayRaiseException(exception_type):
return True
class ExpressionImportModuleNameHardExists(ExpressionImportModuleNameHardBase):
"""Hard coded import names, e.g. of "sys.stdout"
These are directly created for some Python mechanics.
"""
kind = "EXPRESSION_IMPORT_MODULE_NAME_HARD_EXISTS"
def computeExpressionRaw(self, trace_collection):
if not self.module_guaranteed:
trace_collection.onExceptionRaiseExit(ImportError)
# Trace the module usage attempt.
trace_collection.onModuleUsageAttempt(self.getModuleUsageAttempt())
# As good as it gets.
return self, None, None
def mayHaveSideEffects(self):
return not self.module_guaranteed
def mayRaiseException(self, exception_type):
return not self.module_guaranteed
def computeExpressionCallViaVariable(
self, call_node, variable_ref_node, call_args, call_kw, trace_collection
):
# Hard imports being called, generally have no problem with forward propagation.
return self.computeExpressionCall(
call_node=call_node,
call_args=call_args,
call_kw=call_kw,
trace_collection=trace_collection,
)
class ExpressionImportModuleNameHardExistsSpecificBase(
ExpressionImportModuleNameHardExists
):
"""Base class for nodes that hard coded import names, e.g. of "importlib.import_module" name."""
@staticmethod
def getDetails():
        return {}
# --- KiwiCoder-0.3: kiwi/plugin/hardware/control/phidget.py ---
from Phidget22.Devices.DigitalOutput import DigitalOutput
from kiwi.common import PeripheryUsage
from kiwi.common.common import class_mock_enable
from kiwi.core import ControlPeriphery
from threading import Timer, Lock
class PhidgetRelayPort(object):
def __init__(self, vintport, channel, hold_duty=1.0, hit_delay=0.2):
self.vintport = vintport
self.channel = channel
self.hold_duty = hold_duty
self.hit_delay = hit_delay
self.rly = None
self.lock = None
self.state = None
self.t = None
def prepare(self):
self.rly = DigitalOutput()
self.rly.setHubPort(self.vintport)
self.rly.setChannel(self.channel)
self.rly.openWaitForAttachment(5000)
self.lock = Lock()
        self.state = False  # False -> closed, True -> open (duty > 0%)
        self.t = Timer(0, lambda: None)  # placeholder timer, replaced on open()
def open(self):
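        # Hit-and-hold drive: energize the coil at full duty first, then drop
        # to hold_duty after hit_delay seconds to limit coil heating.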
def _hold():
with self.lock:
# double check it hasn't been closed in the mean time!
                if self.state:
self.rly.setDutyCycle(self.hold_duty)
with self.lock:
self.rly.setDutyCycle(1.0)
self.state = True
# set hold_duty after hit_delay seconds
self.t = Timer(self.hit_delay, _hold)
self.t.start()
def close(self):
with self.lock:
self.t.cancel()
self.rly.setDutyCycle(0.0)
            self.state = False
def start(self):
pass
def shutdown(self):
pass
@class_mock_enable
class PhidgetRelay(ControlPeriphery):
def __init__(self, vintport, nvalves=16, name="", usage=PeripheryUsage.BASE, mock=False, mock_obj=None):
super().__init__(name=name, usage=usage, mock=mock, mock_obj=mock_obj)
self.vintport = vintport
self.nvalves = nvalves
self.relays = []
self.gen_port_idx = 0
self.id2port = {}
def register(self, bio_id: int, port: int) -> None:
self.id2port[bio_id] = port
def prepare(self):
for ch in range(self.nvalves):
_rly = PhidgetRelayPort(self.vintport, ch, hold_duty=1.0)
self.relays.append(_rly)
for relay in self.relays:
relay.prepare()
def __mock_prepare__(self):
pass
def start(self):
pass
def shutdown(self):
pass
def set_signal(self, bio_id: int):
port_id = self.id2port[bio_id]
relay = self.relays[port_id]
relay.open()
def unset_signal(self, bio_id: int):
port_id = self.id2port[bio_id]
relay = self.relays[port_id]
relay.close()
def set_signal_with_value(self, bio_id: int, val: float):
pass
def __mock_set_signal__(self, bio_id: int):
pass
def __mock_unset_signal__(self, bio_id: int):
        pass
# --- IdracRedfishSupport-0.0.8: SupportAssistCollectionLocalREDFISH.py ---
import getpass
import json
import logging
import re
import requests
import sys
import time
import urllib.parse
import warnings
from datetime import datetime
from pprint import pprint
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description="Python script using Redfish API with OEM extension to perform Support Assist(SA) operations. These include export SA report locally, accept End User License Agreement(EULA) or register SA for iDRAC.")
parser.add_argument('-ip',help='iDRAC IP address', required=False)
parser.add_argument('-u', help='iDRAC username', required=False)
parser.add_argument('-p', help='iDRAC password. If you do not pass in argument -p, script will prompt to enter user password which will not be echoed to the screen.', required=False)
parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False)
parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False)
parser.add_argument('--script-examples', help='Get executing script examples', action="store_true", dest="script_examples", required=False)
parser.add_argument('--export', help='Export support assist collection locally. You must also use argument --data to export the SA collection.', action="store_true", required=False)
parser.add_argument('--accept', help='Accept support assist end user license agreement (EULA)', action="store_true", required=False)
parser.add_argument('--get', help='Get support assist end user license agreement (EULA)', action="store_true", required=False)
parser.add_argument('--register', help='Register SupportAssist for iDRAC. NOTE: You must also pass in city, company name, country, email, first name, last name, phone number, street, state and zip arguments to register. NOTE: ISM must be installed and running on the operating system before you register SA.', action="store_true", required=False)
parser.add_argument('--city', help='Pass in city name to register Support Assist', required=False)
parser.add_argument('--companyname', help='Pass in company name to register Support Assist', required=False)
parser.add_argument('--country', help='Pass in country to register Support Assist', required=False)
parser.add_argument('--first-email', help='Pass in primary (first) email address to register Support Assist', dest="first_email", required=False)
parser.add_argument('--firstname', help='Pass in firstname to register Support Assist', required=False)
parser.add_argument('--lastname', help='Pass in lastname to register Support Assist', required=False)
parser.add_argument('--phonenumber', help='Pass in phone number to register Support Assist', required=False)
parser.add_argument('--second-firstname', help='Pass in firstname of the secondary contact to register Support Assist', dest="second_firstname", required=False)
parser.add_argument('--second-lastname', help='Pass in lastname of the secondary contact to register Support Assist', dest="second_lastname", required=False)
parser.add_argument('--second-phonenumber', help='Pass in phone number of the secondary contact to register Support Assist', dest="second_phonenumber", required=False)
parser.add_argument('--second-email', help='Pass in email address of the secondary contact to register Support Assist', dest="second_email", required=False)
parser.add_argument('--street', help='Pass in street name to register Support Assist', required=False)
parser.add_argument('--state', help='Pass in state to register Support Assist', required=False)
parser.add_argument('--zip', help='Pass in zipcode to register Support Assist', required=False)
parser.add_argument('--data', help='Pass in a value for the type of data you want to collect for Support Assist collection. Supported values are: pass in 0 for \"DebugLogs\", pass in 1 for "HWData\", pass in 2 for \"OSAppData\", pass in 3 for \"TTYLogs(storage logs)\", pass in 4 for \"TelemetryReports\". Note: If you do not pass in this argument, default settings will collect HWData. Note: You can pass in one value or multiple values to collect. If you pass in multiple values, use comma separator for the values (Example: 0,3)', required=False)
parser.add_argument('--filter', help='Filter personal identification information (PII) for Support Assist collection. Supported values are: 0 for \"No\" and 1 for \"Yes\". NOTE: If you don\'t pass in this argument, no filtering is performed for the collection.', required=False)
parser.add_argument('--filename', help='Change default filename for SupportAssist collection file. Default filename: sacollect.zip. NOTE: If using this argument make sure to give the filename .zip extension', required=False, default='sacollect.zip')
args = vars(parser.parse_args())
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
def script_examples():
print("""\n- SupportAssistCollectionLocalREDFISH.py -ip 192.168.0.120 -u root -p calvin --get, this example will get SA EULA current status.
\n- SupportAssistCollectionLocalREDFISH.py -ip 192.168.0.120 -u root --accept, this example will first prompt to enter iDRAC user password, then accept SA EULA.
\n- SupportAssistCollectionLocalREDFISH.py -ip 192.168.0.120 -x bd48034369f6e5f7424e9aea88f94123 --export --data 0,3, this example using X-auth token session will export SA logs locally. The SA log will only include debug and TTY logs.
\n- SupportAssistCollectionLocalREDFISH.py -ip 192.168.0.120 -u root -p calvin --register --city Austin --state Texas --zip 78665 --companyname Dell --country US --firstname test --lastname tester --phonenumber "512-123-4567" --first-email \"[email protected]\" --second-email \"[email protected]\" --street \"1234 One Dell Way\", this example shows registering SupportAssist.
    \n- SupportAssistCollectionLocalREDFISH.py -ip 192.168.0.120 -u root -p calvin --export --data 1, this example will export SA collection locally which contains only hardware data. Once the job ID is marked completed, the SA collection will be saved locally to the default filename sacollect.zip.
\n- SupportAssistCollectionLocalREDFISH.py -ip 192.168.0.120 -u root -p calvin --accept --export --data 1 --filename R640_SA_collection.zip, this example will first attempt to accept EULA, then export SA collection and saved locally to a custom file named R640_SA_collection.zip""")
sys.exit(0)
def check_supported_idrac_version():
supported = ""
if args["x"]:
response = requests.get('https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
    if response.reason == "Unauthorized":
logging.error("\n- FAIL, unauthorized to execute Redfish command. Check to make sure you are passing in correct iDRAC username/password and the IDRAC user has the correct privileges")
sys.exit(0)
data = response.json()
supported = "no"
for i in data['Actions'].keys():
if "SupportAssistCollection" in i:
supported = "yes"
if supported == "no":
logging.warning("\n- WARNING, iDRAC version installed does not support this feature using Redfish API")
sys.exit(0)
def support_assist_collection():
global job_id
global start_time
start_time = datetime.now()
url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistCollection' % (idrac_ip)
method = "SupportAssistCollection"
payload = {"ShareType":"Local"}
if args["filter"]:
if args["filter"] == "0":
payload["Filter"] = "No"
elif args["filter"] == "1":
payload["Filter"] = "Yes"
if args["data"]:
data_selector_values=[]
if "," in args["data"]:
data_selector = [i for i in args["data"].split(",")]
if "0" in data_selector:
data_selector_values.append("DebugLogs")
if "1" in data_selector:
data_selector_values.append("HWData")
if "2" in data_selector:
data_selector_values.append("OSAppData")
if "3" in data_selector:
data_selector_values.append("TTYLogs")
if "4" in data_selector:
data_selector_values.append("TelemetryReports")
payload["DataSelectorArrayIn"] = data_selector_values
else:
if args["data"] == "0":
data_selector_values.append("DebugLogs")
if args["data"] == "1":
data_selector_values.append("HWData")
if args["data"] == "2":
data_selector_values.append("OSAppData")
if args["data"] == "3":
data_selector_values.append("TTYLogs")
if args["data"] == "4":
data_selector_values.append("TelemetryReports")
payload["DataSelectorArrayIn"] = data_selector_values
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
if response.status_code != 202:
data = response.json()
logging.error("\n- FAIL, status code %s returned, detailed error information:\n %s" % (response.status_code, data))
sys.exit(0)
try:
job_id = response.headers['Location'].split("/")[-1]
except:
logging.error("- FAIL, unable to find job ID in headers POST response, headers output is:\n%s" % response.headers)
sys.exit(0)
logging.info("\n- PASS, job ID %s successfuly created for %s method\n" % (job_id, method))
def support_assist_accept_EULA():
url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistAcceptEULA' % (idrac_ip)
method = "SupportAssistAcceptEULA"
payload = {}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
if response.status_code == 202 or response.status_code == 200:
logging.debug("- PASS, POST command passed to accept EULA")
else:
data = response.json()
logging.error("\n- FAIL, status code %s returned, detailed error information:\n %s" % (response.status_code, data))
sys.exit(0)
logging.info("\n- PASS, %s method passed and End User License Agreement (EULA) has been accepted" % method)
return
def support_assist_get_EULA_status():
global accept_interface
url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistGetEULAStatus' % (idrac_ip)
method = "SupportAssistGetEULAStatus"
payload = {}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
data = response.json()
if args["accept"]:
accept_interface = data["Interface"]
else:
logging.info("\n- Current Support Assist End User License Agreement Information -\n")
for i in data.items():
if not "ExtendedInfo" in i[0]:
print("%s: %s" % (i[0],i[1]))
def support_assist_register():
url = 'https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Attributes' % idrac_ip
payload = {"Attributes":{"OS-BMC.1.AdminState":"Enabled"}}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.patch(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.patch(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
statusCode = response.status_code
data = response.json()
if statusCode != 200:
logging.error("\n- FAIL, Command failed for action %s, status code is: %s\n" % (args["s"].upper(),statusCode))
logging.error("Extended Info Message: {0}".format(response.json()))
sys.exit(0)
url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistRegister' % (idrac_ip)
method = "SupportAssistRegister"
payload = {"City": args["city"], "CompanyName": args["companyname"], "Country":args["country"], "PrimaryFirstName":args["firstname"],"PrimaryLastName":args["lastname"], "PrimaryPhoneNumber":args["phonenumber"], "State":args["state"], "Street1": args["street"],"Zip":args["zip"]}
if args["first_email"]:
payload["PrimaryEmail"] = args["first_email"]
if args["second_email"]:
payload["SecondaryEmail"] = args["second_email"]
if args["second_firstname"]:
payload["SecondaryFirstName"] = args["second_firstname"]
if args["second_lastname"]:
payload["SecondaryLastName"] = args["second_lastname"]
if args["second_phonenumber"]:
payload["SecondaryPhoneNumber"] = args["second_phonenumber"]
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
if response.status_code == 200 or response.status_code == 202:
logging.info("\n- PASS, SupportAssistRegister action passed, status code %s returned" % response.status_code)
else:
logging.error("\n- FAIL, SupportAssistRegister action failed, status code %s returned. Detailed error results:\n" % response.status_code)
        print(response.text)
sys.exit(0)
url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistGetEULAStatus' % (idrac_ip)
method = "SupportAssistGetEULAStatus"
payload = {}
logging.info("- INFO, validating if Support Assist is registered for iDRAC")
time.sleep(15)
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
data = response.json()
if data["IsRegistered"] == "Registered":
logging.info("\n- PASS, Support Assist verified as registered")
else:
logging.error("\n- FAIL, Support Assist not registered, current status is: %s" % data["IsRegistered"])
sys.exit(0)
def loop_job_status():
loop_count = 0
while True:
if loop_count == 20:
logging.info("- INFO, retry count for GET request has been elapsed, script will exit. Manually check the job queue for final job status results")
sys.exit(0)
if args["x"]:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert,auth=(idrac_username, idrac_password))
current_time = (datetime.now()-start_time)
data = response.json()
if response.status_code != 200:
logging.error("- FAIL, status code %s returned, GET command will retry" % statusCode)
time.sleep(10)
loop_count += 1
continue
try:
if response.headers['Location'] == "/redfish/v1/Dell/sacollect.zip" or response.headers['Location'] == "/redfish/v1/Oem/Dell/sacollect.zip":
logging.info("- PASS, job ID %s successfully marked completed" % job_id)
if args["x"]:
response = requests.get('https://%s%s' % (idrac_ip, response.headers['Location']), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s%s' % (idrac_ip, response.headers['Location']), verify=verify_cert,auth=(idrac_username, idrac_password))
if args["filename"]:
SA_export_filename = args["filename"]
else:
SA_export_filename = "sacollect.zip"
with open(SA_export_filename, "wb") as output:
output.write(response.content)
logging.info("\n- INFO, check your local directory for SupportAssist collection zip file \"%s\"" % SA_export_filename)
sys.exit(0)
else:
data = response.json()
logging.error("- ERROR, unable to locate SA collection URI in headers output, JSON response: \n%s" % data)
sys.exit(0)
except:
if str(current_time)[0:7] >= "0:30:00":
logging.error("\n- FAIL: Timeout of 30 minutes has been hit, script stopped\n")
sys.exit(0)
elif data['JobState'] == "CompletedWithErrors":
logging.info("\n- INFO, SA collection completed with errors, please check iDRAC Lifecycle Logs for more details")
sys.exit(0)
elif "Fail" in data['Message'] or "fail" in data['Message'] or data['JobState'] == "Failed" or "error" in data['Message'] or "Error" in data['Message']:
logging.error("- FAIL: job ID %s failed, failed message is: %s" % (job_id, data['Message']))
sys.exit(0)
elif data['JobState'] == "Completed" or "complete" in data['Message'].lower():
if "local path" in data['Message']:
logging.info("\n--- PASS, Final Detailed Job Status Results ---\n")
else:
logging.warning("- WARNING, unable to detect final job status message. Manually run GET on URI \"%s\" using browser to see if SA zip collection is available to download." % response.headers['Location'])
sys.exit(0)
for i in data.items():
pprint(i)
break
else:
logging.info("- INFO, Job status not marked completed, polling job status again, execution time: %s" % str(current_time)[0:7])
time.sleep(5)
if __name__ == "__main__":
if args["script_examples"]:
script_examples()
if args["ip"] and args["ssl"] or args["u"] or args["p"] or args["x"]:
idrac_ip = args["ip"]
idrac_username = args["u"]
if args["p"]:
idrac_password = args["p"]
if not args["p"] and not args["x"] and args["u"]:
idrac_password = getpass.getpass("\n- INFO, argument -p not detected, pass in iDRAC user %s password: " % args["u"])
if args["ssl"]:
if args["ssl"].lower() == "true":
verify_cert = True
elif args["ssl"].lower() == "false":
verify_cert = False
else:
verify_cert = False
else:
verify_cert = False
check_supported_idrac_version()
else:
logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
sys.exit(0)
if args["accept"]:
support_assist_get_EULA_status()
if accept_interface is None:
support_assist_accept_EULA()
else:
logging.info("\n- WARNING, SupportAssist EULA has already been accepted")
if not args["export"]:
sys.exit(0)
if args["export"] and args["data"]:
support_assist_collection()
loop_job_status()
sys.exit(0)
if args["get"]:
support_assist_get_EULA_status()
sys.exit(0)
if args["register"] and args["city"] and args["companyname"] and args["country"] and args["firstname"] and args["lastname"] and args["phonenumber"] and args["state"] and args["street"] and args["zip"]:
support_assist_register()
sys.exit(0)
else:
logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.") | PypiClean |
# --- Flask-Turnstile-0.1.1: flask_turnstile.py ---
__NAME__ = "Flask-Turnstile"
__version__ = "0.1.1"
__license__ = "MIT"
__author__ = "Kristian (originally ReCaptcha by Mardix)"
__copyright__ = "(c) 2023 Kristian (originally ReCaptcha by Mardix 2015)"
try:
from flask import request
try:
from jinja2 import Markup
except ImportError:
from markupsafe import Markup
import requests
except ImportError as ex:
print("Missing dependencies")
class BlueprintCompatibility(object):
site_key = None
secret_key = None
class DEFAULTS(object):
IS_ENABLED = True
class Turnstile(object):
VERIFY_URL = "https://challenges.cloudflare.com/turnstile/v0/siteverify"
site_key = None
secret_key = None
is_enabled = False
def __init__(self, app=None, site_key=None, secret_key=None, is_enabled=True, **kwargs):
if site_key:
BlueprintCompatibility.site_key = site_key
BlueprintCompatibility.secret_key = secret_key
self.is_enabled = is_enabled
elif app:
self.init_app(app=app)
def init_app(self, app=None):
self.__init__(site_key=app.config.get("TURNSTILE_SITE_KEY"),
secret_key=app.config.get("TURNSTILE_SECRET_KEY"),
is_enabled=app.config.get("TURNSTILE_ENABLED", DEFAULTS.IS_ENABLED))
@app.context_processor
def get_code():
return dict(turnstile=Markup(self.get_code()))
def get_code(self):
"""
Returns the new Turnstile captcha code
        :return: the Turnstile widget markup, or an empty string when disabled
"""
return "" if not self.is_enabled else ("""
<script src="https://challenges.cloudflare.com/turnstile/v0/api.js" async defer></script>
<div class="cf-turnstile" data-sitekey="{SITE_KEY}"></div>
""".format(SITE_KEY=BlueprintCompatibility.site_key))
def verify(self, response=None, remote_ip=None):
if self.is_enabled:
data = {
"secret": BlueprintCompatibility.secret_key,
"response": response or request.form.get('cf-turnstile-response'),
"remoteip": remote_ip or request.environ.get('REMOTE_ADDR')
}
r = requests.post(self.VERIFY_URL, data=data)
return r.json()["success"] if r.status_code == 200 else False
        return True
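# Minimal wiring sketch (illustrative app and config values, not part of this
# module's docs): configure the keys, render the widget with {{ turnstile }}
# in a template, and call verify() in the POST handler:
#
#   app = Flask(__name__)
#   app.config.update(TURNSTILE_SITE_KEY="...", TURNSTILE_SECRET_KEY="...")
#   turnstile = Turnstile(app)
#
#   @app.route("/submit", methods=["POST"])
#   def submit():
#       if not turnstile.verify():
#           return "captcha failed", 400
#       return "ok"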
# --- MDSuite-0.2.0: mdsuite/calculators/green_kubo_ionic_conductivity.py ---
from abc import ABC
from dataclasses import dataclass
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from bokeh.models import HoverTool, LinearAxis, Span
from bokeh.models.ranges import Range1d
from bokeh.plotting import figure
from scipy.integrate import cumtrapz
from tqdm import tqdm
from mdsuite.calculators.calculator import call
from mdsuite.calculators.trajectory_calculator import TrajectoryCalculator
from mdsuite.database.mdsuite_properties import mdsuite_properties
from mdsuite.utils.units import boltzmann_constant, elementary_charge
@dataclass
class Args:
"""
Data class for the saved properties.
"""
data_range: int
correlation_time: int
tau_values: np.s_
atom_selection: np.s_
integration_range: int
class GreenKuboIonicConductivity(TrajectoryCalculator, ABC):
"""
Class for the Green-Kubo ionic conductivity implementation
Attributes
----------
experiment : object
Experiment class to call from
x_label : str
X label of the tensor_values when plotted
y_label : str
Y label of the tensor_values when plotted
analysis_name : str
Name of the analysis
loaded_property : str
Property loaded from the database_path for the analysis
See Also
--------
mdsuite.calculators.calculator.Calculator class
Examples
--------
experiment.run_computation.GreenKuboIonicConductivity(data_range=500,
plot=True, correlation_time=10)
"""
def __init__(self, **kwargs):
"""
Attributes
----------
experiment : object
Experiment class to call from
"""
# update experiment class
super().__init__(**kwargs)
self.scale_function = {"linear": {"scale_factor": 5}}
self.loaded_property = mdsuite_properties.ionic_current
self.system_property = True
self.x_label = r"$$\text{Time} / s$$"
self.y_label = r"$$\text{JACF} / C^{2}\cdot m^{2}/s^{2}$$"
self.analysis_name = "Green_Kubo_Ionic_Conductivity"
self.result_keys = ["ionic_conductivity", "uncertainty"]
self.result_series_keys = ["time", "acf", "integral", "integral_uncertainty"]
self.prefactor = None
self._dtype = tf.float64
@call
def __call__(
self,
plot=True,
data_range=500,
correlation_time=1,
tau_values: np.s_ = np.s_[:],
integration_range: int = None,
):
"""
Parameters
----------
plot : bool
if true, plot the output.
data_range : int
Data range to use in the analysis.
correlation_time : int
Correlation time to use in the window sampling.
        tau_values : np.s_
            Slice selecting which correlation times to evaluate.
        integration_range : int
            Range over which integration should be performed.
"""
self.plot = plot
self.jacf: np.ndarray
self.sigma = []
if integration_range is None:
integration_range = data_range - 1
# set args that will affect the computation result
self.args = Args(
data_range=data_range,
correlation_time=correlation_time,
tau_values=tau_values,
atom_selection=np.s_[:],
integration_range=integration_range,
)
self.time = self._handle_tau_values()
self.jacf = np.zeros(self.data_resolution)
self.acfs = []
self.sigmas = []
def check_input(self):
"""
Check the user input to ensure no conflicts are present.
Returns
-------
"""
self._run_dependency_check()
def _calculate_prefactor(self):
"""
Compute the ionic conductivity prefactor.
Returns
-------
"""
# TODO improve docstring
# Calculate the prefactor
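        # Green-Kubo form: sigma = 1 / (3 V k_B T) * integral_0^inf <J(0) . J(t)> dt;
        # the unit factors below convert simulation units to SI.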
numerator = (elementary_charge**2) * (self.experiment.units.length**2)
denominator = (
3
* boltzmann_constant
* self.experiment.temperature
* self.experiment.volume
* self.experiment.units.volume
* self.experiment.units.time
)
self.prefactor = numerator / denominator
def _apply_averaging_factor(self):
"""
Apply the averaging factor to the msd array.
Returns
-------
"""
pass
def ensemble_operation(self, ensemble: tf.Tensor):
"""
        Compute the current autocorrelation function (JACF) for one ensemble and store it.
Parameters
----------
ensemble : tf.Tensor
Ensemble on which to operate.
Returns
-------
        None; the JACF and its running integral are appended to self.acfs
        and self.sigmas.
"""
ensemble = tf.gather(ensemble, self.args.tau_values, axis=1)
jacf = tfp.stats.auto_correlation(ensemble, normalize=False, axis=1, center=False)
jacf = tf.squeeze(tf.reduce_sum(jacf, axis=-1), axis=0)
self.acfs.append(jacf)
self.sigmas.append(cumtrapz(jacf, x=self.time))
def _post_operation_processes(self):
"""
call the post-op processes
Returns
-------
"""
sigma = np.mean(self.sigmas, axis=0)
sigma_SEM = np.std(self.sigmas, axis=0) / np.sqrt(len(self.sigmas))
acf = np.mean(self.acfs, axis=0)
ionic_conductivity = self.prefactor * sigma[self.args.integration_range - 1]
ionic_conductivity_SEM = (
self.prefactor * sigma_SEM[self.args.integration_range - 1]
)
data = {
self.result_keys[0]: [ionic_conductivity],
self.result_keys[1]: [ionic_conductivity_SEM],
self.result_series_keys[0]: self.time.tolist(),
self.result_series_keys[1]: acf.tolist(),
self.result_series_keys[2]: sigma.tolist(),
self.result_series_keys[3]: sigma_SEM.tolist(),
}
self.queue_data(data=data, subjects=["System"])
def plot_data(self, data):
"""Plot the data"""
for selected_species, val in data.items():
fig = figure(x_axis_label=self.x_label, y_axis_label=self.y_label)
integral = np.array(val[self.result_series_keys[2]])
integral_err = np.array(val[self.result_series_keys[3]])
time = np.array(val[self.result_series_keys[0]])
acf = np.array(val[self.result_series_keys[1]])
# Compute the span
span = Span(
location=np.array(val[self.result_series_keys[0]])[
self.args.integration_range - 1
],
dimension="height",
line_dash="dashed",
)
# Compute vacf line
fig.line(
time,
acf,
color="#003f5c",
legend_label=(
f"{selected_species}: {val[self.result_keys[0]][0]: 0.3E} +-"
f" {val[self.result_keys[1]][0]: 0.3E}"
),
)
fig.extra_y_ranges = {
"Cond_Range": Range1d(start=0.6 * min(integral), end=1.3 * max(integral))
}
fig.add_layout(
LinearAxis(
y_range_name="Cond_Range",
axis_label=r"$$\text{Ionic Conductivity} / Scm^{-1}$$",
),
"right",
)
fig.line(time[1:], integral, y_range_name="Cond_Range", color="#bc5090")
fig.varea(
time[1:],
integral - integral_err,
integral + integral_err,
alpha=0.3,
color="#ffa600",
y_range_name="Cond_Range",
)
fig.add_tools(HoverTool())
fig.add_layout(span)
self.plot_array.append(fig)
def run_calculator(self):
"""
Run analysis.
Returns
-------
"""
self.check_input()
# Compute the pre-factor early.
self._calculate_prefactor()
dict_ref = str.encode(
"/".join([self.loaded_property.name, self.loaded_property.name])
)
batch_ds = self.get_batch_dataset([self.loaded_property.name])
for batch in tqdm(
batch_ds,
ncols=70,
total=self.n_batches,
disable=self.memory_manager.minibatch,
):
ensemble_ds = self.get_ensemble_dataset(batch, self.loaded_property.name)
for ensemble in ensemble_ds:
self.ensemble_operation(ensemble[dict_ref])
# Scale, save, and plot the data.
self._apply_averaging_factor()
        self._post_operation_processes()
// --- Booktype-1.5: lib/booki/site_static/js/jquery/ui/jquery.ui.draggable.js ---
(function( $, undefined ) {
$.widget("ui.draggable", $.ui.mouse, {
widgetEventPrefix: "drag",
options: {
addClasses: true,
appendTo: "parent",
axis: false,
connectToSortable: false,
containment: false,
cursor: "auto",
cursorAt: false,
grid: false,
handle: false,
helper: "original",
iframeFix: false,
opacity: false,
refreshPositions: false,
revert: false,
revertDuration: 500,
scope: "default",
scroll: true,
scrollSensitivity: 20,
scrollSpeed: 20,
snap: false,
snapMode: "both",
snapTolerance: 20,
stack: false,
zIndex: false
},
_create: function() {
if (this.options.helper == 'original' && !(/^(?:r|a|f)/).test(this.element.css("position")))
this.element[0].style.position = 'relative';
(this.options.addClasses && this.element.addClass("ui-draggable"));
(this.options.disabled && this.element.addClass("ui-draggable-disabled"));
this._mouseInit();
},
destroy: function() {
if(!this.element.data('draggable')) return;
this.element
.removeData("draggable")
.unbind(".draggable")
.removeClass("ui-draggable"
+ " ui-draggable-dragging"
+ " ui-draggable-disabled");
this._mouseDestroy();
return this;
},
_mouseCapture: function(event) {
var o = this.options;
// among others, prevent a drag on a resizable-handle
if (this.helper || o.disabled || $(event.target).is('.ui-resizable-handle'))
return false;
//Quit if we're not on a valid handle
this.handle = this._getHandle(event);
if (!this.handle)
return false;
return true;
},
_mouseStart: function(event) {
var o = this.options;
//Create and append the visible helper
this.helper = this._createHelper(event);
//Cache the helper size
this._cacheHelperProportions();
//If ddmanager is used for droppables, set the global draggable
if($.ui.ddmanager)
$.ui.ddmanager.current = this;
/*
* - Position generation -
* This block generates everything position related - it's the core of draggables.
*/
//Cache the margins of the original element
this._cacheMargins();
//Store the helper's css position
this.cssPosition = this.helper.css("position");
this.scrollParent = this.helper.scrollParent();
//The element's absolute position on the page minus margins
this.offset = this.positionAbs = this.element.offset();
this.offset = {
top: this.offset.top - this.margins.top,
left: this.offset.left - this.margins.left
};
$.extend(this.offset, {
click: { //Where the click happened, relative to the element
left: event.pageX - this.offset.left,
top: event.pageY - this.offset.top
},
parent: this._getParentOffset(),
relative: this._getRelativeOffset() //This is a relative to absolute position minus the actual position calculation - only used for relative positioned helper
});
//Generate the original position
this.originalPosition = this.position = this._generatePosition(event);
this.originalPageX = event.pageX;
this.originalPageY = event.pageY;
//Adjust the mouse offset relative to the helper if 'cursorAt' is supplied
(o.cursorAt && this._adjustOffsetFromHelper(o.cursorAt));
//Set a containment if given in the options
if(o.containment)
this._setContainment();
//Trigger event + callbacks
if(this._trigger("start", event) === false) {
this._clear();
return false;
}
//Recache the helper size
this._cacheHelperProportions();
//Prepare the droppable offsets
if ($.ui.ddmanager && !o.dropBehaviour)
$.ui.ddmanager.prepareOffsets(this, event);
this.helper.addClass("ui-draggable-dragging");
this._mouseDrag(event, true); //Execute the drag once - this causes the helper not to be visible before getting its correct position
return true;
},
_mouseDrag: function(event, noPropagation) {
//Compute the helpers position
this.position = this._generatePosition(event);
this.positionAbs = this._convertPositionTo("absolute");
//Call plugins and callbacks and use the resulting position if something is returned
if (!noPropagation) {
var ui = this._uiHash();
if(this._trigger('drag', event, ui) === false) {
this._mouseUp({});
return false;
}
this.position = ui.position;
}
if(!this.options.axis || this.options.axis != "y") this.helper[0].style.left = this.position.left+'px';
if(!this.options.axis || this.options.axis != "x") this.helper[0].style.top = this.position.top+'px';
if($.ui.ddmanager) $.ui.ddmanager.drag(this, event);
return false;
},
_mouseStop: function(event) {
//If we are using droppables, inform the manager about the drop
var dropped = false;
if ($.ui.ddmanager && !this.options.dropBehaviour)
dropped = $.ui.ddmanager.drop(this, event);
//if a drop comes from outside (a sortable)
if(this.dropped) {
dropped = this.dropped;
this.dropped = false;
}
//if the original element is removed, don't bother to continue if helper is set to "original"
if((!this.element[0] || !this.element[0].parentNode) && this.options.helper == "original")
return false;
if((this.options.revert == "invalid" && !dropped) || (this.options.revert == "valid" && dropped) || this.options.revert === true || ($.isFunction(this.options.revert) && this.options.revert.call(this.element, dropped))) {
var self = this;
$(this.helper).animate(this.originalPosition, parseInt(this.options.revertDuration, 10), function() {
if(self._trigger("stop", event) !== false) {
self._clear();
}
});
} else {
if(this._trigger("stop", event) !== false) {
this._clear();
}
}
return false;
},
cancel: function() {
if(this.helper.is(".ui-draggable-dragging")) {
this._mouseUp({});
} else {
this._clear();
}
return this;
},
_getHandle: function(event) {
var handle = !this.options.handle || !$(this.options.handle, this.element).length ? true : false;
$(this.options.handle, this.element)
.find("*")
.andSelf()
.each(function() {
if(this == event.target) handle = true;
});
return handle;
},
_createHelper: function(event) {
var o = this.options;
var helper = $.isFunction(o.helper) ? $(o.helper.apply(this.element[0], [event])) : (o.helper == 'clone' ? this.element.clone() : this.element);
if(!helper.parents('body').length)
helper.appendTo((o.appendTo == 'parent' ? this.element[0].parentNode : o.appendTo));
if(helper[0] != this.element[0] && !(/(fixed|absolute)/).test(helper.css("position")))
helper.css("position", "absolute");
return helper;
},
_adjustOffsetFromHelper: function(obj) {
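		// Accepts cursorAt as an "x y" string, an [x, y] array, or an object
		// with top/left/right/bottom keys, and shifts the cached click offset
		// so the helper is positioned relative to the cursor accordingly.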
if (typeof obj == 'string') {
obj = obj.split(' ');
}
if ($.isArray(obj)) {
obj = {left: +obj[0], top: +obj[1] || 0};
}
if ('left' in obj) {
this.offset.click.left = obj.left + this.margins.left;
}
if ('right' in obj) {
this.offset.click.left = this.helperProportions.width - obj.right + this.margins.left;
}
if ('top' in obj) {
this.offset.click.top = obj.top + this.margins.top;
}
if ('bottom' in obj) {
this.offset.click.top = this.helperProportions.height - obj.bottom + this.margins.top;
}
},
_getParentOffset: function() {
//Get the offsetParent and cache its position
this.offsetParent = this.helper.offsetParent();
var po = this.offsetParent.offset();
		// This is a special case where we need to modify an offset calculated on start, since the following happened:
		// 1. The position of the helper is absolute, so its position is calculated based on the next positioned parent
		// 2. The actual offset parent is a child of the scroll parent, and the scroll parent isn't the document, which means that
		//    the scroll is included in the initial calculation of the offset of the parent, and never recalculated upon drag
if(this.cssPosition == 'absolute' && this.scrollParent[0] != document && $.ui.contains(this.scrollParent[0], this.offsetParent[0])) {
po.left += this.scrollParent.scrollLeft();
po.top += this.scrollParent.scrollTop();
}
if((this.offsetParent[0] == document.body) //This needs to be actually done for all browsers, since pageX/pageY includes this information
|| (this.offsetParent[0].tagName && this.offsetParent[0].tagName.toLowerCase() == 'html' && $.browser.msie)) //Ugly IE fix
po = { top: 0, left: 0 };
return {
top: po.top + (parseInt(this.offsetParent.css("borderTopWidth"),10) || 0),
left: po.left + (parseInt(this.offsetParent.css("borderLeftWidth"),10) || 0)
};
},
_getRelativeOffset: function() {
if(this.cssPosition == "relative") {
var p = this.element.position();
return {
top: p.top - (parseInt(this.helper.css("top"),10) || 0) + this.scrollParent.scrollTop(),
left: p.left - (parseInt(this.helper.css("left"),10) || 0) + this.scrollParent.scrollLeft()
};
} else {
return { top: 0, left: 0 };
}
},
_cacheMargins: function() {
this.margins = {
left: (parseInt(this.element.css("marginLeft"),10) || 0),
top: (parseInt(this.element.css("marginTop"),10) || 0)
};
},
_cacheHelperProportions: function() {
this.helperProportions = {
width: this.helper.outerWidth(),
height: this.helper.outerHeight()
};
},
_setContainment: function() {
var o = this.options;
if(o.containment == 'parent') o.containment = this.helper[0].parentNode;
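		// this.containment holds page-coordinate bounds for the helper's
		// top-left corner: [minLeft, minTop, maxLeft, maxTop].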
if(o.containment == 'document' || o.containment == 'window') this.containment = [
(o.containment == 'document' ? 0 : $(window).scrollLeft()) - this.offset.relative.left - this.offset.parent.left,
(o.containment == 'document' ? 0 : $(window).scrollTop()) - this.offset.relative.top - this.offset.parent.top,
(o.containment == 'document' ? 0 : $(window).scrollLeft()) + $(o.containment == 'document' ? document : window).width() - this.helperProportions.width - this.margins.left,
(o.containment == 'document' ? 0 : $(window).scrollTop()) + ($(o.containment == 'document' ? document : window).height() || document.body.parentNode.scrollHeight) - this.helperProportions.height - this.margins.top
];
if(!(/^(document|window|parent)$/).test(o.containment) && o.containment.constructor != Array) {
var ce = $(o.containment)[0]; if(!ce) return;
var co = $(o.containment).offset();
var over = ($(ce).css("overflow") != 'hidden');
this.containment = [
co.left + (parseInt($(ce).css("borderLeftWidth"),10) || 0) + (parseInt($(ce).css("paddingLeft"),10) || 0) - this.margins.left,
co.top + (parseInt($(ce).css("borderTopWidth"),10) || 0) + (parseInt($(ce).css("paddingTop"),10) || 0) - this.margins.top,
co.left+(over ? Math.max(ce.scrollWidth,ce.offsetWidth) : ce.offsetWidth) - (parseInt($(ce).css("borderLeftWidth"),10) || 0) - (parseInt($(ce).css("paddingRight"),10) || 0) - this.helperProportions.width - this.margins.left,
co.top+(over ? Math.max(ce.scrollHeight,ce.offsetHeight) : ce.offsetHeight) - (parseInt($(ce).css("borderTopWidth"),10) || 0) - (parseInt($(ce).css("paddingBottom"),10) || 0) - this.helperProportions.height - this.margins.top
];
} else if(o.containment.constructor == Array) {
this.containment = o.containment;
}
},
_convertPositionTo: function(d, pos) {
if(!pos) pos = this.position;
var mod = d == "absolute" ? 1 : -1;
var o = this.options, scroll = this.cssPosition == 'absolute' && !(this.scrollParent[0] != document && $.ui.contains(this.scrollParent[0], this.offsetParent[0])) ? this.offsetParent : this.scrollParent, scrollIsRootNode = (/(html|body)/i).test(scroll[0].tagName);
return {
top: (
pos.top // The absolute mouse position
+ this.offset.relative.top * mod // Only for relative positioned nodes: Relative offset from element to offset parent
+ this.offset.parent.top * mod // The offsetParent's offset without borders (offset + border)
- ($.browser.safari && $.browser.version < 526 && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollTop() : ( scrollIsRootNode ? 0 : scroll.scrollTop() ) ) * mod)
),
left: (
pos.left // The absolute mouse position
+ this.offset.relative.left * mod // Only for relative positioned nodes: Relative offset from element to offset parent
+ this.offset.parent.left * mod // The offsetParent's offset without borders (offset + border)
- ($.browser.safari && $.browser.version < 526 && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollLeft() : scrollIsRootNode ? 0 : scroll.scrollLeft() ) * mod)
)
};
},
_generatePosition: function(event) {
var o = this.options, scroll = this.cssPosition == 'absolute' && !(this.scrollParent[0] != document && $.ui.contains(this.scrollParent[0], this.offsetParent[0])) ? this.offsetParent : this.scrollParent, scrollIsRootNode = (/(html|body)/i).test(scroll[0].tagName);
var pageX = event.pageX;
var pageY = event.pageY;
		/*
		 * - Position constraining -
		 * Constrain the position to a mix of grid and containment.
		 */
if(this.originalPosition) { //If we are not dragging yet, we won't check for options
if(this.containment) {
if(event.pageX - this.offset.click.left < this.containment[0]) pageX = this.containment[0] + this.offset.click.left;
if(event.pageY - this.offset.click.top < this.containment[1]) pageY = this.containment[1] + this.offset.click.top;
if(event.pageX - this.offset.click.left > this.containment[2]) pageX = this.containment[2] + this.offset.click.left;
if(event.pageY - this.offset.click.top > this.containment[3]) pageY = this.containment[3] + this.offset.click.top;
}
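			// Grid snapping: round the dragged distance from the original point
			// to the nearest multiple of the grid cell, then step back one cell
			// if the snapped position would leave the containment box.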
if(o.grid) {
var top = this.originalPageY + Math.round((pageY - this.originalPageY) / o.grid[1]) * o.grid[1];
pageY = this.containment ? (!(top - this.offset.click.top < this.containment[1] || top - this.offset.click.top > this.containment[3]) ? top : (!(top - this.offset.click.top < this.containment[1]) ? top - o.grid[1] : top + o.grid[1])) : top;
var left = this.originalPageX + Math.round((pageX - this.originalPageX) / o.grid[0]) * o.grid[0];
pageX = this.containment ? (!(left - this.offset.click.left < this.containment[0] || left - this.offset.click.left > this.containment[2]) ? left : (!(left - this.offset.click.left < this.containment[0]) ? left - o.grid[0] : left + o.grid[0])) : left;
}
}
return {
top: (
pageY // The absolute mouse position
- this.offset.click.top // Click offset (relative to the element)
- this.offset.relative.top // Only for relative positioned nodes: Relative offset from element to offset parent
- this.offset.parent.top // The offsetParent's offset without borders (offset + border)
+ ($.browser.safari && $.browser.version < 526 && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollTop() : ( scrollIsRootNode ? 0 : scroll.scrollTop() ) ))
),
left: (
pageX // The absolute mouse position
- this.offset.click.left // Click offset (relative to the element)
- this.offset.relative.left // Only for relative positioned nodes: Relative offset from element to offset parent
- this.offset.parent.left // The offsetParent's offset without borders (offset + border)
+ ($.browser.safari && $.browser.version < 526 && this.cssPosition == 'fixed' ? 0 : ( this.cssPosition == 'fixed' ? -this.scrollParent.scrollLeft() : scrollIsRootNode ? 0 : scroll.scrollLeft() ))
)
};
},
_clear: function() {
this.helper.removeClass("ui-draggable-dragging");
if(this.helper[0] != this.element[0] && !this.cancelHelperRemoval) this.helper.remove();
//if($.ui.ddmanager) $.ui.ddmanager.current = null;
this.helper = null;
this.cancelHelperRemoval = false;
},
// From now on bulk stuff - mainly helpers
_trigger: function(type, event, ui) {
ui = ui || this._uiHash();
$.ui.plugin.call(this, type, [event, ui]);
if(type == "drag") this.positionAbs = this._convertPositionTo("absolute"); //The absolute position has to be recalculated after plugins
return $.Widget.prototype._trigger.call(this, type, event, ui);
},
plugins: {},
_uiHash: function(event) {
return {
helper: this.helper,
position: this.position,
originalPosition: this.originalPosition,
offset: this.positionAbs
};
}
});
$.extend($.ui.draggable, {
version: "1.8.10"
});
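
// Illustrative usage sketch (not part of the library source). The option
// names below are draggable options handled by the widget code above; the
// selector and the concrete values are assumptions for the example.
//
//   $("#note").draggable({
//       containment: "parent",          // keep the helper inside its parent
//       grid: [20, 20],                 // snap movement to a 20x20 grid
//       cursorAt: { top: 5, left: 5 },  // anchor the cursor inside the helper
//       revert: "invalid"               // animate back if not dropped on a target
//   });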
$.ui.plugin.add("draggable", "connectToSortable", {
start: function(event, ui) {
var inst = $(this).data("draggable"), o = inst.options,
uiSortable = $.extend({}, ui, { item: inst.element });
inst.sortables = [];
$(o.connectToSortable).each(function() {
var sortable = $.data(this, 'sortable');
if (sortable && !sortable.options.disabled) {
inst.sortables.push({
instance: sortable,
shouldRevert: sortable.options.revert
});
sortable._refreshItems(); //Do a one-time refresh at start to refresh the containerCache
sortable._trigger("activate", event, uiSortable);
}
});
},
stop: function(event, ui) {
//If we are still over the sortable, we fake the stop event of the sortable, but also remove helper
var inst = $(this).data("draggable"),
uiSortable = $.extend({}, ui, { item: inst.element });
$.each(inst.sortables, function() {
if(this.instance.isOver) {
this.instance.isOver = 0;
inst.cancelHelperRemoval = true; //Don't remove the helper in the draggable instance
this.instance.cancelHelperRemoval = false; //Remove it in the sortable instance (so sortable plugins like revert still work)
//The sortable revert is supported, and we have to set a temporary dropped variable on the draggable to support revert: 'valid/invalid'
if(this.shouldRevert) this.instance.options.revert = true;
//Trigger the stop of the sortable
this.instance._mouseStop(event);
this.instance.options.helper = this.instance.options._helper;
//If the helper has been the original item, restore properties in the sortable
if(inst.options.helper == 'original')
this.instance.currentItem.css({ top: 'auto', left: 'auto' });
} else {
this.instance.cancelHelperRemoval = false; //Remove the helper in the sortable instance
this.instance._trigger("deactivate", event, uiSortable);
}
});
},
drag: function(event, ui) {
var inst = $(this).data("draggable"), self = this;
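		// Note: checkPos below is retained for reference but is not called
		// anywhere in this version of the plugin.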
var checkPos = function(o) {
var dyClick = this.offset.click.top, dxClick = this.offset.click.left;
var helperTop = this.positionAbs.top, helperLeft = this.positionAbs.left;
var itemHeight = o.height, itemWidth = o.width;
var itemTop = o.top, itemLeft = o.left;
return $.ui.isOver(helperTop + dyClick, helperLeft + dxClick, itemTop, itemLeft, itemHeight, itemWidth);
};
$.each(inst.sortables, function(i) {
//Copy over some variables to allow calling the sortable's native _intersectsWith
this.instance.positionAbs = inst.positionAbs;
this.instance.helperProportions = inst.helperProportions;
this.instance.offset.click = inst.offset.click;
if(this.instance._intersectsWith(this.instance.containerCache)) {
//If it intersects, we use a little isOver variable and set it once, so our move-in stuff gets fired only once
if(!this.instance.isOver) {
this.instance.isOver = 1;
//Now we fake the start of dragging for the sortable instance,
//by cloning the list group item, appending it to the sortable and using it as inst.currentItem
//We can then fire the start event of the sortable with our passed browser event, and our own helper (so it doesn't create a new one)
this.instance.currentItem = $(self).clone().appendTo(this.instance.element).data("sortable-item", true);
this.instance.options._helper = this.instance.options.helper; //Store helper option to later restore it
this.instance.options.helper = function() { return ui.helper[0]; };
event.target = this.instance.currentItem[0];
this.instance._mouseCapture(event, true);
this.instance._mouseStart(event, true, true);
//Because the browser event is way off the new appended portlet, we modify a couple of variables to reflect the changes
this.instance.offset.click.top = inst.offset.click.top;
this.instance.offset.click.left = inst.offset.click.left;
this.instance.offset.parent.left -= inst.offset.parent.left - this.instance.offset.parent.left;
this.instance.offset.parent.top -= inst.offset.parent.top - this.instance.offset.parent.top;
inst._trigger("toSortable", event);
inst.dropped = this.instance.element; //draggable revert needs that
//hack so receive/update callbacks work (mostly)
inst.currentItem = inst.element;
this.instance.fromOutside = inst;
}
//Provided we did all the previous steps, we can fire the drag event of the sortable on every draggable drag, when it intersects with the sortable
if(this.instance.currentItem) this.instance._mouseDrag(event);
} else {
//If it doesn't intersect with the sortable, and it intersected before,
//we fake the drag stop of the sortable, but make sure it doesn't remove the helper by using cancelHelperRemoval
if(this.instance.isOver) {
this.instance.isOver = 0;
this.instance.cancelHelperRemoval = true;
//Prevent reverting on this forced stop
this.instance.options.revert = false;
// The out event needs to be triggered independently
this.instance._trigger('out', event, this.instance._uiHash(this.instance));
this.instance._mouseStop(event, true);
this.instance.options.helper = this.instance.options._helper;
					//Now we remove our currentItem, the list group clone again, and the placeholder, and animate the helper back to its original size
this.instance.currentItem.remove();
if(this.instance.placeholder) this.instance.placeholder.remove();
inst._trigger("fromSortable", event);
inst.dropped = false; //draggable revert needs that
}
			}
});
}
});
$.ui.plugin.add("draggable", "cursor", {
start: function(event, ui) {
var t = $('body'), o = $(this).data('draggable').options;
if (t.css("cursor")) o._cursor = t.css("cursor");
t.css("cursor", o.cursor);
},
stop: function(event, ui) {
var o = $(this).data('draggable').options;
if (o._cursor) $('body').css("cursor", o._cursor);
}
});
$.ui.plugin.add("draggable", "iframeFix", {
start: function(event, ui) {
var o = $(this).data('draggable').options;
$(o.iframeFix === true ? "iframe" : o.iframeFix).each(function() {
$('<div class="ui-draggable-iframeFix" style="background: #fff;"></div>')
.css({
width: this.offsetWidth+"px", height: this.offsetHeight+"px",
position: "absolute", opacity: "0.001", zIndex: 1000
})
.css($(this).offset())
.appendTo("body");
});
},
stop: function(event, ui) {
$("div.ui-draggable-iframeFix").each(function() { this.parentNode.removeChild(this); }); //Remove frame helpers
}
});
$.ui.plugin.add("draggable", "opacity", {
start: function(event, ui) {
var t = $(ui.helper), o = $(this).data('draggable').options;
if(t.css("opacity")) o._opacity = t.css("opacity");
t.css('opacity', o.opacity);
},
stop: function(event, ui) {
var o = $(this).data('draggable').options;
if(o._opacity) $(ui.helper).css('opacity', o._opacity);
}
});
$.ui.plugin.add("draggable", "scroll", {
start: function(event, ui) {
var i = $(this).data("draggable");
if(i.scrollParent[0] != document && i.scrollParent[0].tagName != 'HTML') i.overflowOffset = i.scrollParent.offset();
},
drag: function(event, ui) {
var i = $(this).data("draggable"), o = i.options, scrolled = false;
if(i.scrollParent[0] != document && i.scrollParent[0].tagName != 'HTML') {
if(!o.axis || o.axis != 'x') {
if((i.overflowOffset.top + i.scrollParent[0].offsetHeight) - event.pageY < o.scrollSensitivity)
i.scrollParent[0].scrollTop = scrolled = i.scrollParent[0].scrollTop + o.scrollSpeed;
else if(event.pageY - i.overflowOffset.top < o.scrollSensitivity)
i.scrollParent[0].scrollTop = scrolled = i.scrollParent[0].scrollTop - o.scrollSpeed;
}
if(!o.axis || o.axis != 'y') {
if((i.overflowOffset.left + i.scrollParent[0].offsetWidth) - event.pageX < o.scrollSensitivity)
i.scrollParent[0].scrollLeft = scrolled = i.scrollParent[0].scrollLeft + o.scrollSpeed;
else if(event.pageX - i.overflowOffset.left < o.scrollSensitivity)
i.scrollParent[0].scrollLeft = scrolled = i.scrollParent[0].scrollLeft - o.scrollSpeed;
}
} else {
if(!o.axis || o.axis != 'x') {
if(event.pageY - $(document).scrollTop() < o.scrollSensitivity)
scrolled = $(document).scrollTop($(document).scrollTop() - o.scrollSpeed);
else if($(window).height() - (event.pageY - $(document).scrollTop()) < o.scrollSensitivity)
scrolled = $(document).scrollTop($(document).scrollTop() + o.scrollSpeed);
}
if(!o.axis || o.axis != 'y') {
if(event.pageX - $(document).scrollLeft() < o.scrollSensitivity)
scrolled = $(document).scrollLeft($(document).scrollLeft() - o.scrollSpeed);
else if($(window).width() - (event.pageX - $(document).scrollLeft()) < o.scrollSensitivity)
scrolled = $(document).scrollLeft($(document).scrollLeft() + o.scrollSpeed);
}
}
if(scrolled !== false && $.ui.ddmanager && !o.dropBehaviour)
$.ui.ddmanager.prepareOffsets(i, event);
}
});
$.ui.plugin.add("draggable", "snap", {
start: function(event, ui) {
var i = $(this).data("draggable"), o = i.options;
i.snapElements = [];
$(o.snap.constructor != String ? ( o.snap.items || ':data(draggable)' ) : o.snap).each(function() {
var $t = $(this); var $o = $t.offset();
if(this != i.element[0]) i.snapElements.push({
item: this,
width: $t.outerWidth(), height: $t.outerHeight(),
top: $o.top, left: $o.left
});
});
},
drag: function(event, ui) {
var inst = $(this).data("draggable"), o = inst.options;
var d = o.snapTolerance;
var x1 = ui.offset.left, x2 = x1 + inst.helperProportions.width,
y1 = ui.offset.top, y2 = y1 + inst.helperProportions.height;
for (var i = inst.snapElements.length - 1; i >= 0; i--){
var l = inst.snapElements[i].left, r = l + inst.snapElements[i].width,
t = inst.snapElements[i].top, b = t + inst.snapElements[i].height;
//Yes, I know, this is insane ;)
if(!((l-d < x1 && x1 < r+d && t-d < y1 && y1 < b+d) || (l-d < x1 && x1 < r+d && t-d < y2 && y2 < b+d) || (l-d < x2 && x2 < r+d && t-d < y1 && y1 < b+d) || (l-d < x2 && x2 < r+d && t-d < y2 && y2 < b+d))) {
if(inst.snapElements[i].snapping) (inst.options.snap.release && inst.options.snap.release.call(inst.element, event, $.extend(inst._uiHash(), { snapItem: inst.snapElements[i].item })));
inst.snapElements[i].snapping = false;
continue;
}
if(o.snapMode != 'inner') {
var ts = Math.abs(t - y2) <= d;
var bs = Math.abs(b - y1) <= d;
var ls = Math.abs(l - x2) <= d;
var rs = Math.abs(r - x1) <= d;
if(ts) ui.position.top = inst._convertPositionTo("relative", { top: t - inst.helperProportions.height, left: 0 }).top - inst.margins.top;
if(bs) ui.position.top = inst._convertPositionTo("relative", { top: b, left: 0 }).top - inst.margins.top;
if(ls) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: l - inst.helperProportions.width }).left - inst.margins.left;
if(rs) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: r }).left - inst.margins.left;
}
var first = (ts || bs || ls || rs);
if(o.snapMode != 'outer') {
var ts = Math.abs(t - y1) <= d;
var bs = Math.abs(b - y2) <= d;
var ls = Math.abs(l - x1) <= d;
var rs = Math.abs(r - x2) <= d;
if(ts) ui.position.top = inst._convertPositionTo("relative", { top: t, left: 0 }).top - inst.margins.top;
if(bs) ui.position.top = inst._convertPositionTo("relative", { top: b - inst.helperProportions.height, left: 0 }).top - inst.margins.top;
if(ls) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: l }).left - inst.margins.left;
if(rs) ui.position.left = inst._convertPositionTo("relative", { top: 0, left: r - inst.helperProportions.width }).left - inst.margins.left;
}
if(!inst.snapElements[i].snapping && (ts || bs || ls || rs || first))
(inst.options.snap.snap && inst.options.snap.snap.call(inst.element, event, $.extend(inst._uiHash(), { snapItem: inst.snapElements[i].item })));
inst.snapElements[i].snapping = (ts || bs || ls || rs || first);
		}
}
});
$.ui.plugin.add("draggable", "stack", {
start: function(event, ui) {
var o = $(this).data("draggable").options;
var group = $.makeArray($(o.stack)).sort(function(a,b) {
return (parseInt($(a).css("zIndex"),10) || 0) - (parseInt($(b).css("zIndex"),10) || 0);
});
if (!group.length) { return; }
		var min = parseInt(group[0].style.zIndex, 10) || 0;
$(group).each(function(i) {
this.style.zIndex = min + i;
});
this[0].style.zIndex = min + group.length;
}
});
$.ui.plugin.add("draggable", "zIndex", {
start: function(event, ui) {
var t = $(ui.helper), o = $(this).data("draggable").options;
if(t.css("zIndex")) o._zIndex = t.css("zIndex");
t.css('zIndex', o.zIndex);
},
stop: function(event, ui) {
var o = $(this).data("draggable").options;
if(o._zIndex) $(ui.helper).css('zIndex', o._zIndex);
}
});
})(jQuery);

/BIT_framework-0.0.2-py3-none-any.whl/BIT_DL/pytorch/modules/decoders/t5_decoder.py
import sys
from typing import Optional, Union
import torch
from torch import nn
from BIT_DL.pytorch.modules.encoders.multihead_attention import Cache
from BIT_DL.pytorch.modules.pretrained.t5_utils import \
T5LayerNorm, MultiheadRPRAttention
from BIT_DL.pytorch.modules.decoders.transformer_decoders import \
TokenEmbedder, TokenPosEmbedder
from BIT_DL.pytorch.modules.encoders.transformer_encoder import \
default_transformer_poswise_net_hparams
from BIT_DL.pytorch.modules.decoders.transformer_decoders \
import TransformerDecoder
from BIT_DL.pytorch.modules.networks.networks import FeedForwardNetwork
class T5Decoder(TransformerDecoder):
r"""T5 decoder that applies multi-head self-attention with relative
position representation for sequence decoding.
It is a stack of
    :class:`~BIT_DL.pytorch.modules.pretrained.t5_utils.MultiheadRPRAttention`,
:class:`~BIT_DL.pytorch.modules.FeedForwardNetwork`, and residual connections.
Args:
token_embedder: An instance of :torch_nn:`Module`, or a function taking
a :tensor:`LongTensor` ``tokens`` as argument. This is the embedder
called in :meth:`embed_tokens` to convert input tokens to
embeddings.
token_pos_embedder: An instance of :torch_nn:`Module`, or a function
taking two :tensor:`LongTensor`\ s ``tokens`` and ``positions`` as
argument. This is the embedder called in :meth:`embed_tokens` to
convert input tokens with positions to embeddings.
.. note::
Only one among :attr:`token_embedder` and
:attr:`token_pos_embedder` should be specified. If neither is
specified, you must subclass :class:`TransformerDecoder` and
override :meth:`embed_tokens`.
vocab_size (int, optional): Vocabulary size. Required if
:attr:`output_layer` is `None`.
output_layer (optional): An output layer that transforms cell output
to logits. This can be:
- A callable layer, e.g., an instance of :torch_nn:`Module`.
- A tensor. A :torch_nn:`Linear` layer will be created using the
tensor as weights. The bias of the dense layer is determined
by ``hparams.output_layer_bias``. This can be used to tie the
output layer with the input embedding matrix, as proposed in
https://arxiv.org/pdf/1608.05859.pdf.
- `None`. A :torch_nn:`Linear` layer will be created based on
:attr:`vocab_size` and ``hparams.output_layer_bias``.
- If no output layer is needed at the end, set
:attr:`vocab_size` to `None` and ``output_layer`` to
:func:`~BIT_DL.pytorch.core.identity`.
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
.. document private functions
"""
# State variables used during `dynamic_decode`. Assigned in `forward`.
_state_max_decoding_length: int
_state_context: Optional[torch.LongTensor]
_state_context_sequence_length: Optional[torch.LongTensor]
_state_cache: Cache
def __init__(self,
token_embedder: Optional[TokenEmbedder] = None,
token_pos_embedder: Optional[TokenPosEmbedder] = None,
vocab_size: Optional[int] = None,
output_layer: Optional[Union[nn.Module, torch.Tensor]] = None,
hparams=None):
super().__init__(
token_embedder, token_pos_embedder,
vocab_size=vocab_size, output_layer=output_layer, hparams=hparams)
self.final_layer_norm = T5LayerNorm(self._input_size, # type: ignore
eps=self._hparams.eps)
def initialize_blocks(self):
r"""Helper function to initialize blocks.
"""
for i in range(self._hparams.num_blocks):
attn_module = MultiheadRPRAttention(
self._input_size,
self._hparams.multihead_attention,
stores_relative_position=bool(i == 0))
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadRPRAttention should be equal "
"to the dim of T5Decoder")
self.self_attns.append(attn_module)
self.self_attn_layer_norm.append(
T5LayerNorm(self._input_size, eps=self._hparams.eps))
attn_module = MultiheadRPRAttention(
self._input_size, self._hparams.multihead_attention,
stores_relative_position=bool(i == 0)
)
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadRPRAttention should be equal "
"to the dim of T5Decoder")
self.enc_dec_attns.append(attn_module)
self.end_dec_attn_layer_norm.append(
T5LayerNorm(self._input_size, eps=self._hparams.eps))
poswise_network = FeedForwardNetwork(
hparams=self._hparams.poswise_feedforward)
if (poswise_network.hparams.layers[-1]['kwargs']['out_features']
!= self._hparams.dim):
raise ValueError("The output dimension of "
"FeedForwardNetwork should be equal "
"to the dim of T5Decoder")
self.poswise_networks.append(poswise_network)
self.poswise_layer_norm.append(
T5LayerNorm(self._input_size, eps=self._hparams.eps))
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
# Same as in T5Encoder
"num_blocks": 6,
"dim": 512,
"embedding_dropout": 0.1,
"residual_dropout": 0.1,
"poswise_feedforward": default_transformer_poswise_net_hparams,
"multihead_attention": {
'name': 'multihead_rpr_attention',
'num_units': 512,
'output_dim': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'use_bias': False,
'is_decoder': True,
'relative_attention_num_buckets': 32
            },
            "initializer": None,
            "eps": 1e-6,
            "name": "t5_decoder",
# Additional for TransformerDecoder
"embedding_tie": True,
"output_layer_bias": False,
"max_decoding_length": int(1e10),
}
Here:
`"num_blocks"`: int
Number of stacked blocks.
`"dim"`: int
Hidden dimension of the encoder.
`"embedding_dropout"`: float
Dropout rate of the input embedding.
`"residual_dropout"`: float
Dropout rate of the residual connections.
`"poswise_feedforward"`: dict
Hyperparameters for a feed-forward network used in residual
connections.
Make sure the dimension of the output tensor is equal to ``dim``.
See
:func:`~BIT_DL.pytorch.modules.default_transformer_poswise_net_hparams`
for details.
`"multihead_attention"`: dict
Hyperparameters for the multi-head attention strategy.
Make sure the ``output_dim`` in this module is equal to ``dim``.
See :class:`~BIT_DL.pytorch.modules.MultiheadRPRAttention`
for details.
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~BIT_DL.pytorch.core.get_initializer` for details.
`"embedding_tie"`: bool
Whether to use the word embedding matrix as the output layer
that computes logits. If `False`, a new dense layer is created.
`"eps"`: float
Epsilon values for layer norm layers.
`"output_layer_bias"`: bool
Whether to use bias to the output layer.
`"max_decoding_length"`: int
The maximum allowed number of decoding steps.
            Set to a very large number to avoid the length constraint.
Ignored if provided in :meth:`forward` or ``"train_greedy"``
decoding is used.
`"name"`: str
Name of the module.
"""
dim = 512
return {
'num_blocks': 6,
'dim': dim,
'embedding_tie': True,
'output_layer_bias': False,
'max_decoding_length': int(1e10),
'embedding_dropout': 0.1,
'residual_dropout': 0.1,
'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
'multihead_attention': {
'name': 'multihead_rpr_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
'is_decoder': True,
'relative_attention_num_buckets': 32
},
'eps': 1e-6,
'initializer': None,
'name': "t5_decoder",
}
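
    # Illustrative usage sketch (not from the original source): wiring the
    # decoder with a plain embedding layer as the token embedder. The
    # vocabulary size and the 512-dim embedding (matching ``dim`` in the
    # default hparams above) are assumptions for the example.
    #
    #   embedder = nn.Embedding(num_embeddings=32000, embedding_dim=512)
    #   decoder = T5Decoder(token_embedder=embedder, vocab_size=32000)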
def _self_attention_stack(
self, inputs: torch.Tensor,
memory: Optional[torch.Tensor],
decoder_self_attention_bias: Optional[torch.Tensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
cache: Optional[Cache] = None
) -> torch.Tensor:
r"""Forward through the stacked multi-head rpr attentions.
"""
if cache is not None:
if memory is not None:
memory_attention_bias = cache['memory_attention_bias']
else:
assert decoder_self_attention_bias is not None
x = self.embed_dropout(inputs)
position_bias = None
encdec_position_bias = None
for i in range(self._hparams.num_blocks):
layer_cache = cache['layers'][i] if cache is not None else None
selfatt_output, position_bias = self.self_attns[i](
queries=self.self_attn_layer_norm[i](x),
memory=None,
memory_attention_bias=decoder_self_attention_bias,
cache=layer_cache,
position_bias=position_bias
)
x = x + self.residual_dropout(selfatt_output)
if memory is not None:
encdec_output, encdec_position_bias = self.enc_dec_attns[i](
queries=self.end_dec_attn_layer_norm[i](x),
memory=memory,
memory_attention_bias=memory_attention_bias,
position_bias=encdec_position_bias
)
x = x + self.residual_dropout(encdec_output)
sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))
x = x + self.residual_dropout(sub_output)
        return self.final_layer_norm(x)

/JoUtil-1.3.3-py3-none-any.whl/JoTools/txkj/fackImg.py
# Fake data: each image carries a small 'F'-shaped marker in the top-left corner.
# Placement positions must not repeat; each position gets a buffer zone.
# Supports overlays in both jpg and png formats, transparent or opaque.
import os
import cv2
import copy
import shutil
import numpy as np
import PIL.Image as Image
from JoTools.txkjRes.deteRes import DeteRes
class FakeImg(object):
def __init__(self, img_path, icon_dir):
        # path to the original image
self.img_path = img_path
        # path to the icon (overlay) images
self.icon_path = icon_dir
        # original image as an ndarray
self.img_ndarry = cv2.imdecode(np.fromfile(self.img_path, dtype=np.uint8), 1)
        # path to the icon directory
self.icon_dir = icon_dir
@staticmethod
    def mark_img(assign_img_array):
        """Stamp the fake image with the fixed 'F' mark."""
fake_icon = np.array(
[[0,0,0,0],
[0,1,1,1],
[0,0,0,0],
[0,1,1,1],
[0,1,1,1]], dtype=np.uint8)
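        # In the pattern above, zeros (rendered black) trace the 'F' glyph; ones become white (255).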
fake_icon *= 255
fake_icon = np.stack([fake_icon, fake_icon, fake_icon], axis=2)
assign_img_array[:5, :4, :] = fake_icon
return assign_img_array
    def make_fake_img(self, fake_img_info, save_img_path, save_xml_path):
        """Create the fake image; the overlay placement info must be passed in."""
        # [{'loc_lt_point': (x, y), 'icon_path': '', 'new_size': (width, height), 'tag': 'new_tag'}]
a = DeteRes(assign_img_path=self.img_path)
img_array = copy.deepcopy(self.img_ndarry)
for each_icon_info in fake_img_info:
(x, y) = each_icon_info['loc_lt_point']
icon_path = each_icon_info['icon_path']
new_size = each_icon_info['new_size']
tag = each_icon_info['tag']
icon_img = Image.open(icon_path)
icon_img = icon_img.resize(new_size)
            icon_img_array = np.array(icon_img)
            # numpy arrays are indexed [row, col] = [y, x]; new_size is (width, height)
            img_array[y:y + new_size[1], x:x + new_size[0], :] = icon_img_array[:, :, :3]
a.add_obj(x1=x, y1=y, x2=x+new_size[0], y2=y+new_size[1], tag=tag)
# mark
img_array = self.mark_img(img_array)
# save
cv2.imencode('.jpg', img_array)[1].tofile(save_img_path)
a.save_to_xml(save_xml_path)
# class FakeImgOpt(object):
#
# # todo
if __name__ == "__main__":
imgPath = r"C:\Users\14271\Desktop\fake_img\origin_img\57c72bdea659d35fb0afd9a794f7f07e.jpg"
iconDir = r"C:\Users\14271\Desktop\fake_img\icon_img\005.png"
saveFakePath = r"C:\Users\14271\Desktop\fake_img\fake_img\123.jpg"
saveFakeXmlPath = r"C:\Users\14271\Desktop\fake_img\fake_img\123.xml"
a = FakeImg(img_path=imgPath, icon_dir=iconDir)
fake_info = [{'loc_lt_point':(100, 100), 'icon_path':r'C:\Users\14271\Desktop\fake_img\icon_img\001.png', 'new_size':(200, 200), 'tag':'test'}]
a.make_fake_img(fake_img_info=fake_info, save_img_path=saveFakePath, save_xml_path=saveFakeXmlPath)
    pass

/Netzob-2.0.0.tar.gz/Netzob-2.0.0/src/netzob/Model/Grammar/Automata.py
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+----------------------------------------------
#| Standard library imports
#+----------------------------------------------
import os
import subprocess
import tempfile
#+----------------------------------------------
#| Related third party imports
#+----------------------------------------------
#+----------------------------------------------
#| Local application imports
#+----------------------------------------------
from netzob.Common.Utils.Decorators import typeCheck, public_api, NetzobLogger
from netzob.Model.Grammar.States.State import State
from netzob.Model.Grammar.Transitions.OpenChannelTransition import OpenChannelTransition
from netzob.Model.Grammar.Transitions.CloseChannelTransition import CloseChannelTransition
from netzob.Inference.Grammar.AutomataFactories.OneStateAutomataFactory import OneStateAutomataFactory
from netzob.Inference.Grammar.AutomataFactories.ChainedStatesAutomataFactory import ChainedStatesAutomataFactory
from netzob.Inference.Grammar.AutomataFactories.PTAAutomataFactory import PTAAutomataFactory
@NetzobLogger
class Automata(object):
"""Class which describes an automaton (i.e. state machine) of a
protocol. The underlying structure of the automaton is a Mealy
machine (cf. https://en.wikipedia.org/wiki/Mealy_machine).
The Automata constructor expects some parameters:
:param initialState: The initial state of the automaton.
:param symbols: The list of permitted symbols for every transition of the automaton.
:type initialState: :class:`State <netzob.Model.Grammar.States.State.State>`, required
:type symbols: a :class:`list` of :class:`Symbol <netzob.Model.Vocabulary.Symbol.Symbol>`, required
The Automata class provides the following public variables:
:var initialState: This parameter is the initial state of the automaton.
:var symbols: This parameter is the list of permitted symbols for every transition of the automaton.
:vartype initialState: :class:`State <netzob.Model.Grammar.States.State.State>`
:vartype symbols: a :class:`list` of :class:`Symbol <netzob.Model.Vocabulary.Symbol.Symbol>`
The following example shows the definition of an automaton with
three states `s0`, `s1`, `s2`, an opening transition between
states `s0` and `s1`, a standard transition within the same state
`s1` which accepts the input symbol `inputSymbol` and generate the
output symbol `outputSymbol`, and a closing transition between
states `s1` and `s2`.
>>> # Creation of some states and transitions
>>> from netzob.all import *
>>> s0 = State(name="S0")
>>> s1 = State(name="S1")
>>> s2 = State(name="S2")
>>> openTransition = OpenChannelTransition(startState=s0, endState=s1, name="Open")
>>> inputSymbol = Symbol()
>>> outputSymbol = Symbol()
>>> mainTransition = Transition(startState=s1, endState=s1, inputSymbol=inputSymbol, outputSymbols=[outputSymbol], name="hello")
>>> closeTransition = CloseChannelTransition(startState=s1, endState=s2, name="Close")
>>> # Creation of the automata
>>> automata = Automata(s0, [inputSymbol, outputSymbol])
"""
@public_api
@typeCheck(State, list)
def __init__(self, initialState, symbols):
# Initialize public variables from parameters
self.initialState = initialState
self.symbols = symbols # A list of symbols accepted by the automaton
# Initialize local variables
        self.cbk_read_symbol_timeout = None      # callback handling read timeouts
        self.cbk_read_unexpected_symbol = None   # callback handling unexpected symbols
        self.cbk_read_unknown_symbol = None      # callback handling unknown symbols
@public_api
def copy(self):
r"""Copy the current automaton.
This method copies the states and transitions of the
automaton, but keeps references to the original callbacks and
symbols.
:return: A new object of the same type.
:rtype: :class:`Automata <netzob.Model.Grammar.Automata.Automata>`
>>> # Creation of some states and transitions
>>> from netzob.all import *
>>> s0 = State(name="S0")
>>> s1 = State(name="S1")
>>> s2 = State(name="S2")
>>> openTransition = OpenChannelTransition(startState=s0, endState=s1, name="open transition")
>>> inputSymbol = Symbol()
>>> outputSymbol = Symbol()
>>> mainTransition = Transition(startState=s1, endState=s1, inputSymbol=inputSymbol, outputSymbols=[outputSymbol], name="main transition")
>>> closeTransition = CloseChannelTransition(startState=s1, endState=s2, name="close transition")
>>> # Creation of the automata
>>> automata = Automata(s0, [inputSymbol, outputSymbol])
>>> automata_bis = automata.copy()
"""
map_new_states = {} # Store mapping [original state -> cloned state]
for state in self.getStates():
new_transitions = []
for transition in state.transitions:
new_transition = transition.copy()
new_transitions.append(new_transition)
# Handle startState
if transition.startState in map_new_states.keys():
new_transition._startState = map_new_states[transition.startState]
else:
new_transition._startState = transition.startState.copy()
map_new_states[transition.startState] = new_transition.startState
# Handle endState
if transition.endState in map_new_states.keys():
new_transition.endState = map_new_states[transition.endState]
else:
new_transition.endState = transition.endState.copy()
map_new_states[transition.endState] = new_transition.endState
if state in map_new_states.keys():
map_new_states[state].transitions = new_transitions
else:
map_new_states[state] = state.copy()
map_new_states[state].transitions = new_transitions
automata = Automata(map_new_states[self.initialState], self.symbols)
automata.cbk_read_symbol_timeout = self.cbk_read_symbol_timeout
automata.cbk_read_unexpected_symbol = self.cbk_read_unexpected_symbol
automata.cbk_read_unknown_symbol = self.cbk_read_unknown_symbol
return automata
@public_api
def generateASCII(self):
"""Render the ASCII representation of the automaton.
:return: A string containing an ASCII representation of the automaton.
:rtype: :class:`str`
"""
f = tempfile.NamedTemporaryFile(delete=False)
f.write(str.encode(self.generateDotCode()))
f.close()
binary_path = '/usr/bin/graph-easy'
if not os.path.isfile(binary_path):
error_message = "Cannot generate ASCII graph as a dependency is missing: 'graph-easy' (see libgraph-easy-perl package)"
self._logger.warning(error_message)
return error_message
cmd = [binary_path, '--input', f.name, '--as_ascii']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p.wait()
result = p.stdout.read().decode("utf-8")
os.unlink(f.name)
return result
@public_api
def generateDotCode(self):
"""Generates the dot code representing the automaton.
:return: A string containing a dot code representation of the automaton.
:rtype: :class:`str`
>>> # Create some states and transitions
>>> from netzob.all import *
>>> s0 = State(name="S0")
>>> s1 = State(name="S1")
>>> s2 = State(name="S2")
>>> openTransition = OpenChannelTransition(startState=s0, endState=s1, name="Open")
>>> inputSymbol = Symbol()
>>> outputSymbol = Symbol()
>>> mainTransition = Transition(startState=s1, endState=s1, inputSymbol=inputSymbol, outputSymbols=[outputSymbol], name="hello")
>>> closeTransition = CloseChannelTransition(startState=s1, endState=s2, name="Close")
>>> # Create the automaton
>>> automata = Automata(s0, [inputSymbol, outputSymbol])
>>> print(automata.generateDotCode()) #doctest: +ELLIPSIS
digraph G {
"S0" [shape=doubleoctagon, label="S0", style=filled, fillcolor=white, URL="..."];
"S1" [shape=ellipse, label="S1", style=filled, fillcolor=white, URL="..."];
"S2" [shape=ellipse, label="S2", style=filled, fillcolor=white, URL="..."];
"S0" -> "S1" [fontsize=5, label="OpenChannelTransition", URL="..."];
"S1" -> "S1" [fontsize=5, label="hello (Symbol;{Symbol})", URL="..."];
"S1" -> "S2" [fontsize=5, label="CloseChannelTransition", URL="..."];
}
:return: a string containing the dot code of the automata.
:rtype: a :class:`list`
"""
dotCode = []
dotCode.append("digraph G {")
        # First we include all the states declared in the automaton
states = self.getStates()
for state in states:
if state.active:
color = "red"
else:
color = "white"
if state == self.initialState:
shape = "doubleoctagon"
else:
shape = "ellipse"
descr = state.name
for cbk in state.cbk_modify_transition:
descr += " [CBK modify transition] "
dotCode.append(
'"{}" [shape={}, label="{}", style=filled, fillcolor={}, URL="{}"];'.
format(state.name, shape, descr, color, id(state)))
for inputState in states:
for transition in inputState.transitions:
outputState = transition.endState
descr = transition.description
for cbk in transition.cbk_modify_symbol:
descr += " [CBK modify symbol] "
dotCode.append(
'"{}" -> "{}" [fontsize=5, label="{}", URL="{}"];'.
format(inputState.name, outputState.name,
descr, id(transition)))
dotCode.append("}")
return '\n'.join(dotCode)
@public_api
def getStates(self, main_states=False):
"""Visits the automata to discover all the available states.
        :param main_states: If ``True``, return only the main states, i.e. all states except the initial state and the closing states. Default value is ``False``, meaning that all states are returned.
:type main_states: :class:`bool`, optional
:return: A list containing all the automaton states.
:rtype: a :class:`list` of :class:`State <netzob.Model.Grammar.States.State.State>`
>>> from netzob.all import *
>>> # Create some states and transitions
>>> s0 = State(name="State 0")
>>> s1 = State(name="State 1")
>>> s2 = State(name="State 2")
>>> openTransition = OpenChannelTransition(startState=s0, endState=s1, name="Open")
>>> inputSymbol = Symbol()
>>> outputSymbol = Symbol()
>>> mainTransition = Transition(startState=s1, endState=s1, inputSymbol=inputSymbol, outputSymbols=[outputSymbol], name="hello")
>>> closeTransition = CloseChannelTransition(startState=s1, endState=s2, name="Close")
>>> # Create the automata
>>> automata = Automata(s0, [inputSymbol, outputSymbol])
>>> for state in automata.getStates():
... print(state)
State 0
State 1
State 2
>>> for state in automata.getStates(main_states=True):
... print(state)
State 1
"""
states = []
toAnalyze = []
toAnalyze.append(self.initialState)
while (len(toAnalyze) > 0):
currentState = toAnalyze.pop()
if currentState is not None:
found = False
for tmpState in states:
if id(tmpState) == id(currentState):
found = True
break
if not found:
for transition in currentState.transitions:
outputState = transition.endState
found = False
for tmpState in states:
if id(tmpState) == id(outputState):
found = True
break
for tmpState in toAnalyze:
if id(tmpState) == id(outputState):
found = True
break
if not found:
toAnalyze.append(outputState)
states.append(currentState)
states_to_drop = []
if main_states:
for state in states:
if len(state.transitions) == 1 and isinstance(state.transitions[0], OpenChannelTransition):
states_to_drop.append(state)
else:
for t in state.transitions:
if isinstance(t, CloseChannelTransition):
states_to_drop.append(t.endState)
for state in states_to_drop:
states.remove(state)
return states
@public_api
@typeCheck(str)
def getState(self, name):
"""Returns the State object of the given name.
:param name: The name of the State object
:type name: :class:`str`, required
:return: The State object with stateName as name.
:rtype: :class:`State <netzob.Model.Grammar.States.State.State>`
:raise: :class:`KeyError` if the name is not found.
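
        Example (a sketch, reusing the ``automata`` built in the class
        docstring above)::

            s1 = automata.getState("S1")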
"""
states = self.getStates()
for state in states:
if state.name == name:
return state
else:
raise KeyError("State with name '{}' does not exist".format(name))
@public_api
def getTransitions(self):
"""Return all the transitions of the automaton.
:return: A list containing all the automaton transitions.
:rtype: a :class:`list` of :class:`Transition <netzob.Model.Grammar.States.AbstractTransition.AbstractTransition>`
"""
states = self.getStates()
transitions = set()
for state in states:
for transition in state.transitions:
transitions.add(transition)
return list(transitions)
@public_api
@typeCheck(str)
def getTransition(self, name):
"""Returns the Transition object of the given name.
:param name: The name of the Transition object
:type name: :class:`str`, required
:return: The Transition object.
:rtype: :class:`Transition <netzob.Model.Grammar.Transitions.Transition.Transition>`
:raise: :class:`KeyError` if the name is not found.
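
        Example (a sketch, assuming a transition named ``"hello"`` exists,
        as in the class docstring above)::

            t = automata.getTransition("hello")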
"""
transitions = self.getTransitions()
for transition in transitions:
if transition.name == name:
return transition
else:
raise KeyError("Transition with name '{}' does not exist".format(name))
@staticmethod
@typeCheck(list, list)
    def generateChainedStatesAutomata(abstractSession, symbolList):
        """Generate an automaton that contains as many states and
        transitions as the number of request-response couples in the
        abstract session. This automaton thus has the shape of a single
        chain.
>>> from netzob.all import *
>>> symbolSYN = Symbol([Field(String("SYN"))], name="Symbol_SYN")
>>> symbolSYNACK = Symbol([Field(String("SYN/ACK"))], name="Symbol_SYNACK")
>>> symbolACK = Symbol([Field(String("ACK"))], name="Symbol_ACK")
>>> symbolPUSH = Symbol([Field(String("PUSH"))], name="Symbol_PUSH")
>>> symbolList = [symbolSYN, symbolSYNACK, symbolACK, symbolPUSH]
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="B")
>>> msg4 = RawMessage("PUSH", source="B", destination="A")
>>> session = Session([msg1, msg2, msg3, msg4])
>>> abstractSession = session.abstract(symbolList)
>>> automata = Automata.generateChainedStatesAutomata(abstractSession, symbolList)
>>> dotcode = automata.generateDotCode()
>>> len(dotcode)
926
>>> print(dotcode) #doctest: +ELLIPSIS
digraph G {
"Start state" [shape=doubleoctagon, label="Start state", style=filled, fillcolor=white, URL="..."];
"State 1" [shape=ellipse, label="State 1", style=filled, fillcolor=white, URL="..."];
"State 2" [shape=ellipse, label="State 2", style=filled, fillcolor=white, URL="..."];
"State 3" [shape=ellipse, label="State 3", style=filled, fillcolor=white, URL="..."];
"End state" [shape=ellipse, label="End state", style=filled, fillcolor=white, URL="..."];
"Start state" -> "State 1" [fontsize=5, label="OpenChannelTransition", URL="..."];
"State 1" -> "State 2" [fontsize=5, label="Transition (Symbol_SYN;{Symbol_SYNACK})", URL="..."];
"State 2" -> "State 3" [fontsize=5, label="Transition (Symbol_ACK;{Symbol_PUSH})", URL="..."];
"State 3" -> "End state" [fontsize=5, label="CloseChannelTransition", URL="..."];
}
        :return: an automaton with one sequence of chained states.
:rtype: a :class:`Automata <netzob.Model.Grammar.Automata.Automata>`
"""
return ChainedStatesAutomataFactory.generate(abstractSession,
symbolList)
@staticmethod
@typeCheck(list, list)
    def generateOneStateAutomata(abstractSession, symbolList):
        """Generate an automaton that, according to an abstract
        session, contains a main state where every request-response
        couple is permitted.
>>> from netzob.all import *
>>> symbolSYN = Symbol([Field(String("SYN"))], name="Symbol_SYN")
>>> symbolSYNACK = Symbol([Field(String("SYN/ACK"))], name="Symbol_SYNACK")
>>> symbolACK = Symbol([Field(String("ACK"))], name="Symbol_ACK")
>>> symbolPUSH = Symbol([Field(String("PUSH"))], name="Symbol_PUSH")
>>> symbolList = [symbolSYN, symbolSYNACK, symbolACK, symbolPUSH]
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="B")
>>> msg4 = RawMessage("PUSH", source="B", destination="A")
>>> session = Session([msg1, msg2, msg3, msg4])
>>> abstractSession = session.abstract(symbolList)
>>> automata = Automata.generateOneStateAutomata(abstractSession, symbolList)
>>> dotcode = automata.generateDotCode()
>>> len(dotcode)
754
>>> print(dotcode) #doctest: +ELLIPSIS
digraph G {
"Start state" [shape=doubleoctagon, label="Start state", style=filled, fillcolor=white, URL="..."];
"Main state" [shape=ellipse, label="Main state", style=filled, fillcolor=white, URL="..."];
"End state" [shape=ellipse, label="End state", style=filled, fillcolor=white, URL="..."];
"Start state" -> "Main state" [fontsize=5, label="OpenChannelTransition", URL="..."];
"Main state" -> "Main state" [fontsize=5, label="Transition (Symbol_SYN;{Symbol_SYNACK})", URL="..."];
"Main state" -> "Main state" [fontsize=5, label="Transition (Symbol_ACK;{Symbol_PUSH})", URL="..."];
"Main state" -> "End state" [fontsize=5, label="CloseChannelTransition", URL="..."];
}
        :return: an automaton with one main state.
:rtype: a :class:`Automata <netzob.Model.Grammar.Automata.Automata>`
"""
return OneStateAutomataFactory.generate(abstractSession, symbolList)
@staticmethod
@typeCheck(list, list)
    def generatePTAAutomata(abstractSessions, symbolList):
        """Generate an automaton according to the observed sequences of messages/symbols.
>>> from netzob.all import *
>>> symbolSYN = Symbol([Field(String("SYN"))], name="Symbol_SYN")
>>> symbolSYNACK = Symbol([Field(String("SYN/ACK"))], name="Symbol_SYNACK")
>>> symbolACK = Symbol([Field(String("ACK"))], name="Symbol_ACK")
>>> symbolPUSH = Symbol([Field(String("PUSH"))], name="Symbol_PUSH")
>>> symbolList = [symbolSYN, symbolSYNACK, symbolACK, symbolPUSH]
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="B")
>>> msg4 = RawMessage("PUSH", source="B", destination="A")
>>> session = Session([msg1, msg2, msg3, msg4])
>>> abstractSession1 = session.abstract(symbolList)
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("SYN", source="A", destination="B")
>>> msg4 = RawMessage("PUSH", source="B", destination="A")
>>> msg5 = RawMessage("SYN", source="A", destination="B")
>>> msg6 = RawMessage("PUSH", source="B", destination="A")
>>> session = Session([msg1, msg2, msg3, msg4, msg5, msg6])
>>> abstractSession2 = session.abstract(symbolList)
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="B")
>>> msg4 = RawMessage("PUSH", source="B", destination="A")
>>> msg5 = RawMessage("SYN", source="A", destination="B")
>>> msg6 = RawMessage("PUSH", source="B", destination="A")
>>> msg7 = RawMessage("SYN", source="A", destination="B")
>>> msg8 = RawMessage("PUSH", source="B", destination="A")
>>> session = Session([msg1, msg2, msg3, msg4, msg5, msg6, msg7, msg8])
>>> abstractSession3 = session.abstract(symbolList)
>>> abstractSessions = [abstractSession1, abstractSession2, abstractSession3]
>>> automata = Automata.generatePTAAutomata(abstractSessions, symbolList)
>>> dotcode = automata.generateDotCode()
>>> print(dotcode) #doctest: +ELLIPSIS
digraph G {
"Start state" [shape=doubleoctagon, label="Start state", style=filled, fillcolor=white, URL="..."];
"State 0" [shape=ellipse, label="State 0", style=filled, fillcolor=white, URL="..."];
"State 1" [shape=ellipse, label="State 1", style=filled, fillcolor=white, URL="..."];
"State 4" [shape=ellipse, label="State 4", style=filled, fillcolor=white, URL="..."];
"State 5" [shape=ellipse, label="State 5", style=filled, fillcolor=white, URL="..."];
"End state 6" [shape=ellipse, label="End state 6", style=filled, fillcolor=white, URL="..."];
"State 2" [shape=ellipse, label="State 2", style=filled, fillcolor=white, URL="..."];
"State 7" [shape=ellipse, label="State 7", style=filled, fillcolor=white, URL="..."];
"State 8" [shape=ellipse, label="State 8", style=filled, fillcolor=white, URL="..."];
"End state 9" [shape=ellipse, label="End state 9", style=filled, fillcolor=white, URL="..."];
"End state 3" [shape=ellipse, label="End state 3", style=filled, fillcolor=white, URL="..."];
"Start state" -> "State 0" [fontsize=5, label="OpenChannelTransition", URL="..."];
"State 0" -> "State 1" [fontsize=5, label="Transition (Symbol_SYN;{Symbol_SYNACK})", URL="..."];
"State 1" -> "State 2" [fontsize=5, label="Transition (Symbol_ACK;{Symbol_PUSH})", URL="..."];
"State 1" -> "State 4" [fontsize=5, label="Transition (Symbol_SYN;{Symbol_PUSH})", URL="..."];
"State 4" -> "State 5" [fontsize=5, label="Transition (Symbol_SYN;{Symbol_PUSH})", URL="..."];
"State 5" -> "End state 6" [fontsize=5, label="CloseChannelTransition", URL="..."];
"State 2" -> "End state 3" [fontsize=5, label="CloseChannelTransition", URL="..."];
"State 2" -> "State 7" [fontsize=5, label="Transition (Symbol_SYN;{Symbol_PUSH})", URL="..."];
"State 7" -> "State 8" [fontsize=5, label="Transition (Symbol_SYN;{Symbol_PUSH})", URL="..."];
"State 8" -> "End state 9" [fontsize=5, label="CloseChannelTransition", URL="..."];
}
        :return: an automaton based on a PTA (Prefix Tree Acceptor).
:rtype: a :class:`Automata <netzob.Model.Grammar.Automata.Automata>`
"""
return PTAAutomataFactory.generate(abstractSessions, symbolList)
@public_api
def set_cbk_read_symbol_timeout(self, cbk_method, states=None):
"""Function called to handle cases where a timeout appears when
waiting for a symbol. In a non initiator context, this symbol would
correspond to the input symbol that should trigger a
transition. In an initiator context, this symbol would correspond to an
output symbol that is expected according to the current
transition.
:param cbk_method: A function used to handle the selection of the next
state when no symbol is received after the timeout
has expired.
:type cbk_method: :class:`Callable <collections.abc.Callable>`, required
:param states: A list of states on which the callback function should apply.
If no states are specified, the callback function is
applied on all states of the automaton.
:type states: ~typing.List[~netzob.Model.Grammar.States.State.State], optional
:raise: :class:`TypeError` if :attr:`cbk_method` is not a callable function
The callback function that can be used in the
:attr:`cbk_method` parameter has the following prototype:
.. function:: cbk_method(current_state, current_transition)
:noindex:
:param current_state:
Corresponds to the current state in the automaton.
            It is expected that the current state cannot be ``None``, because when an actor visits an automaton, it is always positioned at a state, even if it is executing a transition (in such a case, the current state is the initial state of the transition).
:type current_state: ~netzob.Model.Grammar.States.State.State
:param current_transition:
Corresponds to the current transition in the automaton.
It is expected that the current transition may be ``None``, especially
in a non initiator context, where no transition has been initiated.
:type current_transition: ~netzob.Model.Grammar.Transitions.Transition.Transition
:return: The callback function should return the next
state. For example, to stay at the same state,
the callback function would have to return the
:attr:`current_state` value.
:rtype: :class:`~netzob.Model.Grammar.States.State.State`
"""
if not callable(cbk_method):
raise TypeError("'cbk_method' should be a callable function")
self.cbk_read_symbol_timeout = cbk_method
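# Example (a sketch; ``automata`` and the callback name are hypothetical):
# stay at the same state whenever a read times out.
#
#     def cbk_on_timeout(current_state, current_transition):
#         # Returning current_state keeps the actor where it is.
#         return current_state
#
#     automata.set_cbk_read_symbol_timeout(cbk_on_timeout)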
@public_api
def set_cbk_read_unexpected_symbol(self, cbk_method, states=None):
"""Function called to handle cases where a symbol is received but not
expected. In a non initiator context, this symbol would not match the input
symbol of the available transitions. In an initiator context, this
symbol would not match the expected output symbols of the current
transition.
The method expects some parameters:
:param cbk_method: A function used to handle the selection of the next
state when an unexpected symbol is received.
:type cbk_method: :class:`Callable <collections.abc.Callable>`, required
:param states: A list of states on which the callback function should apply.
If no states are specified, the callback function is
applied on all states of the automaton.
:type states: ~typing.List[~netzob.Model.Grammar.States.State.State], optional
:raise: :class:`TypeError` if :attr:`cbk_method` is not a callable function
The callback function that can be used in the
:attr:`cbk_method` parameter has the following prototype:
.. function:: cbk_method(current_state, current_transition,\
received_symbol, received_message, received_structure)
:noindex:
:param current_state:
Corresponds to the current state in the automaton.
It is expected that the current state cannot be ``None``, because when an actor visits an automaton, it is always positioned at a state even if it is executing a transition (in such a case, the current state is the initial state of the transition).
:type current_state: ~netzob.Model.Grammar.States.State.State
:param current_transition:
Corresponds to the current transition in the automaton.
It is expected that the current transition may be ``None``, especially
in a non initiator context, where no transition has been initiated.
:type current_transition: ~netzob.Model.Grammar.Transitions.Transition.Transition
:param received_symbol:
Corresponds to the received symbol.
:type received_symbol: ~netzob.Model.Vocabulary.Symbol.Symbol
:param received_message:
Corresponds to the received raw message.
:type received_message: :class:`bytes`
:param received_structure:
Corresponds to the received message structure.
:type received_structure: :class:`OrderedDict` where keys are :class:`~netzob.Model.Vocabulary.Field.Field` and values are :class:`bytes`
:return: The callback function should return the next
state. For example, to stay at the same state,
the callback function would have to return the
:attr:`current_state` value.
:rtype: :class:`~netzob.Model.Grammar.States.State.State`
"""
if not callable(cbk_method):
raise TypeError("'cbk_method' should be a callable function")
self.cbk_read_unexpected_symbol = cbk_method
@public_api
def set_cbk_read_unknown_symbol(self, cbk_method, states=None):
"""Function called to handle cases where a message is received but
does not correspond to a known symbol. In a non initiator context,
this message would not match the input symbol of the available
transitions. In an initiator context, this message would not match
the expected output symbols of the current transition.
The method expects some parameters:
:param cbk_method: A callable function used to handle the selection of
the next state when an unknown symbol is received.
:type cbk_method: :class:`Callable <collections.abc.Callable>`, required
:param states: A list of states on which the callback function should apply.
If no states are specified, the callback function is
applied on all states of the automaton.
:type states: ~typing.List[~netzob.Model.Grammar.States.State.State], optional
:raise: :class:`TypeError` if :attr:`cbk_method` is not a callable function
The callback function that can be used in the
:attr:`cbk_method` parameter has the following prototype:
.. function:: cbk_method(current_state, current_transition,\
received_message)
:noindex:
:param current_state:
Corresponds to the current state in the automaton.
It is expected that the current state cannot be ``None``, because when an actor visits an automaton, it is always positioned at a state even if it is executing a transition (in such a case, the current state is the initial state of the transition).
:type current_state: ~netzob.Model.Grammar.States.State.State
:param current_transition:
Corresponds to the current transition in the automaton.
It is expected that the current transition may be ``None``,
especially in a non initiator context, where no transition has been initiated.
:type current_transition: ~netzob.Model.Grammar.Transitions.Transition.Transition
:param received_message:
Corresponds to the received raw message.
:type received_message: :class:`bytes`
:return: The callback function should return the next
state. For example, to stay at the same state,
the callback function would have to return the
:attr:`current_state` value.
:rtype: :class:`~netzob.Model.Grammar.States.State.State`
"""
if not callable(cbk_method):
raise TypeError("'cbk_method' should be a callable function")
self.cbk_read_unknown_symbol = cbk_method
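# Example for the unexpected/unknown-symbol callbacks (a sketch; ``automata``
# and ``error_state`` are hypothetical): route any surprising input to a
# dedicated error state.
#
#     def cbk_on_unknown(current_state, current_transition, received_message):
#         # received_message holds the raw bytes that could not be abstracted.
#         return error_state
#
#     automata.set_cbk_read_unknown_symbol(cbk_on_unknown)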
## Automata Fuzzing ##
@public_api
def mutate(self, strategy=None, target=None, generator=None, seed=None):
r"""This is the mutation method of the automaton. This method returns
a new automaton that may be used for fuzzing purposes.
The mutate method expects some parameters:
:param strategy: The strategy used to build the new automaton.
The following strategies are available:
* :attr:`AutomataMutatorStrategy.RANDOM`: Randomly insert and remove transitions between states of the original automaton,
* :attr:`AutomataMutatorStrategy.FULL`: At each state of the automaton, it is possible to reach any state,
* :attr:`AutomataMutatorStrategy.ONESTATE`: Build an automaton with one main state that accepts every symbol.
* :attr:`AutomataMutatorStrategy.TARGETED`: Build an automaton similar to the original one, where a targeted state, given in parameters, will accept every symbol.
If set to None, the default strategy is :attr:`AutomataMutatorStrategy.RANDOM`.
:param target: The name of the state considered for targeted fuzzing (should be used with :attr:`AutomataMutatorStrategy.TARGETED`).
:param generator: The underlying generator used to produce
pseudo-random or deterministic
values.
Default generator is ``'xorshift'``, which is
efficient to produce unique pseudo-random
numbers.
:param seed: An integer used to initialize the underlying
generator.
If None, the default value will be set to :attr:`Mutator.SEED_DEFAULT`. The :attr:`Mutator.SEED_DEFAULT` constant is initialized from the configuration variable :attr:`Conf.seed` from the Netzob API :class:`Conf` class.
:type strategy: :class:`AutomataMutatorStrategy`, optional
:type target: :class:`str`, optional
:type generator: :class:`iter`, optional
:type seed: :class:`int`, optional
:return: The mutated automaton.
:rtype: :class:`Automata <netzob.Model.Grammar.Automata.Automata>`
**Catching abnormal responses from the remote target**
By default, the state machine is configured so that the
reception of abnormal messages from the remote peer will
terminate the visit loop of the automaton from the current
actor. When applying fuzzing, this behavior could be annoying
as it will quickly stop the fuzzing session as soon as a
non-legitimate response is received. In order to catch this
kind of responses and adapt the current actor behavior, it is
recommended to set the following callbacks on the automaton:
* :attr:`set_cbk_read_symbol_timeout <netzob.Model.Grammar.Automata.Automata.set_cbk_read_symbol_timeout>`
* :attr:`set_cbk_read_unexpected_symbol <netzob.Model.Grammar.Automata.Automata.set_cbk_read_unexpected_symbol>`
* :attr:`set_cbk_read_unknown_symbol <netzob.Model.Grammar.Automata.Automata.set_cbk_read_unknown_symbol>`
The following example shows how to specify a global behavior,
on all states and transitions, in order to catch reception of
unexpected symbols (i.e. symbols that are known but not
expected at this state/transition) and unknown messages
(i.e. messages that cannot be abstracted to a symbol).
>>> from netzob.all import *
>>> import time
>>>
>>> # First we create the symbols
>>> symbol1 = Symbol(name="Hello1", fields=[Field("hello1")])
>>> symbol2 = Symbol(name="Hello2", fields=[Field("hello2")])
>>> symbolList = [symbol1, symbol2]
>>>
>>> # Create Bob's automaton
>>> bob_s0 = State(name="S0")
>>> bob_s1 = State(name="S1")
>>> bob_s2 = State(name="S2")
>>> bob_s3 = State(name="S3")
>>> bob_error_state = State(name="Error state")
>>> bob_openTransition = OpenChannelTransition(startState=bob_s0, endState=bob_s1, name="Open")
>>> bob_mainTransition = Transition(startState=bob_s1, endState=bob_s2,
... inputSymbol=symbol1, outputSymbols=[symbol2],
... name="T1")
>>> bob_closeTransition1 = CloseChannelTransition(startState=bob_error_state, endState=bob_s3, name="Close")
>>> bob_closeTransition2 = CloseChannelTransition(startState=bob_s2, endState=bob_s3, name="Close")
>>> bob_automata = Automata(bob_s0, symbolList)
>>>
>>> def cbk_method(current_state, current_transition, received_symbol, received_message, received_structure):
... return bob_error_state
>>> bob_automata.set_cbk_read_unexpected_symbol(cbk_method)
>>> bob_automata.set_cbk_read_unknown_symbol(cbk_method)
>>>
>>> automata_ascii = bob_automata.generateASCII()
>>> print(automata_ascii)
#=========================#
H S0 H
#=========================#
|
| OpenChannelTransition
v
+-------------------------+
| S1 |
+-------------------------+
|
| T1 (Hello1;{Hello2})
v
+-------------------------+
| S2 |
+-------------------------+
|
| CloseChannelTransition
v
+-------------------------+
| S3 |
+-------------------------+
<BLANKLINE>
>>>
>>> # Create Alice's automaton
>>> alice_s0 = State(name="S0")
>>> alice_s1 = State(name="S1")
>>> alice_s2 = State(name="S2")
>>> alice_openTransition = OpenChannelTransition(startState=alice_s0, endState=alice_s1, name="Open")
>>> alice_mainTransition = Transition(startState=alice_s1, endState=alice_s1,
... inputSymbol=symbol1, outputSymbols=[symbol1],
... name="T1")
>>> alice_closeTransition = CloseChannelTransition(startState=alice_s1, endState=alice_s2, name="Close")
>>> alice_automata = Automata(alice_s0, symbolList)
>>>
>>> automata_ascii = alice_automata.generateASCII()
>>> print(automata_ascii)
#=========================#
H S0 H
#=========================#
|
| OpenChannelTransition
v
+-------------------------+ T1 (Hello1;{Hello1})
| | -----------------------+
| S1 | |
| | <----------------------+
+-------------------------+
|
| CloseChannelTransition
v
+-------------------------+
| S2 |
+-------------------------+
<BLANKLINE>
>>>
>>> # Create Bob actor (a client)
>>> channel = UDPClient(remoteIP="127.0.0.1", remotePort=8887, timeout=1.)
>>> bob = Actor(automata=bob_automata, channel=channel, name="Bob")
>>> bob.nbMaxTransitions = 10
>>>
>>> # Create Alice actor (a server)
>>> channel = UDPServer(localIP="127.0.0.1", localPort=8887, timeout=1.)
>>> alice = Actor(automata=alice_automata, channel=channel, initiator=False, name="Alice")
>>>
>>> alice.start()
>>> time.sleep(0.5)
>>> bob.start()
>>>
>>> time.sleep(1)
>>>
>>> bob.stop()
>>> alice.stop()
>>>
>>> print(bob.generateLog())
Activity log for actor 'Bob' (initiator):
[+] At state 'S0'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 'Open' (open channel)
[+] Transition 'Open' lead to state 'S1'
[+] At state 'S1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 'T1' (initiator)
[+] During transition 'T1', sending input symbol ('Hello1')
[+] During transition 'T1', receiving unexpected symbol triggered a callback that lead to state 'Error state'
[+] At state 'Error state'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 'Close' (close channel)
[+] Transition 'Close' lead to state 'S3'
[+] At state 'S3'
[+] Randomly choosing a transition to execute or to wait for an input symbol
>>> print(alice.generateLog())
Activity log for actor 'Alice' (not initiator):
[+] At state 'S0'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 'Open' (open channel)
[+] Going to execute transition 'Open'
[+] Transition 'Open' lead to state 'S1'
[+] At state 'S1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
[+] Input symbol 'Hello1' corresponds to transition 'T1'
[+] During transition 'T1', choosing an output symbol ('Hello1')
[+] Transition 'T1' lead to state 'S1'
[+] At state 'S1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
**Basic example of automata fuzzing**
Mutators may be used in order to create fuzzed/mutated automaton.
The following code shows the creation of the new automaton with
random transitions between the existing states:
>>> from netzob.all import *
>>> import time
>>> sym1 = Symbol([Field(String(nbChars=3))], name='Sym1')
>>> sym2 = Symbol([Field(String(nbChars=5))], name='Sym2')
>>> symbols = [sym1, sym2]
>>> s0 = State(name="s0")
>>> s1 = State(name="s1")
>>> s2 = State(name="s2")
>>> s3 = State(name="s3")
>>> s4 = State(name="s4")
>>> t0 = OpenChannelTransition(startState=s0, endState=s1,
... name="t0")
>>> t1 = Transition(startState=s1, endState=s1,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t1")
>>> t2 = Transition(startState=s1, endState=s2,
... inputSymbol=sym2, outputSymbols=[sym2],
... name="t2")
>>> t3 = Transition(startState=s2, endState=s3,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t3")
>>> t4 = CloseChannelTransition(startState=s2, endState=s4,
... name="t4")
>>>
>>> automata = Automata(s0, symbols=symbols)
>>> automata_ascii = automata.generateASCII()
>>> print(automata_ascii)
#========================#
H s0 H
#========================#
|
| OpenChannelTransition
v
+------------------------+ t1 (Sym1;{Sym1})
| | -------------------+
| s1 | |
| | <------------------+
+------------------------+
|
| t2 (Sym2;{Sym2})
v
+----+ CloseChannelTransition +------------------------+
| s4 | <------------------------ | s2 |
+----+ +------------------------+
|
| t3 (Sym1;{Sym1})
v
+------------------------+
| s3 |
+------------------------+
<BLANKLINE>
>>>
>>> # Generate a random automaton
>>>
>>> mutatedAutomata = automata.mutate()
>>> automata_ascii_2 = mutatedAutomata.generateASCII()
>>> print(automata_ascii_2)
#========================#
H s0 H
#========================#
|
| OpenChannelTransition
v
+----------------------------------------------------------------------------------------+ t1 (Sym1;{Sym1})
| | -------------------+
| s1 | |
| | <------------------+
+----------------------------------------------------------------------------------------+
| ^ ^
| t2 (Sym2;{Sym2}) | t_random (Sym1;{Sym1,Sym2}) | t_random (Sym2;{Sym1,Sym2})
v | |
+----------------------------------------------------------------------------------------+
| s2 |
+----------------------------------------------------------------------------------------+
| |
| t3 (Sym1;{Sym1}) | CloseChannelTransition
v v
+------------------------+ +------------------------------+
| s3 | | s4 |
+------------------------+ +------------------------------+
<BLANKLINE>
>>>
>>> # Generate a full automaton
>>>
>>> mutatedAutomata = automata.mutate(strategy=AutomataMutatorStrategy.FULL)
>>>
>>> # The ASCII representation is not displayed as it is too big
>>>
>>> # Generate an automaton with one main state
>>>
>>> mutatedAutomata = automata.mutate(strategy=AutomataMutatorStrategy.ONESTATE)
>>> automata_ascii_2 = mutatedAutomata.generateASCII()
>>> print(automata_ascii_2)
#========================#
H Initial state H
#========================#
|
| OpenChannelTransition
v
t_random (Sym2;{Sym1,Sym2}) +------------------------+ t_random (Sym1;{Sym1,Sym2})
+------------------------------ | | ------------------------------+
| | Main state | |
+-----------------------------> | | <-----------------------------+
+------------------------+
<BLANKLINE>
>>>
>>> # Generate an automaton with targeted fuzzing on one specific state
>>>
>>> mutatedAutomata = automata.mutate(strategy=AutomataMutatorStrategy.TARGETED, target=s2.name)
>>> automata_ascii_2 = mutatedAutomata.generateASCII()
>>> print(automata_ascii_2)
#========================#
H s0 H
#========================#
|
| OpenChannelTransition
v
+------------------------+
| s1 |
+------------------------+
|
| t2 (Sym2;{Sym2})
v
t_random (Sym2;{Sym1,Sym2}) +------------------------+ t_random (Sym1;{Sym1,Sym2})
+------------------------------ | | ------------------------------+
| | s2 | |
+-----------------------------> | | <-----------------------------+
+------------------------+
<BLANKLINE>
**Combining message formats and automata fuzzing**
By combining message formats and automata fuzzing, it is possible
to fuzz specific message formats at specific states in the
automaton.
The following code shows the creation of a mutated automaton
with targeted automaton mutations at state 's6', and with a
precision concerning the state at which fuzzing of message
formats will be performed. Here, the message format fuzzing
only applies at state 's6'. An actor is also created to
simulate a target.
>>> from netzob.all import *
>>> import time
>>> sym1 = Symbol([Field(uint16())], name='Sym1')
>>> symbols = [sym1]
>>> s0 = State(name="s0")
>>> s1 = State(name="s1")
>>> s2 = State(name="s2")
>>> s3 = State(name="s3")
>>> s4 = State(name="s4")
>>> s5 = State(name="s5")
>>> s6 = State(name="s6")
>>> s7 = State(name="s7")
>>> t0 = OpenChannelTransition(startState=s0, endState=s1,
... name="t0")
>>> t1 = Transition(startState=s1, endState=s1,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t1")
>>> t2 = Transition(startState=s1, endState=s2,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t2")
>>> t3 = Transition(startState=s2, endState=s3,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t3")
>>> t4 = Transition(startState=s2, endState=s4,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t4")
>>> t5 = Transition(startState=s4, endState=s6,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t5")
>>> t6 = Transition(startState=s3, endState=s5,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t6")
>>> t7 = Transition(startState=s5, endState=s6,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t7")
>>> t8 = Transition(startState=s6, endState=s6,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="t8")
>>> t9 = CloseChannelTransition(startState=s6, endState=s7,
... name="t9")
>>>
>>> automata = Automata(s0, symbols=symbols)
>>> automata_ascii = automata.generateASCII()
>>> print(automata_ascii)
#=========================#
H s0 H
#=========================#
|
| OpenChannelTransition
v
+-------------------------+ t1 (Sym1;{Sym1})
| | -------------------+
| s1 | |
| | <------------------+
+-------------------------+
|
| t2 (Sym1;{Sym1})
v
+----+ t4 (Sym1;{Sym1}) +-------------------------+
| s4 | <------------------ | s2 |
+----+ +-------------------------+
| |
| | t3 (Sym1;{Sym1})
| v
| +-------------------------+
| | s3 |
| +-------------------------+
| |
| | t6 (Sym1;{Sym1})
| v
| +-------------------------+
| | s5 |
| +-------------------------+
| |
| | t7 (Sym1;{Sym1})
| v
| +-------------------------+ t8 (Sym1;{Sym1})
| | | -------------------+
| t5 (Sym1;{Sym1}) | s6 | |
+----------------------> | | <------------------+
+-------------------------+
|
| CloseChannelTransition
v
+-------------------------+
| s7 |
+-------------------------+
<BLANKLINE>
>>> # Creation of a mutated automaton
>>> mutatedAutomata = automata.mutate(strategy=AutomataMutatorStrategy.TARGETED, target=s6.name, seed=42)
>>> automata_ascii = mutatedAutomata.generateASCII()
>>> print(automata_ascii)
#========================#
H s0 H
#========================#
|
| OpenChannelTransition
v
+------------------------+
| s1 |
+------------------------+
|
| t2 (Sym1;{Sym1})
v
+------------------------+
| s2 |
+------------------------+
|
| t4 (Sym1;{Sym1})
v
+------------------------+
| s4 |
+------------------------+
|
| t5 (Sym1;{Sym1})
v
+------------------------+ t_random (Sym1;{Sym1})
| | -------------------------+
| s6 | |
| | <------------------------+
+------------------------+
<BLANKLINE>
>>>
>>> # Define fuzzing configuration
>>> preset_symbol1 = Preset(sym1)
>>> preset_symbol1.fuzz(sym1)
>>>
>>> # Creation of an automaton visitor/actor and a channel on which to emit the fuzzed symbol
>>> bob_channel = UDPClient(remoteIP="127.0.0.1", remotePort=8887, timeout=1.)
>>> bob_actor = Actor(automata=mutatedAutomata, channel=bob_channel, name='Fuzzer')
>>> bob_actor.fuzzing_presets = [preset_symbol1]
>>> bob_actor.fuzzing_states = [s6.name]
>>> bob_actor.nbMaxTransitions = 7
>>>
>>> # Create Alice's automaton
>>> alice_s0 = State(name="s0")
>>> alice_s1 = State(name="s1")
>>> alice_openTransition = OpenChannelTransition(startState=alice_s0, endState=alice_s1, name="Open")
>>> alice_transition1 = Transition(startState=alice_s1, endState=alice_s1,
... inputSymbol=sym1, outputSymbols=[sym1],
... name="T1")
>>> alice_transition2 = Transition(startState=alice_s1, endState=alice_s1,
... inputSymbol=sym2, outputSymbols=[sym2],
... name="T2")
>>> alice_automata = Automata(alice_s0, symbols)
>>> automata_ascii = alice_automata.generateASCII()
>>> print(automata_ascii)
#========================#
H s0 H
#========================#
|
| OpenChannelTransition
v
T2 (Sym2;{Sym2}) +------------------------+ T1 (Sym1;{Sym1})
+------------------- | | -------------------+
| | s1 | |
+------------------> | | <------------------+
+------------------------+
<BLANKLINE>
>>>
>>> # Creation of an automaton visitor/actor and a channel on which to receive the fuzzing traffic
>>> alice_channel = UDPServer(localIP="127.0.0.1", localPort=8887, timeout=1.)
>>>
>>> # Creation of a callback function that returns a new transition
>>> def cbk_modifyTransition(availableTransitions, nextTransition, current_state,
... last_sent_symbol, last_sent_message, last_sent_structure,
... last_received_symbol, last_received_message, last_received_structure, memory):
... if nextTransition is None:
... return alice_transition2
... else:
... return nextTransition
>>>
>>> alice_automata.getState('s1').add_cbk_modify_transition(cbk_modifyTransition)
>>>
>>> alice_actor = Actor(automata=alice_automata, channel=alice_channel, initiator=False, name='Target')
>>>
>>> # We start the targeted actor
>>> alice_actor.start()
>>> time.sleep(0.5)
>>>
>>> # We start the visitor, thus the fuzzing of message formats will be applied when specific states are reached
>>> bob_actor.start()
>>> time.sleep(1)
>>>
>>> bob_actor.stop()
>>> alice_actor.stop()
>>>
>>> print(bob_actor.generateLog())
Activity log for actor 'Fuzzer' (initiator):
[+] At state 's0'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 't0' (open channel)
[+] Transition 't0' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 't2' (initiator)
[+] During transition 't2', sending input symbol ('Sym1')
[+] During transition 't2', receiving expected output symbol ('Sym1')
[+] Transition 't2' lead to state 's2'
[+] At state 's2'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 't4' (initiator)
[+] During transition 't4', sending input symbol ('Sym1')
[+] During transition 't4', receiving expected output symbol ('Sym1')
[+] Transition 't4' lead to state 's4'
[+] At state 's4'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 't5' (initiator)
[+] During transition 't5', sending input symbol ('Sym1')
[+] During transition 't5', receiving expected output symbol ('Sym1')
[+] Transition 't5' lead to state 's6'
[+] At state 's6'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 't_random' (initiator)
[+] During transition 't_random', sending input symbol ('Sym1')
[+] During transition 't_random', fuzzing activated
[+] During transition 't_random', receiving expected output symbol ('Sym1')
[+] Transition 't_random' lead to state 's6'
[+] At state 's6'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 't_random' (initiator)
[+] During transition 't_random', sending input symbol ('Sym1')
[+] During transition 't_random', fuzzing activated
[+] During transition 't_random', receiving expected output symbol ('Sym1')
[+] Transition 't_random' lead to state 's6'
[+] At state 's6'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 't_random' (initiator)
[+] During transition 't_random', sending input symbol ('Sym1')
[+] During transition 't_random', fuzzing activated
[+] During transition 't_random', receiving expected output symbol ('Sym1')
[+] Transition 't_random' lead to state 's6'
[+] At state 's6', we reached the max number of transitions (7), so we stop
>>> print(alice_actor.generateLog())
Activity log for actor 'Target' (not initiator):
[+] At state 's0'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Picking transition 'Open' (open channel)
[+] Going to execute transition 'Open'
[+] Transition 'Open' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
[+] Input symbol 'Sym1' corresponds to transition 'T1'
[+] Changing transition to 'T1' (not initiator), through callback
[+] During transition 'T1', choosing an output symbol ('Sym1')
[+] Transition 'T1' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
[+] Input symbol 'Sym1' corresponds to transition 'T1'
[+] Changing transition to 'T1' (not initiator), through callback
[+] During transition 'T1', choosing an output symbol ('Sym1')
[+] Transition 'T1' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
[+] Input symbol 'Sym1' corresponds to transition 'T1'
[+] Changing transition to 'T1' (not initiator), through callback
[+] During transition 'T1', choosing an output symbol ('Sym1')
[+] Transition 'T1' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
[+] Input symbol 'Sym1' corresponds to transition 'T1'
[+] Changing transition to 'T1' (not initiator), through callback
[+] During transition 'T1', choosing an output symbol ('Sym1')
[+] Transition 'T1' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
[+] Input symbol 'Sym1' corresponds to transition 'T1'
[+] Changing transition to 'T1' (not initiator), through callback
[+] During transition 'T1', choosing an output symbol ('Sym1')
[+] Transition 'T1' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
[+] Input symbol 'Sym1' corresponds to transition 'T1'
[+] Changing transition to 'T1' (not initiator), through callback
[+] During transition 'T1', choosing an output symbol ('Sym1')
[+] Transition 'T1' lead to state 's1'
[+] At state 's1'
[+] Randomly choosing a transition to execute or to wait for an input symbol
[+] Waiting for an input symbol to decide the transition (not initiator)
"""
from netzob.Fuzzing.Mutators.AutomataMutator import AutomataMutator, AutomataMutatorStrategy
from netzob.Fuzzing.Mutator import Mutator
if generator is None:
generator = 'xorshift'
if seed is None:
seed = Mutator.SEED_DEFAULT
if strategy is None:
strategy = AutomataMutatorStrategy.RANDOM
# Create mutator
mutator = AutomataMutator(self, generator=generator, seed=seed)
# Mutate the automaton
mutatedAutomata = mutator.mutate(strategy=strategy, target=target)
return mutatedAutomata
## Public properties ##
@public_api
@property
def initialState(self):
return self.__initialState
@initialState.setter # type: ignore
@typeCheck(State)
def initialState(self, initialState):
if initialState is None:
raise TypeError("AbstractionLayer cannot be None")
self.__initialState = initialState
@public_api
@property
def symbols(self):
return self.__symbols
@symbols.setter # type: ignore
@typeCheck(list)
def symbols(self, symbols):
self.__symbols = symbols | PypiClean |
/Allegra-0.63.zip/Allegra-0.63/lib/prompt.py |
"http://laurentszyster.be/blog/prompt/"
import sys, types
def compact_traceback (exc_info=None):
"""return a compact traceback tuple from sys.exc_info(), like:
(['error name',
('filename', 'lineno', 'function'),
...
], 'error message')
a compact traceback is a simple data structure made of 8-bit byte
strings, ready to be serialized."""
t, v, tb = exc_info or sys.exc_info ()
if type (t) == types.ClassType:
t = t.__name__
elif type (t) != str:
t = str (t)
tbinfo = []
assert tb # Must have a traceback ?
while tb:
tbinfo.append ((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str (tb.tb_lineno)
))
tb = tb.tb_next
del tb # just to be safe ?
return t, str (v), tbinfo
def python_eval (co, env):
"""try to eval the compiled co in the environement env
return either ('eval', result) or ('excp', traceback)"""
try:
return ('eval', eval (co, env))
except:
return ('excp', compact_traceback ())
def python_exec (co, env):
"""try to exec the compiled co in the environement env
return either ('exec', None) or ('excp', traceback)"""
try:
exec co in env
except:
return ('excp', compact_traceback ())
else:
return ('exec', None)
def python_prompt (line, env):
"""try eval first, if that fails try exec, return ('eval', result)
('exec', None) or ('excp', traceback)"""
try:
try:
co = compile (line, 'python_line', 'eval')
except SyntaxError:
co = compile (line, 'python_line', 'exec')
method, result = python_exec (co, env)
else:
method, result = python_eval (co, env)
except:
return ('excp', compact_traceback ())
else:
return (method, result)
# Synopsis
#
# >>> from allegra import prompt
# >>> env = {}
# >>> prompt.python_prompt ('1+1', env)
# ('eval', 2)
# >>> prompt.python_prompt ('a=1+1', env)
# ('exec', None)
# >>> env['a']
# 2
# >>> prompt.python_prompt ('foobar', env)
# ('excp', (
# 'exceptions.NameError',
# "name 'foobar' is not defined",
# [
# ('prompt.py', 'python_eval', '53'),
# ('python_line', '?', '0')
# ]
# ))
# >>> try:
# ... foobar
# ... except:
# ... prompt.compact_traceback ()
# ...
# (
# 'exceptions.NameError',
# "name 'foobar' is not defined",
# [('<stdin>', '?', '2')]
# ) | PypiClean |
/Chips-python-2.2.3.tar.gz/Chips-python-2.2.3/chips/chipsweb/examples.py | examples = {
"Fast Fourier Transform": """
/* fft.c */
/* Jonathan P Dawson */
/* 2013-12-23 */
#include <math.h>
#include <stdio.h>
/*globals*/
const int n = 1024;
const int m = 10;
double twiddle_step_real[m];
double twiddle_step_imaginary[m];
/*calculate twiddle factors and store them*/
void calculate_twiddles(){
unsigned stage, span;
for(stage=0; stage<m; stage++){
span = 1 << stage;
twiddle_step_real[stage] = cos(M_PI/span);
twiddle_step_imaginary[stage] = -sin(M_PI/span);
}
}
/*bit reverse*/
unsigned bit_reverse(unsigned forward){
unsigned reversed=0;
unsigned i;
for(i=0; i<m; i++){
reversed <<= 1;
reversed |= forward & 1;
forward >>= 1;
}
return reversed;
}
/*calculate fft*/
void fft(double reals[], double imaginaries[]){
int stage, subdft_size, span, i, ip, j;
double sr, si, temp_real, temp_imaginary, imaginary_twiddle, real_twiddle;
//read data into array
for(i=0; i<n; i++){
ip = bit_reverse(i);
if(i < ip){
temp_real = reals[i];
temp_imaginary = imaginaries[i];
reals[i] = reals[ip];
imaginaries[i] = imaginaries[ip];
reals[ip] = temp_real;
imaginaries[ip] = temp_imaginary;
}
}
//butterfly multiplies
for(stage=0; stage<m; stage++){
subdft_size = 2 << stage;
span = subdft_size >> 1;
//initialize trigonometric recurrence
real_twiddle=1.0;
imaginary_twiddle=0.0;
sr = twiddle_step_real[stage];
si = twiddle_step_imaginary[stage];
report(stage);
for(j=0; j<span; j++){
for(i=j; i<n; i+=subdft_size){
ip=i+span;
temp_real = reals[ip]*real_twiddle - imaginaries[ip]*imaginary_twiddle;
temp_imaginary = reals[ip]*imaginary_twiddle + imaginaries[ip]*real_twiddle;
reals[ip] = reals[i]-temp_real;
imaginaries[ip] = imaginaries[i]-temp_imaginary;
reals[i] = reals[i]+temp_real;
imaginaries[i] = imaginaries[i]+temp_imaginary;
}
//trigonometric recurrence
temp_real=real_twiddle;
real_twiddle = temp_real*sr - imaginary_twiddle*si;
imaginary_twiddle = temp_real*si + imaginary_twiddle*sr;
}
}
}
const int x_re_in = input("x_re");
const int x_im_in = input("x_im");
const int fft_x_re_out = output("fft_x_re");
const int fft_x_im_out = output("fft_x_im");
void main(){
unsigned i;
double reals[n];
double imaginaries[n];
/* pre-calculate sine and cosine*/
calculate_twiddles();
while(1){
/* read time domain signal */
for(i=0; i<n; i++){
reals[i] = fget_double(x_re_in);
imaginaries[i] = fget_double(x_im_in);
}
/* transform into frequency domain */
fft(reals, imaginaries);
/* output frequency domain signal*/
for(i=0; i<n; i++){
fput_double(reals[i], fft_x_re_out);
fput_double(imaginaries[i], fft_x_im_out);
}
}
}
""",
"Taylor Series": """
/* taylor.c */
/* Jonathan P Dawson */
/* 2013-12-23 */
/* Note that the math.h implementations of trig functions are synthesisable and
* more efficient than those shown here. */
#include <stdio.h>
/* globals */
double pi=3.14159265359;
/* approximate the cosine function using Taylor series */
double taylor(double angle){
double old, approximation, sign, power, fact;
unsigned count, i;
approximation = angle;
old = 0.0;
sign = -1.0;
count = 1;
power = 1.0;
fact = 1.0;
for(i=3; approximation!=old; i+=2){
old = approximation;
while(count<=i){
power*=angle;
fact*=count;
count++;
}
approximation += sign*(power/fact);
sign = -sign;
}
return approximation;
}
/* return the sine of angle in radians */
double sin(double angle){
return taylor(angle);
}
/* return the cosine of angle in radians */
double cos(double angle){
return sin(angle+(pi/2));
}
/* test routine */
const int x_in = input("x");
const int sin_x_out = output("sin_x");
const int cos_x_out = output("cos_x");
void main(){
double x;
while(1){
x = fget_double(x_in);
fput_double(sin(x), sin_x_out);
fput_double(cos(x), cos_x_out);
}
}
""",
"Square Root": """
/* sqrt.c */
/* Jonathan P Dawson */
/* 2013-12-23 */
#include <stdio.h>
/* approximate sqrt using newton's method*/
double sqrt(double n){
double square, x, old;
x = n;
old = 0.0;
while(old != x){
old = x;
x = (x + n/x)*0.5;
}
return x;
}
/* test sqrt function*/
const int x_in = input("x");
const int sqrt_x_out = output("sqrt_x");
void main(){
double x;
while(1){
x = fget_float(x_in);
fput_float(sqrt(x), sqrt_x_out);
}
}""",
"Edge Detection": """
/*Edge Detection*/
/*Jonathan P Dawson 2014-07-06*/
const int HEIGHT = 64;
const int WIDTH = 64;
const int SIZE = 4096;
void set_xy(int image[], int x, int y, int pixel){
if(x<0 || x>=WIDTH) return;
if(y<0 || y>=HEIGHT) return;
image[x+y*WIDTH] = pixel;
}
int get_xy(int image[], int x, int y){
if(x<0 || x>=WIDTH) return 0;
if(y<0 || y>=HEIGHT) return 0;
return image[x+y*WIDTH];
}
void main()
{
unsigned image_in = input("image_in");
unsigned image_out = output("image_out");
unsigned image[SIZE];
unsigned new_image[SIZE];
int x, y, pixel;
while(1){
/* read in image */
for(y=0; y<HEIGHT; y++){
for(x=0; x<WIDTH; x++){
set_xy(image, x, y, fgetc(image_in));
}
}
/* apply edge detect */
for(y=0; y<HEIGHT; y++){
for(x=0; x<WIDTH; x++){
pixel = get_xy(image, x, y ) << 2;
pixel -= get_xy(image, x-1, y+1);
pixel -= get_xy(image, x+1, y-1);
pixel -= get_xy(image, x-1, y-1);
pixel -= get_xy(image, x+1, y+1);
set_xy(new_image, x, y, pixel);
}
}
/* write out image */
for(y=0; y<HEIGHT; y++){
for(x=0; x<WIDTH; x++){
fputc(get_xy(new_image, x, y), image_out);
}
}
}
}
""",
"FIR Filter": """
/* Chips-2.0 FIR Filter Example */
/* Jonathan P Dawson 2014-07-05 */
#include <stdio.h>
unsigned in = input("a");
unsigned out = output("z");
unsigned kernel_in = input("k");
const int N = 10;
void main(){
unsigned i = 0;
unsigned inp = 0;
float delay[N];
float kernel[N];
float data_out;
/* read in filter kernel */
for(i=0; i<N; i++){
kernel[i] = fget_float(kernel_in);
}
/* execute filter on input stream */
while(1){
delay[inp] = fget_float(in);
data_out=0.0; i=0;
while(1){
data_out += delay[inp] * kernel[i];
if(i == N-1) break;
i++;
if(inp == N-1){
inp=0;
}else{
inp++;
}
}
fput_float(data_out, out);
}
}
""",
"Knight Rider": """
/* Knight Rider Style LEDs */
int leds = output("leds");
void main()
{
int shifter = 1;
while(1){
while(shifter < 0x80){
shifter <<= 1;
fputc(shifter, leds);
wait_clocks(10000000); /*0.1 seconds @ 100MHz*/
}
while(shifter > 1){
shifter >>= 1;
fputc(shifter, leds);
wait_clocks(10000000); /*0.1 seconds @ 100MHz*/
}
}
}
""",
"Seven Segment": """
/* Seven Segment Display Driver */
int nibble = input("nibble");
int leds = output("leds");
int digits[] = {
0x7E, 0x30, 0x6D, 0x79,
0x33, 0x5B, 0x5F, 0x70,
0x7F, 0x7B, 0x77, 0x1F,
0x4E, 0x3D, 0x4F, 0x47};
void main()
{
while(1) fputc(digits[fgetc(nibble)], leds);
}
""",
"LZSS Compress": """
/*LZSS Compression Component*/
/*Jonathan P Dawson 2014-07.10*/
const int N = 1024;
const int LOG2N = 10;
unsigned raw_in = input("raw_in");
unsigned compressed_out = output("compressed_out");
/*Create a to send data of an arbitrary bit length*/
unsigned packed, stored = 0;
void send_bits(unsigned data, unsigned bits){
unsigned i;
for(i=0; i<bits; i++){
packed >>= 1;
packed |= (data & 1) << 31;
data >>= 1;
stored++;
if(stored == 32){
fputc(packed, compressed_out);
stored = 0;
}
}
}
/*A function that reads a stream of uncompressed data,
and creates a stream of compressed data*/
void main(){
unsigned pointer, match, match_length, longest_match, longest_match_length;
unsigned buffer[N];
unsigned new_size;
while(1){
for(pointer=0; pointer<N; pointer++){
buffer[pointer] = fgetc(raw_in);
}
pointer=0;
new_size = 0;
while(pointer<N){
/*Find the longest matching string already sent*/
longest_match = 0;
longest_match_length = 0;
for(match=0; match<pointer; match++){
/*match length of 0 indicates no match*/
match_length = 0;
/*search through buffer to find a match*/
while(buffer[match+match_length] == buffer[pointer+match_length]){
match_length++;
}
/*If this is the longest match, remember it*/
if(match_length > longest_match_length){
longest_match = match;
longest_match_length = match_length;
}
}
/*send data*/
if(longest_match_length >= 3){
send_bits(0, 1);
send_bits(longest_match_length, LOG2N);
send_bits(pointer - longest_match, LOG2N);
pointer += longest_match_length;
new_size += LOG2N + LOG2N + 1;
}
else{
send_bits(1, 1);
send_bits(buffer[pointer], 8);
pointer++;
new_size += 9;
}
}
}
}
""",
"LZSS Decompress": """
/*LZSS Decompression Component*/
/* Jonathan P Dawson 2014-07-10*/
const int N = 1024;
const int LOG2N = 10;
unsigned raw_out = output("raw_out");
unsigned compressed_in = input("compressed_in");
/* A function to get data of an arbitrary bit length data */
unsigned stored = 0;
unsigned packed;
unsigned get_bits(unsigned bits){
unsigned i, value = 0;
for(i=0; i<bits; i++){
if(!stored){
stored = 32;
packed = fgetc(compressed_in);
}
value >>= 1;
value |= (packed & 1) << 31;
packed >>= 1;
stored--;
}
return value >> (32 - bits);
}
/* Decompress a stream of lzss compressed data,
and generate a stream of raw data*/
void main(){
unsigned i, pointer = 0, distance, length, data;
unsigned buffer[N];
while(1){
/*get distance length*/
if(get_bits(1)){
data = get_bits(8);
buffer[pointer] = data;
pointer++;
fputc(data, raw_out);
}
else{
length = get_bits(LOG2N);
distance = get_bits(LOG2N);
for(i=0; i<length; i++){
data = buffer[pointer-distance];
buffer[pointer] = data;
pointer++;
fputc(data, raw_out);
}
}
}
}"""
} | PypiClean |
/Adyan_test-0.2.9-py3-none-any.whl/Adyan/Utils/H5_TK.py |
import asyncio
import time
from pyppeteer.launcher import launch
from faker import Faker
from pyppeteer import launcher
# launcher.DEFAULT_ARGS.remove("--enable-automation")
fake = Faker()
class GetTK:
def __init__(self, mongo_conn):
self.mongo_conn = mongo_conn
async def get_content(self, url):
browser = await launch(
{
'headless': True,
"args": [
"--disable-infobars",
],
"dumpio": True,
"userDataDir": "",
}
)
page = await browser.newPage()
await page.setViewport({'width': 1200, 'height': 700})
# await page.setUserAgent(
# 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36')
# try:
await page.goto(url, {'timeout': 0})
# except:
# return url
await page.evaluate(
'''() =>{ Object.defineProperties(navigator, { webdriver:{ get: () => false } }) }''')
await page.evaluate('''() =>{ window.navigator.chrome = { runtime: {}, }; }''')
await page.evaluate(
'''() =>{ Object.defineProperty(navigator, 'languages', { get: () => ['en-US', 'en'] }); }''')
await page.evaluate(
'''() =>{ Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5,6], }); }''')
await asyncio.sleep(2)
cookie_list = await page.cookies()
print(cookie_list)
cookies = {}
for cookie in cookie_list:
print(cookie.get("name"))
if cookie.get("name") == '_m_h5_tk' or cookie.get("name") == '_m_h5_tk_enc':
cookies[cookie.get("name")] = cookie.get("value")
cookies['time'] = int(time.time() + 3600)
self.mongo_conn.update_one({'id': '1'}, cookies)
await browser.close()
def headers(self):
while True:
user_agent = fake.chrome(
version_from=63, version_to=80, build_from=999, build_to=3500
)
if "Android" in user_agent or "CriOS" in user_agent:
continue
else:
break
return user_agent
def start(self):
url = 'https://detail.1688.com/offer/627056024629.html?clickid=7ff082f5c2214f04a00580fcad6c8d52&sessionid=6947210c01f988d9ad53f73e6ec90f24'
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.get_content(url)) | PypiClean |
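# Usage sketch: mongo_conn is assumed to be a storage wrapper exposing
# update_one(filter, values) over plain documents (raw pymongo would
# instead expect an update operator such as {'$set': values}):
#
#     GetTK(mongo_conn).start()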
/Flask-Model-Management-0.1.0.tar.gz/Flask-Model-Management-0.1.0/flask_model_management/domain.py | import warnings
from datetime import date
from datetime import datetime
from decimal import Decimal
from functools import partial
import attr
from wtforms import DecimalField
from wtforms import FloatField
from wtforms import IntegerField
from wtforms import StringField
from wtforms.fields import DateField
from wtforms.fields import DateTimeField
from wtforms.fields import RadioField
from .crud import CRUD_OPERATIONS
def true_false_or_none(value):
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
return None
def field_from_column(column):
if column.type == int:
field = IntegerField
elif column.type == bool:
field = partial(
RadioField,
coerce=true_false_or_none,
choices=(("true", "True"), ("false", "False"), ("none", "None")),
)
elif column.type == float:
field = FloatField
elif column.type == Decimal:
field = DecimalField
elif column.type == datetime:
field = DateTimeField
elif column.type == date:
field = DateField
else:
# stops '' being passed
field = partial(StringField, filters=[lambda x: x or None])
return field(column.name)
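# A minimal sketch (assumes the Column class defined below; ColumnType
# compares equal to a plain Python type, so a bare ``int`` also works):
#
#     col = Column(key='age', name='age', type=int)
#     field_from_column(col)  # -> an unbound IntegerField named 'age'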
@attr.s(eq=False)
class ColumnType:
"""A representation of a sqlalchemy column type"""
python_type = attr.ib()
sqlalchemy_type = attr.ib()
@classmethod
def from_sqlalchemy_col_type(cls, col_type):
return cls(col_type.python_type, str(col_type))
def __eq__(self, other):
return self.python_type is other
def __str__(self):
return self.python_type.__name__
@attr.s
class Column:
"""A representation of a sqlalchemy model column"""
key = attr.ib()
name = attr.ib()
type = attr.ib()
required = attr.ib(default=True)
default = attr.ib(default=None)
primary_key = attr.ib(default=False)
foreign_key = attr.ib(default=False)
autoincrement = attr.ib(default=False)
@property
def is_key(self):
return self.primary_key or self.foreign_key
@property
def nullable(self):
return not self.required
@classmethod
def from_sqlalchemy_column(cls, col):
column = cls(
col.key,
col.name,
ColumnType.from_sqlalchemy_col_type(col.type),
required=(not col.nullable),
default=col.default.arg if col.default is not None else None,
primary_key=col.primary_key,
foreign_key=bool(col.foreign_keys),
autoincrement=col.autoincrement,
)
return column
@attr.s
class Model:
"""A representation of a sqlalchemy database model"""
model = attr.ib()
excluded_columns = attr.ib(factory=list)
excluded_operations = attr.ib(factory=list)
view_decorators = attr.ib(factory=list)
@property
def name(self):
return str(self.model.__tablename__)
@property
def columns(self):
cols = []
for col in self.model.__table__.columns:
if col.name not in self.excluded_columns:
cols.append(Column.from_sqlalchemy_column(col))
elif col.name in self.excluded_columns and not col.nullable:
warnings.warn(
f"You have excluded the column: {col.name}. It is a "
f"non-nullable column, and therefore required. By excluding "
f"it you will not be able to 'create'"
)
return cols
@property
def operations(self):
allowed_operations = [
operation for operation in CRUD_OPERATIONS if operation not in self.excluded_operations
]
return allowed_operations
def form(self, operation, multi_dict):
from .form import get_form
return get_form(self, operation, multi_dict) | PypiClean |
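# A minimal usage sketch (``User`` is a hypothetical declarative
# SQLAlchemy model):
#
#     user_model = Model(User, excluded_columns=['password'])
#     user_model.columns     # Column wrappers, minus excluded names
#     user_model.operations  # CRUD operations not listed in excluded_operations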
/Hive_ML-1.0.1.tar.gz/Hive_ML-1.0.1/Hive_ML_scripts/Hive_ML_feature_selection.py |
import datetime
import importlib.resources
import json
import os
from argparse import ArgumentParser, RawTextHelpFormatter
from pathlib import Path
from textwrap import dedent
import numpy as np
import pandas as pd
from Hive.utils.log_utils import (
get_logger,
add_verbosity_options_to_argparser,
log_lvl_from_verbosity_args,
)
from joblib import parallel_backend
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
import Hive_ML.configs
from Hive_ML.data_loader.feature_loader import load_feature_set
from Hive_ML.training.models import adab_tree, random_forest, knn, decicion_tree, lda, qda, naive, svm_kernel, \
logistic_regression, ridge, mlp
from Hive_ML.utilities.feature_utils import data_shuffling, feature_normalization, prepare_features
TIMESTAMP = "{:%Y-%m-%d_%H-%M-%S}".format(datetime.datetime.now())
MODELS = {
"rf": random_forest,
"adab": adab_tree,
"lda": lda,
"qda": qda,
"logistic_regression": logistic_regression,
"knn": knn,
"naive": naive,
"decision_tree": decicion_tree,
"svm": svm_kernel,
"ridge": ridge,
"mlp": mlp
}
DESC = dedent(
"""
Script to run Sequential 5-CV Forward Feature Selection on a Feature Set. The SFFS summary (in JSON format) is saved
in the experiment folder, defined by the ``experiment_name`` argument.
""" # noqa: E501
)
EPILOG = dedent(
"""
Example call:
::
{filename} -feature-file /path/to/feature_table.csv --config-file config_file.json --experiment-name Radiomics
""".format( # noqa: E501
filename=Path(__file__).name
)
)
import warnings
warnings.filterwarnings("ignore")
def get_arg_parser():
pars = ArgumentParser(description=DESC, epilog=EPILOG, formatter_class=RawTextHelpFormatter)
pars.add_argument(
"--feature-file",
type=str,
required=True,
help="Input Dataset folder",
)
pars.add_argument(
"--config-file",
type=str,
required=True,
help="Configuration JSON file with experiment and dataset parameters.",
)
pars.add_argument(
"--experiment-name",
type=str,
required=True,
help="Experiment name used to save the SFFS summary.",
)
add_verbosity_options_to_argparser(pars)
return pars
def main():
parser = get_arg_parser()
arguments = vars(parser.parse_args())
logger = get_logger(
name=Path(__file__).name,
level=log_lvl_from_verbosity_args(arguments),
)
try:
with open(arguments["config_file"]) as json_file:
config_dict = json.load(json_file)
except FileNotFoundError:
with importlib.resources.path(Hive_ML.configs, arguments["config_file"]) as json_path:
with open(json_path) as json_file:
config_dict = json.load(json_file)
selected_features = {}
models = config_dict["models"]
aggregation = "Flat"
stats_4D = False
flatten_features = True
if "feature_aggregator" in config_dict:
aggregation = config_dict["feature_aggregator"]
if aggregation != "Flat":
stats_4D = True
flatten_features = False
elif aggregation.endswith("Norm"):
stats_4D = False
flatten_features = False
feature_set, subject_ids, subject_labels, feature_names, mean_features, sum_features, std_features, mean_delta_features = load_feature_set(
arguments["feature_file"],
get_4D_stats=stats_4D,
flatten_features=flatten_features)
if aggregation == "Flat":
features = feature_set
elif aggregation == "Mean":
features = mean_features
elif aggregation == "SD":
features = std_features
elif aggregation == "Sum":
features = sum_features
elif aggregation == "Delta":
features = mean_delta_features
label_set = np.array(subject_labels)
if aggregation.endswith("Norm"):
features = feature_set
feature_set_3D = np.array(features).squeeze(-2)
train_feature_set, train_label_set, test_feature_set, test_label_set = data_shuffling(
np.swapaxes(feature_set_3D, 0, 1), label_set, config_dict["random_seed"])
else:
n_features = features.shape[1]
n_subjects = features.shape[0]
filtered_feature_set = []
filtered_feature_names = []
features = np.nan_to_num(features)
for feature in range(n_features):
exclude = False
for feature_val in np.unique(features[:, feature]):
if (np.count_nonzero(features[:, feature] == feature_val) / n_subjects) > 0.5:
exclude = True
print("Excluding:", feature_names[feature])
break
if not exclude:
filtered_feature_set.append(list(features[:, feature]))
filtered_feature_names.append(feature_names[feature])
feature_set = np.vstack(filtered_feature_set).T
feature_names = filtered_feature_names
print("# Features: {}".format(feature_set.shape[1]))
print("# Labels: {}".format(label_set.shape))
train_feature_set, train_label_set, test_feature_set, test_label_set = data_shuffling(feature_set, label_set,
config_dict[
"random_seed"])
experiment_name = arguments["experiment_name"]
experiment_dir = Path(os.environ["ROOT_FOLDER"]).joinpath(
experiment_name, config_dict["feature_selection"],
aggregation,
"FS")
experiment_dir.mkdir(parents=True, exist_ok=True)
n_iterations = 0
for classifier in models:
if classifier in ["rf", "adab"]:
n_iterations += 1
else:
n_iterations += config_dict["n_folds"]
pbar = tqdm(total=n_iterations)
with parallel_backend('loky', n_jobs=-1):
for classifier in models:
if classifier in ["rf", "adab"]:
pbar.update(1)
continue
selected_features[classifier] = {}
kf = StratifiedKFold(n_splits=config_dict["n_folds"], random_state=config_dict["random_seed"], shuffle=True)
for fold, (train_index, _) in enumerate(kf.split(train_feature_set, train_label_set)):
pbar.set_description(f"{classifier}, fold {fold} FS")
fs_summary = Path(experiment_dir).joinpath(f"FS_summary_{classifier}_fold_{fold}.json")
if fs_summary.is_file():
with open(fs_summary, "r") as f:
selected_features[classifier][fold] = json.load(f)
else:
x_train, y_train, _, _ = prepare_features(train_feature_set, train_label_set, train_index,
aggregation)
n_features = config_dict["n_features"]
if n_features > x_train.shape[1]:
n_features = x_train.shape[1]
x_train, _, _ = feature_normalization(x_train)
clf = MODELS[classifier](**models[classifier], random_state=config_dict["random_seed"])
sffs_model = SFS(clf,
k_features=n_features,
forward=True,
floating=True,
scoring='roc_auc',
verbose=0,
n_jobs=-1,
cv=5)
df_features_x = []
for x_train_row in x_train:
df_row = {}
for idx, feature_name in enumerate(feature_names):
df_row[feature_name] = x_train_row[idx]
df_features_x.append(df_row)
df_features_x = pd.DataFrame.from_records(df_features_x)
sffs = sffs_model.fit(df_features_x, y_train)
sffs_features = sffs.subsets_
for key in sffs_features:
sffs_features[key]['cv_scores'] = sffs_features[key]['cv_scores'].tolist()
selected_features[classifier][fold] = sffs_features
with open(fs_summary, "w") as f:
json.dump(sffs_features, f)
pbar.update(1)
with open(str(Path(experiment_dir).joinpath(f"{experiment_name}_FS_summary.json")), "w") as f:
json.dump(selected_features, f)
if __name__ == "__main__":
main() | PypiClean |
/Flask-DotEnv-0.1.2.tar.gz/Flask-DotEnv-0.1.2/README.rst | Flask-DotEnv
------------
.. image:: https://travis-ci.org/grauwoelfchen/flask-dotenv.svg?branch=master
:target: https://travis-ci.org/grauwoelfchen/flask-dotenv
.. image:: https://img.shields.io/pypi/v/Flask-Dotenv.svg
:target: https://pypi.python.org/pypi/Flask-Dotenv/
| Adds support for the ``.env`` file in Flask-style config classes for applications.
| Version ``0.0.3`` and above support setting config variables without using ``os.environ``.
|
``Flask-DotEnv`` will directly set (add, update, map as alias and eval as
literal) variables from the ``.env`` file, and cast them to Python native types
as appropriate.
(optional)
* ``alias()`` makes alias vars
* ``eval()`` evaluate var to literal (via ``ast``)
Repositories
------------
| My main repository is on GitLab (.com).
| But pull requests on GitHub are also welcome. :-D
* https://gitlab.com/grauwoelfchen/flask-dotenv.git (main)
* https://github.com/grauwoelfchen/flask-dotenv.git
Install
-------
::
$ pip install Flask-DotEnv
Usage
-----
**********
DotEnv
**********
::
from flask import Flask
from flask_dotenv import DotEnv
app = Flask(__name__)
env = DotEnv(app)
As a factory pattern.
::
env = DotEnv()
env.init_app(app)
| This ``env`` module may be useful in your Config class.
| e.g.
::
class Config:
SECRET_KEY = ":'("
...
@classmethod
def init_app(self, app):
env = DotEnv()
env.init_app(app)
Then in your app:
::
from config import Config
app = Flask(__name__)
app.config.from_object(config[config_name])
See also:
`flask.Config.from_object <http://flask.pocoo.org/docs/1.0/api/#flask.Config.from_object>`_ (Flask's API documentation)
**********
Arguments
**********
You can pass the ``.env`` file path as a second argument of ``init_app()``.
::
env.init_app(app, env_file="/path/to/.env", verbose_mode=True)
| The second argument (``env_file``) is optional, and the default is ``os.path.join(os.getcwd(), '.env')``.
| The third argument (``verbose_mode``) is also optional, and defaults to ``False``.
| If ``verbose_mode`` is ``True``, the server outputs a nice log message showing which vars will be set,
| like this:
::
* Overwriting an existing config var: SECRET_KEY
* Setting an entirely new config var: DEVELOPMENT_DATABASE_URL
* Casting a denoted var as a literal: MAIL_PORT => <class 'int'>
* Making a specified var as an alias: DEVELOPMENT_DATABASE_URL -> SQLALCHEMY_DATABASE_URI
...
**********
Alias
**********
The ``alias()`` method takes a dict argument. Each key is the existing config var,
while each value is the new alias.
::
env.alias(maps={
'TEST_DATABASE_URL': 'SQLALCHEMY_DATABASE_URI',
'TEST_HOST': 'HOST'
})
Here's an example of its use:
::
class Config:
SECRET_KEY = ":'("
...
@classmethod
def init_app(self, app):
env = DotEnv()
env.init_app(app)
# The following will store in `SQLALCHEMY_DATABASE_URI` the value
# in, for example, `DEVELOPMENT_DATABASE_URL`
prefix = self.__name__.replace('Config', '').upper()
env.alias(maps={
prefix + '_DATABASE_URL': 'SQLALCHEMY_DATABASE_URI'
})
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = None
config = {
'development': DevelopmentConfig
}
**********
Eval
**********
``eval()`` also takes a dict argument. Each key is an existing config
var, while each value is the type it should evaluate to. If the value
evaluates to some other type, the config var is skipped and a log message is shown.
::
env.eval(keys={
'MAIL_PORT': int,
'SETTINGS': dict
})
And here's an example of its use:
::
class Config:
SECRET_KEY = ":'("
...
@classmethod
def init_app(self, app):
env = DotEnv()
env.init_app(app)
# `MAIL_PORT` will be set to the integer version of the value found there
# using `ast.literal_eval`.
env.eval(keys={
MAIL_PORT: int
})
.env File
-----------
The following lines are all valid.
::
SECRET_KEY="123"
USERNAME=john
DATABASE_URL='postgresql://user:password@localhost/production?sslmode=require'
FEATURES={'DotEnv': True}
# comment and blank lines are also supported
export ENV="production"
export env="staging"
Development
-----------
Run the unit tests with:
::
$ python setup.py test
Link
----
Inspired by:
* `python-dotenv`_
* `django-dotenv`_
Other packages that also set configuration variables:
* `Flask-EnvConfig`_
* `Flask-UserEnvConfig`_
License
-------
BSD 2-Clause License
.. _python-dotenv: https://github.com/theskumar/python-dotenv
.. _django-dotenv: https://github.com/jpadilla/django-dotenv
.. _Flask-EnvConfig: https://bitbucket.org/romabysen/flask-envconfig
.. _Flask-UserEnvConfig: https://github.com/caustin/flask-userenvconfig
/Dero-0.15.0-py3-none-any.whl/dero/latex.py

import os, datetime, filecmp, shutil, math
import pandas as pd
from io import StringIO
from .data import replace_missing_csv
def date_time_move_latex(tablename,filepath, folder_name='Tables'):
r"""
Takes a LaTeX .tex file and its PDF (after the PDF has been created by pdflatex) and moves
them into a dated table folder, checking whether the same table was previously created.
If it is identical to the prior version, the new files are simply deleted.
Required arguments:
tablename: operating system name of the table, without extensions
filepath: full filepath of table, without table name. put r before quotes as follows: r'C:\Users\Folder'
"""
def remove_if_exists(filepath):
try:
os.remove(filepath)
except FileNotFoundError:
print('Cannot delete: did not find {}'.format(filepath))
def move_if_exists(inpath, outpath):
try:
shutil.move(inpath, outpath)
except FileNotFoundError:
print('Cannot move: did not find {}'.format(inpath))
def remove_all_if_exist(filepaths):
[remove_if_exists(filepath) for filepath in filepaths]
def move_all_if_exists(inpaths, outfolder):
[move_if_exists(inpath, outfolder) for inpath in inpaths]
def exit_sequence():
inpath_aux = os.path.join(filepath, str(tablename) + '.aux')
inpath_log = os.path.join(filepath, str(tablename) + '.log')
remove_all_if_exist([inpath_aux, inpath_log])
return
os.chdir(filepath) #sets working directory to current directory of table
table_pdf = tablename + ".pdf"
table_tex = tablename + ".tex"
table_xlsx = tablename + ".xlsx"
inpath_pdf = os.path.join(filepath,table_pdf)
inpath_tex = os.path.join(filepath,table_tex)
inpath_xlsx = os.path.join(filepath,table_xlsx)
all_inpaths = [inpath_pdf, inpath_tex, inpath_xlsx]
tables_path = os.path.join(filepath, folder_name) #set table directory
if not os.path.exists(tables_path): #create a general table directory if it doesn't exist
os.makedirs(tables_path)
current_time = datetime.datetime.today().timetuple()
format_time = [str(current_time[0]),str(current_time[1]),str(current_time[2])]
for i in range(3):
if current_time[i] < 10:
format_time[i] = "0" + str(current_time[i])
datetime_str = "{}-{}-{}_".format(format_time[0],format_time[1],format_time[2])
count = 0 #set up count variable
while True: #continuous loop
count += 1
str_count = "Num" + str(count)
name_str = datetime_str + str_count
folder_path = os.path.join(tables_path,name_str)
outpath_tex = os.path.join(folder_path, table_tex)
if os.path.exists(folder_path): #if the folder already exists
if os.path.exists(outpath_tex): #if there is already a tex file with the same name
if filecmp.cmp(outpath_tex,inpath_tex) == True: #if this is the same exact table
exit_sequence()
return remove_all_if_exist(all_inpaths)
else: #if there is a tex file with the same name but it's not the same table
continue #go to next iteration of loop (change output number)
else:
move_all_if_exists(all_inpaths, folder_path)
return exit_sequence()
else: #if the folder doesn't exist
os.mkdir(folder_path) #create the folder
move_all_if_exists(all_inpaths, folder_path)
return exit_sequence()
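# Example usage (a minimal sketch; the table name and folder are hypothetical,
# and pdflatex must already have produced my_table.pdf in that folder):
#   date_time_move_latex('my_table', r'C:\Users\Folder', folder_name='Tables')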
def csv_to_raw_latex(infile, csvstring=False, missing_rep=" - ", formatstr='{:.3f}', skipfix=None):
'''
Takes a CSV text file and converts it to a LaTeX formatted list, with each line of the LaTeX
file as an item in the list.
Required options:
infile: Full file path of CSV (include r before quotes)
Optional options:
csvstring: True to pass a CSV string to infile rather than load from file
missing_rep: Representation for missing numbers, default " - "
formatstr: Python string for number formatting, for example '{:.3f}' with quotes
skipfix: String or list of strings of fixes to skip, options are ['&','%','_']
'''
if not csvstring:
f = open(infile,'r')
else:
f = StringIO(infile)
if skipfix:
if isinstance(skipfix, str):
skipfix = [skipfix]
assert isinstance(skipfix, list)
csv_list = f.readlines()
miss_csv_list = replace_missing_csv(csv_list,missing_rep)
latex_list = []
for i, line in enumerate(miss_csv_list):
line_string = ''
for j, item in enumerate(line):
if j != 0: #don't put an & before the first item in line
line_string += ' & '
#LaTeX character fixes
if skipfix:
if '&' not in skipfix:
item = item.replace('&', '\\&')
if '%' not in skipfix:
item = item.replace('%', '\\%')
if '_' not in skipfix:
item = item.replace('_', '\\_')
else: #make all replacements
item = item.replace('&','\\&')
item = item.replace('%','\\%')
item = item.replace('_','\\_')
if item.find('.') != -1: #if we are dealing with a number with decimals
if formatstr:
try:
item = formatstr.format(float(item))
except (ValueError, TypeError):
pass
item = item.replace('\n','')
line_string += item
line_string += " \\\\ \n"
if i == 0: #on the first line, remove quotes from names
line_string = line_string.replace('''"''','') #strip out quotes
latex_list.append(line_string)
if not csvstring:
f.close()
return latex_list
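# Example usage (a minimal sketch; 'results.csv' is a hypothetical file):
#   lines = csv_to_raw_latex(r'C:\data\results.csv', missing_rep=' - ',
#                            formatstr='{:.2f}', skipfix='_')
#   # Each element of `lines` is one LaTeX table row terminated by ' \\ '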
def df_to_pdf_and_move(dflist, outfolder, outname='table', tabular_string='', string_format='',
above_text='', below_text='',
font_size=12, caption='', missing_rep=' - ', landscape=False,
panel_names=None, colname_flags=None,
as_document=True, outmethod='pandas'):
'''
Takes a dataframe or list of dataframes as input and outputs to a LaTeX formatted table with multiple panels,
creates a PDF, and moves the LaTeX file and PDF to a dated folder.
Required options:
dflist: Dataframe or list of dataframes.
outfolder: Output folder for LaTeX file and PDF. Inside of this folder, a folder called Tables will be created,
inside of which the two files will be put inside another folder with the date.
Optional options:
outname: Name of output table, default is table
tabular_string: Can take any string that would normally be used in tabular (i.e. rrr for three columns right aligned)
as well as L{<width>}, C{<width>}, and R{<width>} (i.e. L{3cm}) for left, center, and right aligned
fixed width. Additionally . aligns on the decimal. Default is first column left aligned, rest
center aligned.
string_format: String or list of format of numbers in the table. Please see Python number formats. Pass a blank
string to leave formatting untouched (the default).
above_text: String of text to display above table
below_text: String of text to display below table
font_size: Font size, default 12
caption: Title of table
missing_rep: Representation for missing numbers, default " - "
landscape: Boolean. True to switch table to landscape output instead of portrait.
panel_names: Python list of names of each panel, to go below column names, e.g. ['Table','Other Table']
colname_flags: Python list of yes or no flags for whether to display column names for each panel. Default is to
display column names only for the first panel, as usually the panels have the same columns.
The default input for a three panel table would be ['y','n','n']
as_document: Boolean. True to output latex wrappers for table to be a standalone document. False to write
only table wrappers so that table can be included in another document
outmethod: String, 'pandas' or 'csv'. If 'pandas', uses pandas' built in df.to_latex() to build latex. If
'csv', uses df.to_csv() and then dero.csv_to_raw_latex(). The latter case is useful when the table
itself contains latex expressions.
'''
if isinstance(dflist, pd.DataFrame):
dflist = [dflist]
assert isinstance(dflist, list)
if isinstance(string_format, str):
string_format = [string_format] * len(dflist)
assert isinstance(string_format, list)
def is_number(s):
try:
float(s)
return True
except (ValueError, TypeError):
return False
def latex_replacements(string):
return string.replace('&','\&').replace('%','\%').replace('_','\_')
def latex_filename_replacements(string):
return string.replace('%', 'pct').replace('/','_').replace('\\','_')
def all_latex_replacements(*args):
return [latex_replacements(item) for item in args]
# Latex string replacements will be made in the data below. Here make adjustments to titles, above/below text, etc.
caption, above_text, below_text = all_latex_replacements(caption, above_text, below_text)
outname = latex_filename_replacements(outname)
if panel_names is not None:
panel_names = all_latex_replacements(*panel_names)
outname_tex = str(outname) + ".tex"
outpath = os.path.join(outfolder, outname_tex)
latex_string_list = [] #set container for final LaTeX table contents
if (colname_flags is None) or (len(colname_flags) != len(dflist)): #if the user didn't specify whether to use colnames, or they specified an incorrect number of flags
colname_flags = ['y'] #set first colnames to show
for i in range(len(dflist) - 1):
colname_flags.append('n') #set rest of colnames not to show
panel_order = -1
for i, df in enumerate(dflist): #for each csv in the list
if string_format[i]:
df = dflist[i].applymap(lambda x: string_format[i].format(float(x)) if is_number(x) else x)
df = df.fillna(missing_rep)
if outmethod.lower() == 'pandas':
latex_list = [line for line in df.to_latex().split('\n') if not line.startswith('\\')]
elif outmethod.lower() == 'csv':
latex_list = [line for line in csv_to_raw_latex(df.to_csv(), missing_rep=missing_rep,
csvstring=True, skipfix='_',
formatstr=string_format[i]) if not line.startswith('\\')]
number_of_columns = 1 + latex_list[0].count(' & ') #number of columns is 1 + number of separators
if colname_flags[i].lower() in ('n','no'): #if the flag for colnames is no for this panel
latex_list = latex_list[1:] #chop off colnames
if panel_names is not None and panel_names[i]:
panel_order += 1 #In combination with next line, sets panel to A, etc.
panel_letter = chr(panel_order + ord('A')) #sets first panel to A, second to B, and so on
#LaTeX formatting code
panel_latex_list = [
r'\midrule \\[-11pt]',
'\n',
r'\multicolumn{' + str(number_of_columns) + '}{c}{Panel '+ panel_letter + ': ' + panel_names[i] + '} \\\ \\\[-11pt]',
'\n',
r'\midrule',
'\n'
]
else: #if there is no panel name, just put in a midrule
panel_latex_list = [
r'\midrule',
'\n'
]
latex_list = panel_latex_list + latex_list
latex_string = "\n".join(latex_list) #convert list to string
latex_string_list.append(latex_string) #add this csv's LaTeX table string to the full list of LaTeX table strings
if tabular_string == "": #set default tabular format
tabular_string = 'l' + 'c' * (number_of_columns - 1) #first column left aligned, rest centered
#Set list of lines to be written to output file at beginning
document_header_list = [r'\documentclass[' + str(font_size) + 'pt]{article}',r'\usepackage{amsmath}',r'\usepackage{pdflscape}',r'\usepackage[margin=0.3in]{geometry}',
r'\usepackage{dcolumn}',r'\usepackage{booktabs}',r'\usepackage{array}', r'\usepackage{threeparttable}',
r'\newcolumntype{L}[1]{>{\raggedright\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}',
r'\newcolumntype{C}[1]{>{\centering\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}',
r'\newcolumntype{R}[1]{>{\raggedleft\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}',
r'\newcolumntype{.}{D{.}{.}{-1}}',r'\title{\LaTeX}',r'\date{}',r'\author{Nick DeRobertis}',
r'\begin{document}']
table_header_list = [r'\begin{table}',r'\centering',r'\begin{threeparttable}',
above_text,r'\caption{' + caption + '}',r'\begin{tabular}{' + tabular_string + '}',
r'\toprule']
#Set list of lines to be written to output file at end
table_footer_list = [r'\bottomrule',r'\end{tabular}',r'\begin{tablenotes}[para,flushleft]',r'\item ' + below_text,r'\end{tablenotes}',
r'\end{threeparttable}',r'\end{table}']
document_footer_list = [r'\end{document}']
if landscape:
table_header_list.insert(0, r'\begin{landscape}')
table_footer_list.append(r'\end{landscape}')
if as_document:
latex_header_list = document_header_list + table_header_list
latex_footer_list = table_footer_list + document_footer_list
else:
latex_header_list = table_header_list
latex_footer_list = table_footer_list
#Actually write to file
with open(outpath,'w') as f:
for line in latex_header_list: #write each line in the header list, with carriage returns in between
f.write(line)
f.write("\n")
for latex_string in latex_string_list: #write each csv table to file in LaTeX format
f.write(latex_string)
for line in latex_footer_list: #write each line in the footer list, with carriage returns in between
f.write(line)
f.write("\n")
f.close()
os.chdir(outfolder) #changes working filepath
# Only create pdf if we are creating a standalone document
if as_document:
os.system('pdflatex ' + '"' + outname_tex + '"') #create PDF
date_time_move_latex(outname,outfolder) #move table into appropriate date/number folder
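# Example usage (a minimal sketch; the DataFrame and output folder are hypothetical):
#   import pandas as pd
#   df = pd.DataFrame({'alpha': [1.0, 2.0], 'beta': [3.0, 4.0]})
#   df_to_pdf_and_move(df, outfolder=r'C:\Output', outname='example_table',
#                      caption='Example Table', string_format='{:.2f}')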
def latex_equations_to_pdf(latex_list, directory, name='Equations', below_text=None,
math_size=18, text_size=14, title=None, para_space='1em',
inline=False, as_document=True):
script_size = math.ceil(math_size * (2/3))
scriptscript_size = math.ceil(math_size * .5)
assert text_size in (8, 9, 10, 11, 12, 14, 17, 20) #latex allowed font sizes
if inline:
surround_char_beg = '$'
surround_char_end = '$'
else:
surround_char_beg = r'\begin{dmath}'
surround_char_end = r'\end{dmath}'
# Header and footer are needed to create a standalone document using the equations.
# If as_document=False, header and footer will not be used.
headers = [r'\documentclass[{} pt]{{extarticle}}'.format(text_size),
#First size is text size, second is math size, third is script size,
#fourth is scriptscript size
r'\DeclareMathSizes{{{0}}}{{{1}}}{{{2}}}{{{3}}}'.format(
text_size, math_size, script_size, scriptscript_size),
r'\usepackage{amsmath}',
r'\usepackage{breqn}',
r'\usepackage[margin=0.3in]{geometry}',
r'\author{Nick DeRobertis}' ,r'\begin{document}', r'\setlength{{\parskip}}{{{}}}'.format(para_space)]
footers = [r'\end{document}']
name_tex = name + '.tex'
file_path = os.path.join(directory, name_tex)
# Actually write to file
with open(file_path, 'w') as f:
if as_document:
f.write('\n'.join(headers) + '\n')
[f.write(surround_char_beg + '{}'.format(line) + surround_char_end + '\n\n') for line in latex_list]
if below_text:
f.write('\n' + below_text + '\n')
if as_document:
f.write('\n'.join(footers))
os.chdir(directory)
# Only create pdf if we are creating a standalone document
if as_document:
os.system('pdflatex ' + '"' + name_tex + '"') #create pdf
date_time_move_latex(name, directory, 'Equations')
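# Example usage (a minimal sketch; the equations and directory are hypothetical):
#   latex_equations_to_pdf([r'e^{i \pi} + 1 = 0', r'a^2 + b^2 = c^2'],
#                          directory=r'C:\Output', name='Equations', math_size=18)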
/LambdaTool-0.9.5.tar.gz/LambdaTool-0.9.5/lambdatool/template/service/main.py

import os
import json
from flask import request
from zevon import FlaskLambda
'''
The FlaskLambda object that is created is the entry point for the lambda. The
LambdaTool deployer expects this to be called 'lambda_handler'
'''
lambda_handler = FlaskLambda(__name__)
@lambda_handler.route('/', methods=['GET'])
def OK():
'''
Health check endpoint; returns a plain-text 'OK'
Args:
None
Returns:
tuple of (body, status code, content type) that API Gateway understands
'''
return (
'OK',
200,
{'Content-Type': 'text/plain'}
)
@lambda_handler.route('/doc', methods=['GET'])
def document():
'''
Redirect to the README doc
Args:
None
Returns:
tuple of (body, status code, content type) that API Gateway understands
'''
return (
slash_html,
200,
{'Content-Type': 'text/html'}
)
@lambda_handler.route('/answer', methods=['GET'])
def get_answer():
'''
Example of getting something from function.properties
Args:
None
Returns:
tuple of (body, status code, content type) that API Gateway understands
'''
answer = os.environ.get('ANSWER', '0')
args = json.dumps(request.args.copy(), indent=2)
msg = f'answer = {answer} args = {args}'
return (
msg,
200,
{'Content-Type': 'text/plain'}
)
@lambda_handler.route('/example', methods=['GET', 'POST'])
def food():
'''
A contrived example function that will return some meta-data about the
invocation.
Args:
None
Returns:
tuple of (body, status code, content type) that API Gateway understands
'''
data = {
'form': request.form.copy(),
'args': request.args.copy(),
'json': request.json
}
return (
json.dumps(data, indent=4, sort_keys=True),
200,
{'Content-Type': 'application/json'}
)
slash_html = '''<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>LambdaTool Readme</title>
<meta http-equiv="refresh" content="0;URL='https://github.com/muckamuck/lambda-tool/blob/master/README.md'" />
</head>
<body></body>
</html>
'''
if __name__ == '__main__':
lambda_handler.run(debug=True)
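# Local testing sketch (assumes Flask's built-in development server; the
# port below is the Flask default, not a LambdaTool-specific setting):
#   $ python main.py
#   $ curl http://127.0.0.1:5000/answer?x=1
# Deployed behind API Gateway, the same routes are served via lambda_handler.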
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/geometry/fuselage.py

import numpy as np
from aerosandbox import AeroSandboxObject
from aerosandbox.geometry.common import *
from typing import List, Dict, Any, Union, Tuple, Optional, Callable
import copy
class Fuselage(AeroSandboxObject):
"""
Definition for a Fuselage or other slender body (pod, fuel tank, etc.).
Anatomy of a Fuselage:
A fuselage consists chiefly of a collection of cross-sections, or "xsecs". A cross-section is a 2D "slice" of
a fuselage. These can be accessed with `Fuselage.xsecs`, which gives a list of xsecs in the Fuselage. Each
xsec is a FuselageXSec object, a class that is defined separately.
You may also see references to fuselage "sections", which are different from cross-sections (xsecs)! Sections
are the portions of the fuselage that are in between xsecs. In other words, a fuselage with N cross-sections
(xsecs, FuselageXSec objects) will always have N-1 sections. Sections are never explicitly defined,
since you can get all needed information by lofting from the adjacent cross-sections. For example,
section 0 (the first one) is a loft between cross-sections 0 and 1.
Fuselages are lofted linearly between cross-sections.
"""
def __init__(self,
name: Optional[str] = "Untitled",
xsecs: List['FuselageXSec'] = None,
color: Optional[Union[str, Tuple[float]]] = None,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
**kwargs, # Only to allow for capturing of deprecated arguments, don't use this.
):
"""
Defines a new fuselage object.
Args:
name: Name of the fuselage [optional]. It can help when debugging to give each fuselage a sensible name.
xsecs: A list of fuselage cross-sections ("xsecs") in the form of FuselageXSec objects.
color: Determines what color to use for this component when drawing the airplane. Optional,
and for visualization purposes only. If left as None, a default color will be chosen at the time of
drawing (usually, black). Can be any color format recognized by MatPlotLib, namely:
* A RGB or RGBA tuple of floats in the interval [0, 1], e.g., (0.1, 0.2, 0.5, 0.3)
* Case-insensitive hex RGB or RGBA string, e.g., '#0f0f0f80'
* String representation of float value in closed interval [0, 1] for grayscale values, e.g.,
'0.8' for light gray
* Single character shorthand notation for basic colors, e.g., 'k' -> black, 'r' -> red
See also: https://matplotlib.org/stable/tutorials/colors/colors.html
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
"""
### Set defaults
if xsecs is None:
xsecs: List['FuselageXSec'] = []
if analysis_specific_options is None:
analysis_specific_options = {}
### Initialize
self.name = name
self.xsecs = xsecs
self.color = color
self.analysis_specific_options = analysis_specific_options
### Handle deprecated parameters
if 'symmetric' in kwargs:
raise DeprecationWarning(
"The `symmetric` argument for Fuselage objects is deprecated. Make your fuselages separate instead!")
if 'xyz_le' in kwargs:
import warnings
warnings.warn(
"The `xyz_le` input for Fuselage is pending deprecation and will be removed in a future version. Use Fuselage().translate(xyz) instead.",
stacklevel=2
)
self.xsecs = [
xsec.translate(kwargs['xyz_le'])
for xsec in self.xsecs
]
def __repr__(self) -> str:
n_xsecs = len(self.xsecs)
return f"Fuselage '{self.name}' ({len(self.xsecs)} {'xsec' if n_xsecs == 1 else 'xsecs'})"
def add_loft(self,
kind: str,
to_xsec: 'FuselageXSec',
from_xsec: 'FuselageXSec' = None,
n_points: int = 5,
spacing: Callable[[float, float, int], np.ndarray] = np.cosspace,
) -> "Fuselage":
raise NotImplementedError # Function under construction!
### Set defaults
if from_xsec is None:
if len(self.xsecs) == 0:
from_xsec = FuselageXSec(
xyz_c=[0, 0, 0],
width=0,
height=0,
shape=2
)
else:
from_xsec = self.xsecs[-1]
### Define a nondimensional coordinate
t = spacing(0, 1, n_points)
if kind == "linear":
new_xsecs = [
FuselageXSec(
xyz_c=from_xsec.xyz_c * (1 - ti) + to_xsec.xyz_c * ti,
width=from_xsec.width * (1 - ti) + to_xsec.width * ti,
height=from_xsec.height * (1 - ti) + to_xsec.height * ti,
shape=from_xsec.shape * (1 - ti) + to_xsec.shape * ti,
analysis_specific_options=from_xsec.analysis_specific_options,
)
for ti in t
]
elif kind == "ellipsoid-nose":
new_xsecs = [
FuselageXSec(
xyz_c=from_xsec.xyz_c * (1 - ti) + to_xsec.xyz_c * ti,
width=from_xsec.width * (1 - ti) + to_xsec.width * ti,
height=from_xsec.height * (1 - ti) + to_xsec.height * ti,
shape=from_xsec.shape * (1 - ti) + to_xsec.shape * ti,
analysis_specific_options=from_xsec.analysis_specific_options,
)
for ti in t
]
self.xsecs.extend(new_xsecs)
def translate(self,
xyz: Union[np.ndarray, List[float]]
) -> "Fuselage":
"""
Translates the entire Fuselage by a certain amount.
Args:
xyz: The amount to translate the Fuselage, as a 3-element vector.
Returns: A copy of this Fuselage, translated by `xyz`.
"""
new_fuse = copy.copy(self)
new_fuse.xsecs = [
xsec.translate(xyz)
for xsec in new_fuse.xsecs
]
return new_fuse
def area_wetted(self) -> float:
"""
Returns the wetted area of the fuselage.
:return:
"""
area = 0
perimeters = [xsec.xsec_perimeter() for xsec in self.xsecs]
for i in range(len(self.xsecs) - 1):
x_separation = self.xsecs[i + 1].xyz_c[0] - self.xsecs[i].xyz_c[0]
area += (perimeters[i] + perimeters[i + 1]) / 2 * x_separation
return area
def area_projected(self,
type: str = "XY",
) -> float:
"""
Returns the area of the fuselage as projected onto one of the principal planes.
Args:
type: A string, which determines which principal plane to use for projection. One of:
* "XY", in which case the projected area is onto the XY plane (i.e., top-down)
* "XZ", in which case the projected area is onto the XZ plane (i.e., side-view)
Returns: The projected area.
"""
area = 0
for i in range(len(self.xsecs) - 1):
x_separation = self.xsecs[i + 1].xyz_c[0] - self.xsecs[i].xyz_c[0]
if type == "XY":
width_a = self.xsecs[i].width
width_b = self.xsecs[i + 1].width
area += (width_a + width_b) / 2 * x_separation
elif type == "XZ":
height_a = self.xsecs[i].height
height_b = self.xsecs[i + 1].height
area += (height_a + height_b) / 2 * x_separation
else:
raise ValueError("Bad value of `type`!")
return area
def area_base(self) -> float:
"""
Returns the area of the base (i.e. "trailing edge") of the fuselage. Useful for certain types of drag
calculation.
Returns:
"""
return self.xsecs[-1].xsec_area()
def fineness_ratio(
self,
assumed_shape="cylinder",
) -> float:
"""
Approximates the fineness ratio using the volume and length. The fineness ratio of a fuselage is defined as:
FR = length / max_diameter
Args:
assumed_shape: A string, which determines the assumed shape of the fuselage for the approximation. One of:
* "cylinder", in which case the fuselage is assumed to have a cylindrical shape.
* "sears-haack", in which case the fuselage is assumed to have Sears-Haack fuselage shape.
Returns: An approximate value of the fuselage's fineness ratio.
"""
if assumed_shape == "cylinder":
return np.sqrt(
self.length() ** 3 / self.volume() * np.pi / 4
)
elif assumed_shape == "sears-haack":
length = self.length()
r_max = np.sqrt(
self.volume() / length / (3 * np.pi ** 2 / 16)
)
return length / r_max
def length(self) -> float:
"""
Returns the total front-to-back length of the fuselage. Measured as the difference between the x-coordinates
of the leading and trailing cross-sections.
:return:
"""
return np.fabs(self.xsecs[-1].xyz_c[0] - self.xsecs[0].xyz_c[0])
def volume(self,
_sectional: bool = False
) -> Union[float, List[float]]:
"""
Computes the volume of the Fuselage.
Args:
_sectional: A boolean. If False, returns the total volume. If True, returns a list of volumes for each of
the `n-1` lofted sections (between the `n` fuselage cross-sections in fuselage.xsec).
Returns:
The computed volume.
"""
xsec_areas = [
xsec.xsec_area()
for xsec in self.xsecs
]
separations = [
xsec_b.xyz_c[0] - xsec_a.xyz_c[0]
for xsec_a, xsec_b in zip(
self.xsecs[:-1],
self.xsecs[1:]
)
]
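# Each section is treated as a conical frustum, whose volume is
# V = h / 3 * (A_1 + A_2 + sqrt(A_1 * A_2)); the 1e-100 term keeps the
# square root differentiable when either end area is zero.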
sectional_volumes = [
separation / 3 * (area_a + area_b + (area_a * area_b + 1e-100) ** 0.5)
for area_a, area_b, separation in zip(
xsec_areas[1:],
xsec_areas[:-1],
separations
)
]
volume = sum(sectional_volumes)
if _sectional:
return sectional_volumes
else:
return volume
def x_centroid_projected(self,
type: str = "XY",
) -> float:
"""
Returns the x_g coordinate of the centroid of the planform area.
Args:
type: A string, which determines which principal plane to use for projection. One of:
* "XY", in which case the projected area is onto the XY plane (i.e., top-down)
* "XZ", in which case the projected area is onto the XZ plane (i.e., side-view)
Returns: The x_g coordinate of the centroid.
"""
total_x_area_product = 0
total_area = 0
for xsec_a, xsec_b in zip(self.xsecs, self.xsecs[1:]):
x_a = xsec_a.xyz_c[0]
x_b = xsec_b.xyz_c[0]
if type == "XY":
r_a = xsec_a.width / 2
r_b = xsec_b.width / 2
elif type == "XZ":
r_a = xsec_a.height / 2
r_b = xsec_b.height / 2
else:
raise ValueError("Bad value of `type`!")
dx = x_b - x_a
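# Centroid of a trapezoid with parallel sides r_a, r_b spanning dx:
# x_c = x_a + dx * (r_a + 2 * r_b) / (3 * (r_a + r_b))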
x_c = x_a + (r_a + 2 * r_b) / (3 * (r_a + r_b)) * dx
area = (r_a + r_b) / 2 * dx
total_area += area
total_x_area_product += x_c * area
x_centroid = total_x_area_product / total_area
return x_centroid
def mesh_body(self,
method="quad",
tangential_resolution: int = 36,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Meshes the fuselage as a solid (thickened) body.
Uses the `(points, faces)` standard mesh format. For reference on this format, see the documentation in
`aerosandbox.geometry.mesh_utilities`.
Args:
method: A string, which determines whether to mesh the fuselage as a series of quadrilaterals or triangles.
* "quad" meshes the fuselage as a series of quadrilaterals.
* "tri" meshes the fuselage as a series of triangles.
tangential_resolution: An integer, which determines the number of points to use to mesh each cross-section.
Returns: Standard unstructured mesh format: A tuple of`points` and `faces`, where:
* `points` is a `n x 3` array of points, where `n` is the number of points in the mesh.
* `faces` is a `m x 3` array of faces if `method` is "tri", or a `m x 4` array of faces if `method` is "quad".
* Each row of `faces` is a list of indices into `points`, which specifies a face.
"""
t = np.linspace(0, 2 * np.pi, tangential_resolution + 1)[:-1]
points = np.concatenate([
np.stack(
xsec.get_3D_coordinates(theta=t),
axis=1
)
for xsec in self.xsecs
],
axis=0
)
faces = []
num_i = len(self.xsecs)
num_j = len(t)
def index_of(iloc, jloc):
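# Flattens (cross-section index, tangential index) into a row index of
# `points`; the modulo wraps the last tangential point back to the first.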
return iloc * num_j + (jloc % num_j)
def add_face(*indices):
entry = list(indices)
if method == "quad":
faces.append(entry)
elif method == "tri":
faces.append([entry[0], entry[1], entry[3]])
faces.append([entry[1], entry[2], entry[3]])
for i in range(num_i - 1):
for j in range(num_j):
add_face(
index_of(i, j),
index_of(i, j + 1),
index_of(i + 1, j + 1),
index_of(i + 1, j),
)
faces = np.array(faces)
return points, faces
def mesh_line(self,
y_nondim: Union[float, List[float]] = 0.,
z_nondim: Union[float, List[float]] = 0.,
) -> List[np.ndarray]:
"""
Returns points along a line that goes through each of the FuselageXSec objects in this Fuselage.
Args:
y_nondim: The nondimensional (width-normalized) y-coordinate that the line should go through. Can either
be a single value used at all cross-sections, or can be an iterable of values to be used at the
respective cross-sections.
z_nondim: The nondimensional (height-normalized) z-coordinate that the line should go through. Can either
be a single value used at all cross-sections, or can be an iterable of values to be used at the
respective cross-sections.
Returns: A list of points, where each point is a 3-element array of the form `[x, y, z]`. Goes from the nose
to the tail.
"""
points_on_line: List[np.ndarray] = []
try:
if len(y_nondim) != len(self.xsecs):
raise ValueError(
f"If `y_nondim` is an iterable, it should be the same length as `Fuselage.xsecs` ({len(self.xsecs)})."
)
except TypeError:
pass
try:
if len(z_nondim) != len(self.xsecs):
raise ValueError(
f"If `z_nondim` is an iterable, it should be the same length as `Fuselage.xsecs` ({len(self.xsecs)})."
)
except TypeError:
pass
for i, xsec in enumerate(self.xsecs):
origin = xsec.xyz_c
xg_local, yg_local, zg_local = xsec.compute_frame()
try:
xsec_y_nondim = y_nondim[i]
except (TypeError, IndexError):
xsec_y_nondim = y_nondim
try:
xsec_z_nondim = z_nondim[i]
except (TypeError, IndexError):
xsec_z_nondim = z_nondim
xsec_point = origin + (
xsec_y_nondim * (xsec.width / 2) * yg_local +
xsec_z_nondim * (xsec.height / 2) * zg_local
)
points_on_line.append(xsec_point)
return points_on_line
def draw(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw()
**kwargs: Keyword arguments to pass through to Airplane.draw()
Returns: Same return as Airplane.draw()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(fuselages=[self]).draw(*args, **kwargs)
def draw_wireframe(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw_wireframe() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw_wireframe()
**kwargs: Keyword arguments to pass through to Airplane.draw_wireframe()
Returns: Same return as Airplane.draw_wireframe()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(fuselages=[self]).draw_wireframe(*args, **kwargs)
def draw_three_view(self, *args, **kwargs):
"""
An alias to the more general Airplane.draw_three_view() method. See there for documentation.
Args:
*args: Arguments to pass through to Airplane.draw_three_view()
**kwargs: Keyword arguments to pass through to Airplane.draw_three_view()
Returns: Same return as Airplane.draw_three_view()
"""
from aerosandbox.geometry.airplane import Airplane
return Airplane(fuselages=[self]).draw_three_view(*args, **kwargs)
def subdivide_sections(self,
ratio: int,
spacing_function: Callable[[float, float, float], np.ndarray] = np.linspace
) -> "Fuselage":
"""
Generates a new Fuselage that subdivides the existing sections of this Fuselage into several smaller ones. Splits
each section into N=`ratio` smaller subsections by inserting new cross-sections (xsecs) as needed.
This can allow for finer aerodynamic resolution of sectional properties in certain analyses.
Args:
ratio: The number of new sections to split each old section into.
spacing_function: A function that takes in three arguments: the start, end, and number of points to generate.
The default is `np.linspace`, which generates a linearly-spaced array of points.
Other options include `np.cosspace`, which generates a cosine-spaced array of points.
Returns: A new Fuselage object with subdivided sections.
"""
if not (ratio >= 2 and isinstance(ratio, int)):
raise ValueError("`ratio` must be an integer greater than or equal to 2.")
new_xsecs = []
length_fractions_along_section = spacing_function(0, 1, ratio + 1)[:-1]
for xsec_a, xsec_b in zip(self.xsecs[:-1], self.xsecs[1:]):
for s in length_fractions_along_section:
a_weight = 1 - s
b_weight = s
new_xsecs.append(
FuselageXSec(
xyz_c=xsec_a.xyz_c * a_weight + xsec_b.xyz_c * b_weight,
width=xsec_a.width * a_weight + xsec_b.width * b_weight,
height=xsec_a.height * a_weight + xsec_b.height * b_weight,
shape=xsec_a.shape * a_weight + xsec_b.shape * b_weight,
analysis_specific_options=xsec_a.analysis_specific_options,
)
)
new_xsecs.append(self.xsecs[-1])
return Fuselage(
name=self.name,
xsecs=new_xsecs,
analysis_specific_options=self.analysis_specific_options
)
def _compute_frame_of_FuselageXSec(self, index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Computes the local frame of a FuselageXSec, given the index of the FuselageXSec in the Fuselage.xsecs list.
Args:
index: The index of the FuselageXSec in the Fuselage.xsecs list.
Returns: A tuple:
xg_local: The x-axis of the local coordinate frame, in aircraft geometry axes.
yg_local: The y-axis of the local coordinate frame, in aircraft geometry axes.
zg_local: The z-axis of the local coordinate frame, in aircraft geometry axes.
"""
import warnings
warnings.warn(
"Fuselage._compute_frame_of_FuselageXSec() is deprecated. "
"Use FuselageXSec.compute_frame() instead.",
DeprecationWarning
)
return self.xsecs[index].compute_frame()
class FuselageXSec(AeroSandboxObject):
"""
Definition for a fuselage cross-section ("X-section").
"""
def __init__(self,
xyz_c: Union[np.ndarray, List[float]] = None,
xyz_normal: Union[np.ndarray, List[float]] = None,
radius: float = None,
width: float = None,
height: float = None,
shape: float = 2.,
analysis_specific_options: Optional[Dict[type, Dict[str, Any]]] = None,
):
"""
Defines a new Fuselage cross-section.
Fuselage cross-sections are essentially a sketch on a 2D plane.
* This plane is defined by a center point (`xyz_c`) and a normal vector (`xyz_normal`).
* The cross-section is a superellipse shape, which is a generalization of a circle and a square.
It is mathematically defined by three parameters, using `y` and `z` as the two axes:
abs(y / width) ^ shape + abs(z / height) ^ shape = 1
See also: https://en.wikipedia.org/wiki/Superellipse
There are some notable special cases:
* A circle is a special case of a superellipse, where `shape = 2`.
* A square is a special case of a superellipse, where `shape = Inf` (in practice, set this to some
high value like 1000).
* A diamond is a special case of a superellipse, where `shape = 1`.
Must specify either `radius` or both `width` and `height`. Cannot specify both.
Args:
xyz_c: An array-like that represents the xyz-coordinates of the center of this fuselage cross-section,
in geometry axes.
xyz_normal: An array-like that represents the xyz-coordinates of the normal vector of this fuselage
cross-section, in geometry axes.
radius: Radius of the fuselage cross-section.
width: Width of the fuselage cross-section.
height: Height of the fuselage cross-section.
shape: A parameter that determines what shape the cross-section is. Should be in the range 1 < shape < infinity.
In short, here's how to interpret this value:
* shape=2 is a circle.
* shape=1 is a diamond shape.
* A high value of, say, 10, will get you a square-ish shape.
To be more precise:
* If the `shape` parameter is `s`, then the corresponding shape is the same as a level-set of a L^s norm in R^2.
* Defined another way, if the `shape` parameter is `s`, then the shape is the solution to the equation:
* x^s + y^s = 1 in the first quadrant (x>0, y>0); then mirrored for all four quadrants.
analysis_specific_options: Analysis-specific options are additional constants or modeling assumptions
that should be passed on to specific analyses and associated with this specific geometry object.
This should be a dictionary where:
* Keys are specific analysis types (typically a subclass of asb.ExplicitAnalysis or
asb.ImplicitAnalysis), but if you decide to write your own analysis and want to make this key
something else (like a string), that's totally fine - it's just a unique identifier for the
specific analysis you're running.
* Values are a dictionary of key:value pairs, where:
* Keys are strings.
* Values are some value you want to assign.
This is more easily demonstrated / understood with an example:
>>> analysis_specific_options = {
>>> asb.AeroBuildup: dict(
>>> include_wave_drag=True,
>>> )
>>> }
"""
### Set defaults
if xyz_c is None:
xyz_c = np.array([0., 0., 0.])
if xyz_normal is None:
xyz_normal = np.array([1., 0., 0.]) # points backwards
if analysis_specific_options is None:
analysis_specific_options = {}
### Set width and height
radius_specified = (radius is not None)
width_height_specified = [
(width is not None),
(height is not None)
]
if radius_specified:
if any(width_height_specified):
raise ValueError(
"Cannot specify both `radius` and (`width`, `height`) parameters - must be one or the other."
)
self.width = 2 * radius
self.height = 2 * radius
else:
if not all(width_height_specified):
raise ValueError(
"Must specify either `radius` or both (`width`, `height`) parameters."
)
self.width = width
self.height = height
### Initialize
self.xyz_c = np.array(xyz_c)
self.xyz_normal = np.array(xyz_normal)
self.shape = shape
self.analysis_specific_options = analysis_specific_options
def __repr__(self) -> str:
return f"FuselageXSec (xyz_c: {self.xyz_c}, width: {self.width}, height: {self.height}, shape: {self.shape})"
def xsec_area(self):
"""
Computes the FuselageXSec's cross-sectional (xsec) area.
The computation method is a closed-form approximation for the area of a superellipse. The exact equation for
the area of a superellipse with shape parameter `s` is:
area = width * height * (gamma(1 + 1/s))^2 / gamma(1 + 2/s)
where gamma() is the gamma function. The gamma function is (relatively) computationally expensive to evaluate
and differentiate, so we replace this area calculation with a closed-form approximation (with essentially no
loss in accuracy):
area = width * height / (s^-1.8717618013591173 + 1)
This approximation has the following properties:
* It is numerically exact for the case of s = 1 (a diamond)
* It is numerically exact for the case of s = 2 (a circle)
* It is correct in the asymptotic limit where s -> infinity (a square)
* In the range of sensible s values (1 < s < infinity), its error is less than 0.6%.
* It always produces a positive area for any physically-meaningful value of s (s > 0). In the range of s
values where s is physically-meaningful but not in a sensible range (0 < s < 1), this equation will
over-predict area.
The value of the constant seen in this expression (1.872...) is given by -log(4/pi - 1) / log(2), and it is
chosen as such so that the expression is exactly correct in the s=2 (circle) case.
Returns:
"""
area = self.width * self.height / (self.shape ** -1.8717618013591173 + 1)
return area
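# Quick sanity check (a sketch): FuselageXSec(radius=1).xsec_area()
# returns ~pi, since shape defaults to 2 (a circle of radius 1).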
def xsec_perimeter(self):
"""
Computes the FuselageXSec's perimeter. ("Circumference" in the case of a circular cross-section.)
The computation method is a closed-form approximation for the perimeter of a superellipse. The exact equation
for the perimeter of a superellipse is quite long and is not repeated here for brevity; a Google search will
bring it up. More importantly, this exact equation can only be represented as an infinite sum - not
particularly useful for fast computation.
We replace this exact equation with the following closed-form approximation obtained from symbolic regression:
Imagine a superellipse centered on the origin of a 2D plane. Now, imagine that the superellipse is
stretched such that the first quadrant (e.g., x>0, y>0) goes from (1, 0) to (0, h). Assume it has shape
parameter s (where, as a reminder, s=1 is a diamond, s=2 is a circle, s=Inf is a square).
Then, the perimeter of that single quadrant is:
h + (((((s-0.88487077) * h + 0.2588574 / h) ^ exp(s / -0.90069205)) + h) + 0.09919785) ^ (-1.4812293 / s)
See `AeroSandbox/studies/SuperellipseProperties` for details about how this was obtained.
We can extrapolate from here to the general case of a superellipse, as shown in the code below.
This approximation has the following properties:
* For the s=1 case (diamond), the error is +0.2%.
* For the s=2 case (circle), the error is -0.1%.
* In the s -> infinity limit (square), the error is +0.1%.
Returns:
"""
try:
if self.width == 0:
return 2 * self.height
elif self.height == 0:
return 2 * self.width
except RuntimeError: # Will error if width and height are optimization variables, as truthiness is indeterminate
pass
s = self.shape
h = np.maximum(
(self.width + 1e-16) / (self.height + 1e-16),
(self.height + 1e-16) / (self.width + 1e-16)
)
nondim_quadrant_perimeter = (
h + (((((s - 0.88487077) * h + 0.2588574 / h) ** np.exp(s / -0.90069205)) + h) + 0.09919785) ** (
-1.4812293 / s)
)
perimeter = 2 * nondim_quadrant_perimeter * np.minimum(self.width, self.height)
return np.where(
self.width == 0,
2 * self.height,
np.where(
self.height == 0,
2 * self.width,
perimeter
)
)
def compute_frame(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Computes the local coordinate frame of the FuselageXSec, in aircraft geometry axes.
xg_local is aligned with the FuselageXSec's normal vector.
zg_local is roughly aligned with the z-axis of the aircraft geometry axes, but projected onto the FuselageXSec's plane.
yg_local is the cross product of zg_local and xg_local.
Returns: A tuple:
xg_local: The x-axis of the local coordinate frame, in aircraft geometry axes.
yg_local: The y-axis of the local coordinate frame, in aircraft geometry axes.
zg_local: The z-axis of the local coordinate frame, in aircraft geometry axes.
"""
xyz_normal = self.xyz_normal / np.linalg.norm(self.xyz_normal)
xg_local = xyz_normal
zg_local = np.array([0, 0, 1])
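# Gram-Schmidt step: remove the component of the global z-axis along
# xg_local, leaving a vector in the cross-section's plane.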
zg_local = zg_local - np.dot(zg_local, xg_local) * xg_local
zg_local = zg_local / np.linalg.norm(zg_local)  # re-normalize so the local axes stay unit length
yg_local = np.cross(zg_local, xg_local)
return xg_local, yg_local, zg_local
def get_3D_coordinates(self,
theta: Union[float, np.ndarray] = None
) -> Tuple[Union[float, np.ndarray]]:
"""
Samples points from the perimeter of this FuselageXSec.
Args:
theta: Coordinate in the tangential-ish direction to sample points at. Given in the 2D FuselageXSec
coordinate system, where:
* y_2D points along the (global) y_g
* z_2D points along the (global) z_g
In other words, a value of:
* theta=0 -> samples points from the right side of the FuselageXSec
* theta=pi/2 -> samples points from the top of the FuselageXSec
* theta=pi -> samples points from the left side of the FuselageXSec
* theta=3pi/2 -> samples points from the bottom of the FuselageXSec
Returns: Points sampled from the perimeter of the FuselageXSec, as a [x, y, z] tuple.
If theta is a float, then each of x, y, and z will be floats.
If theta is an array, then x, y, and z will also be arrays of the same size.
"""
### Set defaults
if theta is None:
theta = np.linspace(
0,
2 * np.pi,
60 + 1
)[:-1]
st = np.sin(np.mod(theta, 2 * np.pi))
ct = np.cos(np.mod(theta, 2 * np.pi))
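# Superellipse parametrization: |2y/width|^shape + |2z/height|^shape = 1
# is satisfied by y = (width/2) * sign(cos t) * |cos t|^(2/shape) and
# z = (height/2) * sign(sin t) * |sin t|^(2/shape).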
y = (self.width / 2) * np.abs(ct) ** (2 / self.shape) * np.where(ct > 0, 1, -1)
z = (self.height / 2) * np.abs(st) ** (2 / self.shape) * np.where(st > 0, 1, -1)
xg_local, yg_local, zg_local = self.compute_frame()
return (
self.xyz_c[0] + y * yg_local[0] + z * zg_local[0],
self.xyz_c[1] + y * yg_local[1] + z * zg_local[1],
self.xyz_c[2] + y * yg_local[2] + z * zg_local[2],
)
def equivalent_radius(self,
preserve="area"
) -> float:
"""
Computes an equivalent radius for non-circular cross-sections. This may be necessary when doing analysis that
uses axisymmetric assumptions.
Can either hold area or perimeter fixed, depending on whether cross-sectional area or wetted area is more
important.
Args:
preserve: One of:
* "area": holds the cross-sectional area constant
* "perimeter": holds the cross-sectional perimeter (i.e., the wetted area of the Fuselage) constant
Returns: An equivalent radius value.
"""
if preserve == "area":
return (self.xsec_area() / np.pi + 1e-16) ** 0.5
elif preserve == "perimeter":
return (self.xsec_perimeter() / (2 * np.pi))
else:
raise ValueError("Bad value of `preserve`!")
def translate(self,
xyz: Union[np.ndarray, List[float]]
) -> "FuselageXSec":
"""
Returns a copy of this FuselageXSec that has been translated by `xyz`.
Args:
xyz: The amount to translate the FuselageXSec. Given as a 3-element NumPy vector.
Returns: A copy of this FuselageXSec, translated by `xyz`.
"""
new_xsec = copy.copy(self)
new_xsec.xyz_c = new_xsec.xyz_c + np.array(xyz)
return new_xsec
if __name__ == '__main__':
fuse = Fuselage(
xsecs=[
FuselageXSec(
xyz_c=[0, 0, 1],
radius=0,
),
FuselageXSec(
xyz_c=[1, 0, 1],
width=0.5,
height=0.2,
shape=5
),
FuselageXSec(
xyz_c=[2, 0, 1],
radius=0.2,
)
]
).translate([0, 0, 2])
fuse.draw()
/GSAS-II-WONDER_linux-1.0.1.tar.gz/GSAS-II-WONDER_linux-1.0.1/GSAS-II-WONDER/GSASIIpwdGUI.py

from __future__ import division, print_function
import platform
import sys
import os.path
# Don't depend on graphics for scriptable
try:
import wx
import wx.grid as wg
except ImportError:
pass
import numpy as np
import numpy.linalg as nl
import numpy.ma as ma
import math
import copy
import random as ran
if '2' in platform.python_version_tuple()[0]:
import cPickle
else:
import pickle as cPickle
import scipy.interpolate as si
import GSASIIpath
GSASIIpath.SetVersionNumber("$Revision: 4146 $")
import GSASIImath as G2mth
import GSASIIpwd as G2pwd
import GSASIIfiles as G2fil
import GSASIIobj as G2obj
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIIindex as G2indx
import GSASIIplot as G2plt
import GSASIIdataGUI as G2gd
import GSASIIphsGUI as G2phsG
import GSASIIctrlGUI as G2G
import GSASIIElemGUI as G2elemGUI
import GSASIIElem as G2elem
import GSASIIsasd as G2sasd
import G2shapes
VERY_LIGHT_GREY = wx.Colour(235,235,235)
WACV = wx.ALIGN_CENTER_VERTICAL
if '2' in platform.python_version_tuple()[0]:
GkDelta = unichr(0x0394)
Pwr10 = unichr(0x0b9)+unichr(0x2070)
Pwr20 = unichr(0x0b2)+unichr(0x2070)
Pwrm1 = unichr(0x207b)+unichr(0x0b9)
Pwrm2 = unichr(0x207b)+unichr(0x0b2)
Pwrm6 = unichr(0x207b)+unichr(0x2076)
Pwrm4 = unichr(0x207b)+unichr(0x2074)
Angstr = unichr(0x00c5)
else:
GkDelta = chr(0x0394)
Pwr10 = chr(0x0b9)+chr(0x2070)
Pwr20 = chr(0x0b2)+chr(0x2070)
Pwrm1 = chr(0x207b)+chr(0x0b9)
Pwrm2 = chr(0x207b)+chr(0x0b2)
Pwrm6 = chr(0x207b)+chr(0x2076)
Pwrm4 = chr(0x207b)+chr(0x2074)
Angstr = chr(0x00c5)
# trig functions in degrees
sind = lambda x: math.sin(x*math.pi/180.)
tand = lambda x: math.tan(x*math.pi/180.)
cosd = lambda x: math.cos(x*math.pi/180.)
asind = lambda x: 180.*math.asin(x)/math.pi
################################################################################
###### class definitions
################################################################################
class SubCellsDialog(wx.Dialog):
def __init__(self,parent,title,controls,SGData,items,phaseDict):
wx.Dialog.__init__(self,parent,-1,title,
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = None
self.controls = controls
self.SGData = SGData #for parent phase
self.items = items
self.phaseDict = phaseDict
self.Draw()
def Draw(self):
def RefreshGrid(event):
r,c = event.GetRow(),event.GetCol()
br = self.items[r]
phase = self.phaseDict[br]
rLab = magDisplay.GetRowLabelValue(r)
pname = '(%s) %s'%(rLab,phase['Name'])
if c == 0:
mSGData = phase['SGData']
text,table = G2spc.SGPrint(mSGData,AddInv=True)
if 'magAtms' in phase:
msg = 'Magnetic space group information'
text[0] = ' Magnetic Space Group: '+mSGData['MagSpGrp']
text[3] = ' The magnetic lattice point group is '+mSGData['MagPtGp']
OprNames,SpnFlp = G2spc.GenMagOps(mSGData)
G2G.SGMagSpinBox(self.panel,msg,text,table,mSGData['SGCen'],OprNames,
mSGData['SpnFlp'],False).Show()
else:
msg = 'Space Group Information'
G2G.SGMessageBox(self.panel,msg,text,table).Show()
elif c == 1:
maxequiv = phase['maxequiv']
mSGData = phase['SGData']
Uvec = phase['Uvec']
Trans = phase['Trans']
ifMag = False
if 'magAtms' in phase:
ifMag = True
allmom = phase.get('allmom',False)
magAtms = phase.get('magAtms','')
mAtoms = TestMagAtoms(phase,magAtms,self.SGData,Uvec,Trans,allmom,maxequiv)
else:
mAtoms = TestAtoms(phase,self.controls[15],self.SGData,Uvec,Trans,maxequiv)
Atms = []
AtCods = []
atMxyz = []
for ia,atom in enumerate(mAtoms):
atom[0] += '_%d'%ia
SytSym,Mul,Nop,dupDir = G2spc.SytSym(atom[2:5],mSGData)
Atms.append(atom[:2]+['',]+atom[2:5])
AtCods.append('1')
if 'magAtms' in phase:
MagSytSym = G2spc.MagSytSym(SytSym,dupDir,mSGData)
CSI = G2spc.GetCSpqinel(mSGData['SpnFlp'],dupDir)
atMxyz.append([MagSytSym,CSI[0]])
else:
CSI = G2spc.GetCSxinel(SytSym)
atMxyz.append([SytSym,CSI[0]])
G2phsG.UseMagAtomDialog(self.panel,pname,Atms,AtCods,atMxyz,ifMag=ifMag,ifOK=True).Show()
elif c in [2,3]:
if c == 2:
title = 'Conjugacy list for '+pname
items = phase['altList']
elif c == 3:
title = 'Super groups list for '+pname
items = phase['supList']
if not items[0]:
wx.MessageBox(pname+' is a maximal subgroup',caption='Super group is parent',style=wx.ICON_INFORMATION)
return
SubCellsDialog(self.panel,title,self.controls,self.SGData,items,self.phaseDict).Show()
if self.panel: self.panel.Destroy()
self.panel = wx.Panel(self)
rowLabels = [str(i+1) for i in range(len(self.items))]
colLabels = ['Space Gp','Uniq','nConj','nSup','Trans','Vec','a','b','c','alpha','beta','gamma','Volume']
Types = [wg.GRID_VALUE_STRING,]+3*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_STRING,]+ \
3*[wg.GRID_VALUE_FLOAT+':10,5',]+3*[wg.GRID_VALUE_FLOAT+':10,3',]+[wg.GRID_VALUE_FLOAT+':10,2']
table = []
for ip in self.items:
phase = self.phaseDict[ip]
natms = phase.get('nAtoms',1)
try:
nConj = len(phase['altList'])
nSup = len(phase['supList'])
except KeyError:
nConj = 0
nSup = 0
cell = list(phase['Cell'])
trans = G2spc.Trans2Text(phase['Trans'])
vec = G2spc.Latt2text([phase['Uvec'],])
row = [phase['Name'],natms,nConj,nSup,trans,vec]+cell
table.append(row)
CellsTable = G2G.Table(table,rowLabels=rowLabels,colLabels=colLabels,types=Types)
mainSizer = wx.BoxSizer(wx.VERTICAL)
magDisplay = G2G.GSGrid(self.panel)
magDisplay.SetTable(CellsTable, True)
magDisplay.Bind(wg.EVT_GRID_CELL_LEFT_CLICK,RefreshGrid)
magDisplay.AutoSizeColumns(False)
mainSizer.Add(magDisplay,0,WACV)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.Destroy()
# self.EndModal(wx.ID_OK)
class RDFDialog(wx.Dialog):
def __init__(self,parent):
wx.Dialog.__init__(self,parent,-1,'Background radial distribution function',
pos=wx.DefaultPosition,style=wx.DEFAULT_DIALOG_STYLE)
self.panel = None
self.result = {'UseObsCalc':'obs-calc','maxR':20.0,'Smooth':'linear'}
self.Draw()
def Draw(self):
def OnUseOC(event):
self.result['UseObsCalc'] = useOC.GetValue()
def OnSmCombo(event):
self.result['Smooth'] = smCombo.GetValue()
if self.panel: self.panel.Destroy()
self.panel = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(wx.StaticText(self.panel,label='Background RDF controls:'),0,WACV)
plotType = wx.BoxSizer(wx.HORIZONTAL)
plotType.Add(wx.StaticText(self.panel,label=' Select plot type:'),0,WACV)
Choices = ['obs-back','calc-back','obs-calc']
useOC = wx.ComboBox(self.panel,value=Choices[2],choices=Choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
useOC.SetValue(self.result['UseObsCalc'])
useOC.Bind(wx.EVT_COMBOBOX,OnUseOC)
plotType.Add(useOC,0,WACV)
mainSizer.Add(plotType,0,WACV)
dataSizer = wx.BoxSizer(wx.HORIZONTAL)
dataSizer.Add(wx.StaticText(self.panel,label=' Smoothing type: '),0,WACV)
smChoice = ['linear','nearest',]
smCombo = wx.ComboBox(self.panel,value=self.result['Smooth'],choices=smChoice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
smCombo.Bind(wx.EVT_COMBOBOX, OnSmCombo)
dataSizer.Add(smCombo,0,WACV)
dataSizer.Add(wx.StaticText(self.panel,label=' Maximum radial dist.: '),0,WACV)
maxR = G2G.ValidatedTxtCtrl(self.panel,self.result,'maxR',nDig=(10,1),min=10.,max=50.,
typeHint=float)
dataSizer.Add(maxR,0,WACV)
mainSizer.Add(dataSizer,0,WACV)
OkBtn = wx.Button(self.panel,-1,"Ok")
OkBtn.Bind(wx.EVT_BUTTON, self.OnOk)
cancelBtn = wx.Button(self.panel,-1,"Cancel")
cancelBtn.Bind(wx.EVT_BUTTON, self.OnCancel)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.Add((20,20),1)
btnSizer.Add(OkBtn)
btnSizer.Add((20,20),1)
btnSizer.Add(cancelBtn)
btnSizer.Add((20,20),1)
mainSizer.Add(btnSizer,0,wx.EXPAND|wx.BOTTOM|wx.TOP, 10)
self.panel.SetSizer(mainSizer)
self.panel.Fit()
self.Fit()
def GetSelection(self):
return self.result
def OnOk(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_OK)
def OnCancel(self,event):
parent = self.GetParent()
parent.Raise()
self.EndModal(wx.ID_CANCEL)
################################################################################
##### Setup routines
################################################################################
def GetFileBackground(G2frame,xye,Pattern):
bxye = np.zeros(len(xye[1]))
if 'BackFile' in Pattern[0]:
backfile,mult = Pattern[0]['BackFile'][:2]
if backfile:
bId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,backfile)
if bId:
bxye = mult*G2frame.GPXtree.GetItemPyData(bId)[1][1]
else:
print('Error: background PWDR {} not found'.format(backfile))
Pattern[0]['BackFile'][0] = ''
return bxye
def IsHistogramInAnyPhase(G2frame,histoName):
'''Tests a Histogram to see if it is linked to any phases.
Returns the name of the first phase where the histogram is used, or False if none.
'''
phases = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
if phases:
item, cookie = G2frame.GPXtree.GetFirstChild(phases)
while item:
data = G2frame.GPXtree.GetItemPyData(item)
histoList = data['Histograms'].keys()
if histoName in histoList:
return G2frame.GPXtree.GetItemText(item)
item, cookie = G2frame.GPXtree.GetNextChild(phases, cookie)
return False
else:
return False
def SetupSampleLabels(histName,dataType,histType):
'''Setup a list of labels and number formatting for use in
labeling sample parameters.
:param str histName: Name of histogram, ("PWDR ...")
:param str dataType:
'''
parms = []
parms.append(['Scale','Histogram scale factor: ',[10,7]])
if 'C' in histType:
parms.append(['Gonio. radius','Goniometer radius (mm): ',[10,3]])
if 'PWDR' in histName:
if dataType == 'Debye-Scherrer':
if 'T' in histType:
parms += [['Absorption',u'Sample absorption (\xb5\xb7r/l): ',[10,4]],]
else:
parms += [['DisplaceX',u'Sample X displ. perp. to beam (\xb5m): ',[10,3]],
['DisplaceY',u'Sample Y displ. || to beam (\xb5m): ',[10,3]],
['Absorption',u'Sample absorption (\xb5\xb7r): ',[10,4]],]
elif dataType == 'Bragg-Brentano':
parms += [['Shift',u'Sample displacement(\xb5m): ',[10,4]],
['Transparency',u'Sample transparency(1/\xb5eff, cm): ',[10,3]],
['SurfRoughA','Surface roughness A: ',[10,4]],
['SurfRoughB','Surface roughness B: ',[10,4]]]
elif 'SASD' in histName:
parms.append(['Thick','Sample thickness (mm)',[10,3]])
parms.append(['Trans','Transmission (meas)',[10,3]])
parms.append(['SlitLen',u'Slit length (Q,\xc5'+Pwrm1+')',[10,3]])
parms.append(['Omega','Goniometer omega:',[10,3]])
parms.append(['Chi','Goniometer chi:',[10,3]])
parms.append(['Phi','Goniometer phi:',[10,3]])
parms.append(['Azimuth','Detector azimuth:',[10,3]])
parms.append(['Time','Clock time (s):',[12,3]])
parms.append(['Temperature','Sample temperature (K): ',[10,3]])
parms.append(['Pressure','Sample pressure (MPa): ',[10,3]])
return parms
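# Example (a minimal sketch; the histogram name is hypothetical):
#   parms = SetupSampleLabels('PWDR mydata.xye', 'Debye-Scherrer', 'PXC')
#   # Each entry of `parms` is [key, label, [field width, precision]] for the GUI.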
def SetDefaultSASDModel():
'Fills in default items for the SASD Models dictionary'
return {'Back':[0.0,False],
'Size':{'MinDiam':50,'MaxDiam':10000,'Nbins':100,'logBins':True,'Method':'MaxEnt',
'Distribution':[],'Shape':['Spheroid',1.0],
'MaxEnt':{'Niter':100,'Precision':0.01,'Sky':-3},
'IPG':{'Niter':100,'Approach':0.8,'Power':-1},'Reg':{},},
'Pair':{'Method':'Moore','MaxRadius':100.,'NBins':100,'Errors':'User',
'Percent error':2.5,'Background':[0,False],'Distribution':[],
'Moore':10,'Dist G':100.,'Result':[],},
'Particle':{'Matrix':{'Name':'vacuum','VolFrac':[0.0,False]},'Levels':[],},
'Shapes':{'outName':'run','NumAA':100,'Niter':1,'AAscale':1.0,'Symm':1,'bias-z':0.0,
'inflateV':1.0,'AAglue':0.0,'pdbOut':False,'boxStep':4.0},
'Current':'Size dist.','BackFile':'',
}
def SetDefaultREFDModel():
'''Fills in default items for the REFD Models dictionary which are
defined as follows for each layer:
* Name: name of substance
* Thick: thickness of layer in Angstroms (not present for top & bottom layers)
* Rough: upper surface roughness for layer (not present for top layer)
* Penetration: mixing of layer substance into the layer above - is this needed?
* DenMul: multiplier for layer scattering density (default = 1.0)
Top layer defaults to vacuum (or air/any gas); can be substituted for some other substance.
Bottom layer default: infinitely thisck Silicon; can be substituted for some other substance.
'''
return {'Layers':[{'Name':'vacuum','DenMul':[1.0,False],}, #top layer
{'Name':'vacuum','Rough':[0.,False],'Penetration':[0.,False],'DenMul':[1.0,False],}], #bottom layer
'Scale':[1.0,False],'FltBack':[0.0,False],'Zero':'Top','dQ type':'None','Layer Seq':[], #globals
'Minimizer':'LMLS','Resolution':[0.,'Const dq/q'],'Recomb':0.5,'Toler':0.5, #minimizer controls
'DualFitFiles':['',],'DualFltBacks':[[0.0,False],],'DualScales':[[1.0,False],]} #optional stuff for multidat fits?
def SetDefaultSubstances():
'Fills in default items for the SASD Substances dictionary'
return {'Substances':{'vacuum':{'Elements':{},'Volume':1.0,'Density':0.0,'Scatt density':0.0,'XImag density':0.0},
'unit scatter':{'Elements':None,'Volume':None,'Density':None,'Scatt density':1.0,'XImag density':1.0}}}
def GetFileList(G2frame,fileType):
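    '''Get the names of all data tree entries whose first word matches
    fileType (e.g. 'PWDR' or 'SASD').
    '''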
fileList = []
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while Id:
name = G2frame.GPXtree.GetItemText(Id)
if fileType in name.split()[0]:
fileList.append(name)
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
return fileList
def GetHistsLikeSelected(G2frame):
'''Get the histograms that match the current selected one:
The histogram prefix and data type (PXC etc.), the number of
wavelengths and the instrument geometry (Debye-Scherrer etc.)
must all match. The current histogram is not included in the list.
:param wx.Frame G2frame: pointer to main GSAS-II data tree
'''
histList = []
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))
hType = inst['Type'][0]
if 'Lam1' in inst:
hLam = 2
elif 'Lam' in inst:
hLam = 1
else:
hLam = 0
sample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Sample Parameters'))
# hGeom = sample.get('Type')
hstName = G2frame.GPXtree.GetItemText(G2frame.PatternId)
hPrefix = hstName.split()[0]+' '
# cycle through tree looking for items that match the above
item, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while item:
name = G2frame.GPXtree.GetItemText(item)
if name.startswith(hPrefix) and name != hstName:
cGeom,cType,cLam, = '?','?',-1
subitem, subcookie = G2frame.GPXtree.GetFirstChild(item)
while subitem:
subname = G2frame.GPXtree.GetItemText(subitem)
if subname == 'Sample Parameters':
sample = G2frame.GPXtree.GetItemPyData(subitem)
# cGeom = sample.get('Type')
elif subname == 'Instrument Parameters':
inst,inst2 = G2frame.GPXtree.GetItemPyData(subitem)
cType = inst['Type'][0]
if 'Lam1' in inst:
cLam = 2
elif 'Lam' in inst:
cLam = 1
else:
cLam = 0
subitem, subcookie = G2frame.GPXtree.GetNextChild(item, subcookie)
if cLam == hLam and cType == hType: # and cGeom == hGeom:
if name not in histList: histList.append(name)
item, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
return histList
def SetCopyNames(histName,dataType,addNames=[]):
'''Determine the items in the sample parameters that should be copied,
depending on the histogram type and the instrument type.
'''
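    # e.g. SetCopyNames('PWDR data','Debye-Scherrer') returns
    #   ('PWDR',['Scale','DisplaceX','DisplaceY','Absorption'])  (illustrative)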
copyNames = ['Scale',]
histType = 'HKLF'
if 'PWDR' in histName:
histType = 'PWDR'
if 'Debye' in dataType:
copyNames += ['DisplaceX','DisplaceY','Absorption']
else: #Bragg-Brentano
copyNames += ['Shift','Transparency','SurfRoughA','SurfRoughB']
elif 'SASD' in histName:
histType = 'SASD'
copyNames += ['Materials','Thick',]
if len(addNames):
copyNames += addNames
return histType,copyNames
def CopyPlotCtrls(G2frame):
'''Global copy: Copy plot controls from current histogram to others.
'''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No other histograms match '+hst,G2frame)
return
sourceData = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
if 'Offset' not in sourceData[0]: #patch for old data
sourceData[0].update({'Offset':[0.0,0.0],'delOffset':0.02,'refOffset':-1.0,
'refDelt':0.01,})
G2frame.GPXtree.SetItemPyData(G2frame.PatternId,sourceData)
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy plot controls from\n'+str(hst[5:])+' to...',
'Copy plot controls', histList)
results = []
try:
if dlg.ShowModal() == wx.ID_OK:
results = dlg.GetSelections()
finally:
dlg.Destroy()
copyList = []
for i in results:
copyList.append(histList[i])
keys = ['Offset','delOffset','refOffset','refDelt']
source = dict(zip(keys,[sourceData[0][item] for item in keys]))
for hist in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
data = G2frame.GPXtree.GetItemPyData(Id)
data[0].update(source)
G2frame.GPXtree.SetItemPyData(Id,data)
print ('Copy of plot controls successful')
def CopySelectedHistItems(G2frame):
'''Global copy: Copy items from current histogram to others.
'''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No other histograms match '+hst,G2frame)
return
choices = ['Limits','Background','Instrument Parameters','Sample Parameters']
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy which histogram sections from\n'+str(hst[5:]),
'Select copy sections', choices, filterBox=False)
dlg.SetSelections(range(len(choices)))
    choiceList = []
    if dlg.ShowModal() == wx.ID_OK:
        choiceList = [choices[i] for i in dlg.GetSelections()]
    dlg.Destroy()
    if not choiceList: return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy parameters from\n'+str(hst[5:])+' to...',
'Copy parameters', histList)
results = []
try:
if dlg.ShowModal() == wx.ID_OK:
results = dlg.GetSelections()
finally:
dlg.Destroy()
copyList = []
for i in results:
copyList.append(histList[i])
if 'Limits' in choiceList: # Limits
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Limits'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Limits'),
copy.deepcopy(data))
if 'Background' in choiceList: # Background
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Background'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Background'),
copy.deepcopy(data))
if 'Instrument Parameters' in choiceList: # Instrument Parameters
# for now all items in Inst. parms are copied
data,data1 = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId,'Instrument Parameters'))
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters')
)[0].update(copy.deepcopy(data))
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters')
)[1].update(copy.deepcopy(data1))
if 'Sample Parameters' in choiceList: # Sample Parameters
data = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId,'Sample Parameters'))
# selects items to be copied
histType,copyNames = SetCopyNames(hst,data['Type'],
addNames = ['Omega','Chi','Phi','Gonio. radius','InstrName'])
copyDict = {parm:data[parm] for parm in copyNames}
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters')
).update(copy.deepcopy(copyDict))
def TestMagAtoms(phase,magAtms,SGData,Uvec,Trans,allmom,maxequiv=100,maximal=False):
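    '''Tests a candidate magnetic phase generated by transforming the parent
    cell (Uvec,Trans): expands each magnetic atom and checks the site symmetry
    of every generated position for an allowed moment; sets phase['Keep']
    accordingly and returns the unique atoms in the transformed cell.
    '''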
found = False
anymom = False
phase['Keep'] = False
if not magAtms:
phase['Keep'] = True
return []
invTrans = nl.inv(Trans)
atCodes = []
Phase = {'General':{'AtomPtrs':[2,1],'SGData':copy.deepcopy(phase['SGData'])},'Atoms':[]}
for matm in magAtms:
XYZ = G2spc.GenAtom(matm[3:6],SGData,False,Move=True)
xyzs = [xyz[0] for xyz in XYZ]
atCodes += len(xyzs)*['1',]
xyzs,atCodes = G2lat.ExpandCell(xyzs,atCodes,0,Trans)
for ix,x in enumerate(xyzs):
xyz = G2lat.TransformXYZ(x-Uvec,invTrans.T,np.zeros(3))%1.
Phase['Atoms'].append(matm[:2]+list(xyz))
SytSym,Mul,Nop,dupDir = G2spc.SytSym(xyz,phase['SGData'])
CSI = G2spc.GetCSpqinel(phase['SGData']['SpnFlp'],dupDir)
if any(CSI[0]):
anymom = True
if allmom:
if not any(CSI[0]):
phase['Keep'] = False
found = True
uAtms = G2lat.GetUnique(Phase,atCodes)[0]
natm = len(uAtms)
if anymom and natm <= maxequiv and not found:
phase['Keep'] = True
if maximal and phase['supList'][0]:
phase['Keep'] = False
return uAtms
def TestAtoms(phase,magAtms,SGData,Uvec,Trans,maxequiv=100,maximal=False):
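    '''As TestMagAtoms, but without the moment test: expands magAtms into the
    transformed cell and clears phase['Keep'] if more than maxequiv unique
    atoms result (or if maximal is True and phase['supList'][0] is set).
    '''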
phase['Keep'] = True
invTrans = nl.inv(Trans)
atCodes = []
Phase = {'General':{'AtomPtrs':[2,1],'SGData':copy.deepcopy(phase['SGData'])},'Atoms':[]}
for matm in magAtms:
XYZ = G2spc.GenAtom(matm[3:6],SGData,False,Move=True)
xyzs = [xyz[0] for xyz in XYZ]
atCodes += len(xyzs)*['1',]
xyzs,atCodes = G2lat.ExpandCell(xyzs,atCodes,0,Trans)
for ix,x in enumerate(xyzs):
xyz = G2lat.TransformXYZ(x-Uvec,invTrans.T,np.zeros(3))%1.
Phase['Atoms'].append(matm[:2]+list(xyz))
uAtms = G2lat.GetUnique(Phase,atCodes)[0]
natm = len(uAtms)
if natm > maxequiv: #too many allowed atoms found
phase['Keep'] = False
if maximal and phase['supList'][0]:
phase['Keep'] = False
return uAtms
################################################################################
##### Powder Peaks
################################################################################
def UpdatePeakGrid(G2frame, data):
'''respond to selection of PWDR powder peaks data tree item.
'''
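    # data is the 'Peak List' tree entry: {'peaks':[...],'sigDict':{...}} where
    # each peak row interleaves values with refine flags (see colLabels below)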
def OnAutoSearch(event):
PatternId = G2frame.PatternId
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
profile = Pattern[1]
bxye = GetFileBackground(G2frame,profile,Pattern)
x0 = profile[0]
iBeg = np.searchsorted(x0,limits[0])
iFin = np.searchsorted(x0,limits[1])
x = x0[iBeg:iFin]
y0 = (profile[1]+bxye)[iBeg:iFin]
ysig = 1.0*np.std(y0)
offset = [-1,1]
ymask = ma.array(y0,mask=(y0<ysig))
for off in offset:
ymask = ma.array(ymask,mask=(ymask-np.roll(y0,off)<=0.))
indx = ymask.nonzero()
mags = ymask[indx]
poss = x[indx]
refs = list(zip(poss,mags))
        if 'C' in inst['Type'][0]:
            refs = G2mth.sortArray(refs,0,reverse=True)     #sort by position, descending
        else: #'T'OF
            refs = G2mth.sortArray(refs,0,reverse=False)    #sort by position, ascending
        keep = []
        for i,ref1 in enumerate(refs):      #reject picks closer than 1 FWHM
            for ref2 in refs[i+1:]:
                if abs(ref2[0]-ref1[0]) < 2.*G2pwd.getFWHM(ref1[0],inst):
                    break   #too close to a later pick; drop this one
            else:
                keep.append(ref1)
        refs = keep     #(replaces del-while-iterating, which could drop the wrong peak)
        if 'C' in inst['Type'][0]:
            refs = G2mth.sortArray(refs,1,reverse=True)     #largest intensities first
        else: #'T'OF
            refs = G2mth.sortArray(refs,1,reverse=False)    #smallest intensities first
for pos,mag in refs:
data['peaks'].append(G2mth.setPeakparms(inst,inst2,pos,mag))
UpdatePeakGrid(G2frame,data)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnCopyPeaks(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy peak list from\n'+str(hst[5:])+' to...',
'Copy peaks', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Peak List'),copy.deepcopy(data))
def OnLoadPeaks(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PWDR peaks list file', pth, '',
'PWDR peak list files (*.pkslst)|*.pkslst',wx.FD_OPEN)
        peaks = None    #stays None if the dialog is cancelled
        try:
if dlg.ShowModal() == wx.ID_OK:
peaks = []
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
while S:
if '#' in S:
S = File.readline()
continue
try:
peaks.append(eval(S))
except:
break
S = File.readline()
File.close()
finally:
dlg.Destroy()
        if peaks is None: return    #dialog cancelled; leave current peak list alone
        data = {'peaks':peaks,'sigDict':{}}
        UpdatePeakGrid(G2frame,data)
        G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnSavePeaks(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PWDR peaks list file', pth, '',
'PWDR peak list files (*.pkslst)|*.pkslst',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .pkslst
filename = os.path.splitext(filename)[0]+'.pkslst'
File = open(filename,'w')
File.write("#GSAS-II PWDR peaks list file; do not add/delete items!\n")
for item in data:
if item == 'peaks':
for pk in data[item]:
File.write(str(pk)+'\n')
File.close()
print ('PWDR peaks list saved to: '+filename)
finally:
dlg.Destroy()
def OnUnDo(event):
DoUnDo()
G2frame.dataWindow.UnDo.Enable(False)
def DoUnDo():
print ('Undo last refinement')
file = open(G2frame.undofile,'rb')
PatternId = G2frame.PatternId
for item in ['Background','Instrument Parameters','Peak List']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item),cPickle.load(file))
if G2frame.dataWindow.GetName() == item:
if item == 'Background':
UpdateBackground(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
elif item == 'Instrument Parameters':
UpdateInstrumentGrid(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
elif item == 'Peak List':
UpdatePeakGrid(G2frame,G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, item)))
print (item+' recovered')
file.close()
def SaveState():
G2frame.undofile = os.path.join(G2frame.dirname,'GSASII.save')
file = open(G2frame.undofile,'wb')
PatternId = G2frame.PatternId
for item in ['Background','Instrument Parameters','Peak List']:
cPickle.dump(G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId,item)),file,1)
file.close()
G2frame.dataWindow.UnDo.Enable(True)
def OnLSQPeakFit(event):
if reflGrid.IsCellEditControlEnabled(): # complete any grid edits in progress
reflGrid.HideCellEditControl()
reflGrid.DisableCellEditControl()
if not G2frame.GSASprojectfile: #force a save of the gpx file so SaveState can write in the same directory
G2frame.OnFileSaveas(event)
wx.CallAfter(OnPeakFit,'LSQ')
def OnOneCycle(event):
if reflGrid.IsCellEditControlEnabled(): # complete any grid edits in progress
reflGrid.HideCellEditControl()
reflGrid.DisableCellEditControl()
wx.CallAfter(OnPeakFit,'LSQ',oneCycle=True)
def OnSeqPeakFit(event):
histList = G2gd.GetGPXtreeDataNames(G2frame,['PWDR',])
od = {'label_1':'Copy to next','value_1':False,'label_2':'Reverse order','value_2':False}
dlg = G2G.G2MultiChoiceDialog(G2frame, 'Sequential peak fits',
            'Select datasets to include',histList,extraOpts=od)
names = []
if dlg.ShowModal() == wx.ID_OK:
for sel in dlg.GetSelections():
names.append(histList[sel])
dlg.Destroy()
if not names:
return
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Sequential peak fit results')
if Id:
SeqResult = G2frame.GPXtree.GetItemPyData(Id)
else:
SeqResult = {}
Id = G2frame.GPXtree.AppendItem(parent=G2frame.root,text='Sequential peak fit results')
SeqResult = {'SeqPseudoVars':{},'SeqParFitEqList':[]}
SeqResult['histNames'] = names
dlg = wx.ProgressDialog('Sequential peak fit','Data set name = '+names[0],len(names),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
controls = {'deriv type':'analytic','min dM/M':0.001,}
print ('Peak Fitting with '+controls['deriv type']+' derivatives:')
oneCycle = False
FitPgm = 'LSQ'
prevVaryList = []
peaks = None
varyList = None
if od['value_2']:
names.reverse()
try:
for i,name in enumerate(names):
print (' Sequential fit for '+name)
GoOn = dlg.Update(i,newmsg='Data set name = '+name)[0]
                if not GoOn:
                    break   #dlg is destroyed in the finally block below
PatternId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name)
if i and od['value_1']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),copy.deepcopy(peaks))
prevVaryList = varyList[:]
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
data = Pattern[1]
fixback = GetFileBackground(G2frame,data,Pattern)
peaks['sigDict'],result,sig,Rvals,varyList,parmDict,fullvaryList,badVary = G2pwd.DoPeakFit(FitPgm,peaks['peaks'],
background,limits,inst,inst2,data,fixback,prevVaryList,oneCycle,controls)
if len(result[0]) != len(fullvaryList):
dlg.Destroy()
print (' ***** Sequential peak fit stopped at '+name+' *****')
break
else:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),copy.deepcopy(peaks))
SeqResult[name] = {'variables':result[0],'varyList':varyList,'sig':sig,'Rvals':Rvals,
'covMatrix':np.eye(len(result[0])),'title':name,'parmDict':parmDict,
'fullVary':fullvaryList,'badVary':badVary}
print (' ***** Sequential peak fit successful *****')
finally:
dlg.Destroy()
        SeqResult['histNames'] = names  #only the histograms actually fitted
G2frame.GPXtree.SetItemPyData(Id,SeqResult)
G2frame.G2plotNB.Delete('Sequential refinement') #clear away probably invalid plot
G2frame.GPXtree.SelectItem(Id)
def OnClearPeaks(event):
dlg = wx.MessageDialog(G2frame,'Delete all peaks?','Clear peak list',wx.OK|wx.CANCEL)
        peaks = None    #stays None if the dialog is cancelled
        try:
if dlg.ShowModal() == wx.ID_OK:
peaks = {'peaks':[],'sigDict':{}}
finally:
dlg.Destroy()
        if peaks is None: return    #dialog cancelled
        UpdatePeakGrid(G2frame,peaks)
        G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnPeakFit(FitPgm,oneCycle=False):
SaveState()
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
if not controls:
controls = {'deriv type':'analytic','min dM/M':0.001,} #fill in defaults if needed
print ('Peak Fitting with '+controls['deriv type']+' derivatives:')
PatternId = G2frame.PatternId
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
if not peaks:
G2frame.ErrorDialog('No peaks!','Nothing to fit!')
return
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
Pattern = G2frame.GPXtree.GetItemPyData(PatternId)
data = Pattern[1]
bxye = GetFileBackground(G2frame,data,Pattern)
dlg = wx.ProgressDialog('Residual','Peak fit Rwp = ',101.0,
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
screenSize = wx.ClientDisplayRect()
Size = dlg.GetSize()
if 50 < Size[0] < 500: # sanity check on size, since this fails w/Win & wx3.0
dlg.SetSize((int(Size[0]*1.2),Size[1])) # increase size a bit along x
dlg.SetPosition(wx.Point(screenSize[2]-Size[0]-305,screenSize[1]+5))
try:
peaks['sigDict'] = G2pwd.DoPeakFit(FitPgm,peaks['peaks'],background,limits,inst,inst2,data,bxye,[],oneCycle,controls,dlg)[0]
finally:
# dlg.Destroy()
print ('finished')
newpeaks = copy.copy(peaks)
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),newpeaks)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdatePeakGrid,G2frame,newpeaks)
def OnResetSigGam(event):
PatternId = G2frame.PatternId
Inst,Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'))
if not peaks['peaks']:
G2frame.ErrorDialog('No peaks!','Nothing to do!')
return
newpeaks = {'peaks':[],'sigDict':{}}
for peak in peaks['peaks']:
newpeaks['peaks'].append(G2mth.setPeakparms(Inst,Inst2,peak[0],peak[2]))
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Peak List'),newpeaks)
UpdatePeakGrid(G2frame,newpeaks)
# def RefreshPeakGrid(event):
#
# event.StopPropagation()
# data['peaks'] = G2frame.PeakTable.GetData()
# T = []
# for peak in data['peaks']:T.append(peak[0])
# D = dict(zip(T,data['peaks']))
# T.sort()
# X = []
# for key in T: X.append(D[key])
# data['peaks'] = X
def setBackgroundColors():
for r in range(reflGrid.GetNumberRows()):
for c in range(reflGrid.GetNumberCols()):
if reflGrid.GetColLabelValue(c) in ['position','intensity','alpha','beta','sigma','gamma']:
if float(reflGrid.GetCellValue(r,c)) < 0.:
reflGrid.SetCellBackgroundColour(r,c,wx.RED)
else:
reflGrid.SetCellBackgroundColour(r,c,wx.WHITE)
def KeyEditPeakGrid(event):
'''Respond to pressing a key to act on selection of a row, column or cell
in the Peak List table
'''
rowList = reflGrid.GetSelectedRows()
colList = reflGrid.GetSelectedCols()
selectList = reflGrid.GetSelectedCells()
data = G2frame.GPXtree.GetItemPyData(G2frame.PickId)
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif rowList and (event.GetKeyCode() == wx.WXK_DELETE or event.GetKeyCode() == 8):
# pressing the delete key or backspace deletes selected peak(s)
reflGrid.ClearSelection()
reflGrid.ClearGrid()
rowList.sort()
rowList.reverse()
nDel = 0
for row in rowList:
G2frame.PeakTable.DeleteRow(row)
nDel += 1
if nDel:
msg = wg.GridTableMessage(G2frame.PeakTable,
wg.GRIDTABLE_NOTIFY_ROWS_DELETED,0,nDel)
reflGrid.ProcessTableMessage(msg)
data['peaks'] = G2frame.PeakTable.GetData()[:-nDel]
G2frame.GPXtree.SetItemPyData(G2frame.PickId,data)
setBackgroundColors()
elif colList and (event.GetKeyCode() == 89 or event.GetKeyCode() == 78):
reflGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if G2frame.PeakTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(G2frame.PeakTable.GetNumberRows()): data['peaks'][row][col]=True
elif key == 78: #'N'
for row in range(G2frame.PeakTable.GetNumberRows()): data['peaks'][row][col]=False
elif selectList and (event.GetKeyCode() == 89 or event.GetKeyCode() == 78):
reflGrid.ClearSelection()
key = event.GetKeyCode()
for row,col in selectList:
if G2frame.PeakTable.GetTypeName(row,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
data['peaks'][row][col]=True
elif key == 78: #'N'
data['peaks'][row][col]=False
else:
event.Skip()
return
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdatePeakGrid,G2frame,data)
def SelectVars(rows):
'''Set or clear peak refinement variables for peaks listed in rows
'''
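        # map each parameter column label to the index of the refine-flag
        # column that immediately follows it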
refOpts = {reflGrid.GetColLabelValue(i):i+1 for i in range(reflGrid.GetNumberCols()) if reflGrid.GetColLabelValue(i) != "refine"}
dlg = G2G.G2MultiChoiceDialog(G2frame,'Select columns to refine',
'Refinement Selection', sorted(refOpts.keys()),
filterBox=False,toggle=False)
sels = []
try:
if dlg.ShowModal() == wx.ID_OK:
sels = [sorted(refOpts.keys())[i] for i in dlg.GetSelections()]
else:
return
finally:
dlg.Destroy()
for r in rows:
for lbl,c in refOpts.items():
data['peaks'][r][c] = lbl in sels
UpdatePeakGrid(G2frame,data)
def OnRefineSelected(event):
'''set refinement flags for the selected peaks
'''
rows = list(set([row for row,col in reflGrid.GetSelectedCells()] +
reflGrid.GetSelectedRows()))
if not rows:
wx.MessageBox('No selected rows. You must select rows or cells before using this command',
caption='No selected peaks')
return
SelectVars(rows)
def OnRefineAll(event):
'''set refinement flags for all peaks
'''
SelectVars(range(reflGrid.GetNumberRows()))
# def onCellListSClick(event):
# '''Called when a peak is selected so that it can be highlighted in the plot
# '''
# event.Skip()
# c = event.GetRow(),event.GetCol()
    # if c < 0: # replot except when a column is selected
# wx.CallAfter(G2plt.PlotPatterns,G2frame,plotType='PWDR')
#
def onCellListDClick(event):
'''Called after a double-click on a cell label'''
r,c = event.GetRow(),event.GetCol()
if r < 0 and c < 0:
for row in range(reflGrid.GetNumberRows()):
reflGrid.SelectRow(row,True)
for col in range(reflGrid.GetNumberCols()):
reflGrid.SelectCol(col,True)
        elif r >= 0: #row label: select it and replot! (row 0 is the first peak)
reflGrid.ClearSelection()
reflGrid.SelectRow(r,True)
wx.CallAfter(G2frame.reflGrid.ForceRefresh)
wx.CallAfter(G2plt.PlotPatterns,G2frame,plotType='PWDR')
elif c > 0: #column label: just select it (& redisplay)
reflGrid.ClearSelection()
reflGrid.SelectCol(c,True)
if reflGrid.GetColLabelValue(c) != 'refine': return
choice = ['Y - vary all','N - vary none',]
dlg = wx.SingleChoiceDialog(G2frame,'Select refinement option for '+reflGrid.GetColLabelValue(c-1),
'Refinement controls',choice)
dlg.CenterOnParent()
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
if sel == 0:
for row in range(reflGrid.GetNumberRows()): data['peaks'][row][c]=True
else:
for row in range(reflGrid.GetNumberRows()): data['peaks'][row][c]=False
wx.CallAfter(UpdatePeakGrid,G2frame,data)
#======================================================================
# beginning of UpdatePeakGrid init
#======================================================================
G2frame.GetStatusBar().SetStatusText('Global refine: select refine column & press Y or N',1)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.PeakMenu)
G2frame.Bind(wx.EVT_MENU, OnAutoSearch, id=G2G.wxID_AUTOSEARCH)
G2frame.Bind(wx.EVT_MENU, OnCopyPeaks, id=G2G.wxID_PEAKSCOPY)
G2frame.Bind(wx.EVT_MENU, OnSavePeaks, id=G2G.wxID_PEAKSAVE)
G2frame.Bind(wx.EVT_MENU, OnLoadPeaks, id=G2G.wxID_PEAKLOAD)
G2frame.Bind(wx.EVT_MENU, OnUnDo, id=G2G.wxID_UNDO)
G2frame.Bind(wx.EVT_MENU, OnRefineSelected, id=G2frame.dataWindow.peaksSel.GetId())
G2frame.Bind(wx.EVT_MENU, OnRefineAll, id=G2frame.dataWindow.peaksAll.GetId())
G2frame.Bind(wx.EVT_MENU, OnLSQPeakFit, id=G2G.wxID_LSQPEAKFIT)
G2frame.Bind(wx.EVT_MENU, OnOneCycle, id=G2G.wxID_LSQONECYCLE)
G2frame.Bind(wx.EVT_MENU, OnSeqPeakFit, id=G2G.wxID_SEQPEAKFIT)
G2frame.Bind(wx.EVT_MENU, OnClearPeaks, id=G2G.wxID_CLEARPEAKS)
G2frame.Bind(wx.EVT_MENU, OnResetSigGam, id=G2G.wxID_RESETSIGGAM)
if data['peaks']:
G2frame.dataWindow.AutoSearch.Enable(False)
G2frame.dataWindow.PeakCopy.Enable(True)
G2frame.dataWindow.PeakFit.Enable(True)
G2frame.dataWindow.PFOneCycle.Enable(True)
G2frame.dataWindow.SeqPeakFit.Enable(True)
else:
G2frame.dataWindow.PeakFit.Enable(False)
G2frame.dataWindow.PeakCopy.Enable(False)
G2frame.dataWindow.PFOneCycle.Enable(False)
G2frame.dataWindow.AutoSearch.Enable(True)
G2frame.dataWindow.SeqPeakFit.Enable(False)
G2frame.PickTable = []
rowLabels = []
PatternId = G2frame.PatternId
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))[0]
for i in range(len(data['peaks'])): rowLabels.append(str(i+1))
if 'C' in Inst['Type'][0]:
colLabels = ['position','refine','intensity','refine','sigma','refine','gamma','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,1',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
else:
colLabels = ['position','refine','intensity','refine','alpha','refine',
'beta','refine','sigma','refine','gamma','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,1',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
T = []
for peak in data['peaks']:
T.append(peak[0])
D = dict(zip(T,data['peaks']))
T.sort()
if 'T' in Inst['Type'][0]: #want big TOF's first
T.reverse()
X = []
for key in T: X.append(D[key])
data['peaks'] = X
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
G2frame.GPXtree.SetItemPyData(G2frame.PickId,data)
G2frame.PeakTable = G2G.Table(data['peaks'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Peak List')
G2frame.dataWindow.currentGrids = []
reflGrid = G2G.GSGrid(parent=G2frame.dataWindow)
reflGrid.SetTable(G2frame.PeakTable, True)
setBackgroundColors()
# reflGrid.Bind(wg.EVT_GRID_CELL_CHANGE, RefreshPeakGrid)
reflGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
# reflGrid.Bind(wg.EVT_GRID_LABEL_LEFT_CLICK, onCellListSClick)
# G2frame.dataWindow.Bind(wg.EVT_GRID_CELL_LEFT_CLICK, onCellListSClick)
reflGrid.Bind(wg.EVT_GRID_LABEL_LEFT_DCLICK, onCellListDClick)
# G2frame.dataWindow.Bind(wg.EVT_GRID_CELL_LEFT_DCLICK, onCellListDClick)
reflGrid.AutoSizeColumns(False)
reflGrid.SetScrollRate(10,10)
G2frame.reflGrid = reflGrid
mainSizer.Add(reflGrid,1,wx.ALL|wx.EXPAND,1)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Background
################################################################################
def UpdateBackground(G2frame,data):
'''respond to selection of PWDR background data tree item.
'''
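    # data[0] is the standard background function: [name, refine flag, # coeffs,
    # coeff1, coeff2, ...]; data[1] is a dict holding the Debye terms, background
    # peaks, fixed points and the fixed 'background PWDR' reference (see sizers below)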
def OnBackFlagCopy(event):
flag = data[0][1]
backDict = data[-1]
if backDict['nDebye']:
DBflags = []
for term in backDict['debyeTerms']:
DBflags.append(term[1::2])
if backDict['nPeaks']:
PKflags = []
for term in backDict['peaksList']:
PKflags.append(term[1::2])
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy bkg ref. flags from\n'+str(hst[5:])+' to...',
'Copy bkg flags', histList)
copyList = []
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
backData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Background'))
backData[0][1] = copy.copy(flag)
bkDict = backData[-1]
if bkDict['nDebye'] == backDict['nDebye']:
for i,term in enumerate(bkDict['debyeTerms']):
term[1::2] = copy.copy(DBflags[i])
if bkDict['nPeaks'] == backDict['nPeaks']:
for i,term in enumerate(bkDict['peaksList']):
term[1::2] = copy.copy(PKflags[i])
def OnBackCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy bkg params from\n'+str(hst[5:])+' to...',
'Copy parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Background'),copy.deepcopy(data))
CalcBack(Id)
def OnBackSave(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II background parameters file', pth, '',
'background parameter files (*.pwdrbck)|*.pwdrbck',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .pwdrbck
filename = os.path.splitext(filename)[0]+'.pwdrbck'
File = open(filename,'w')
File.write("#GSAS-II background parameter file; do not add/delete items!\n")
File.write(str(data[0])+'\n')
for item in data[1]:
if item in ['nPeaks','background PWDR','nDebye'] or not len(data[1][item]):
File.write(item+':'+str(data[1][item])+'\n')
else:
File.write(item+':\n')
for term in data[1][item]:
File.write(str(term)+'\n')
File.close()
print ('Background parameters saved to: '+filename)
finally:
dlg.Destroy()
def OnBackLoad(event):
pth = G2G.GetImportPath(G2frame)
        if not pth: pth = '.'
        newback = None  #stays None if the dialog is cancelled
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II background parameters file', pth, '',
'background parameter files (*.pwdrbck)|*.pwdrbck',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
newback = [[],{}]
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
if S[0] == '#': #skip the heading
S = File.readline() #should contain the std. bck fxn
newback[0] = eval(S[:-1])
S = File.readline()
                while S and ':' in S:
                    [item,vals] = S[:-1].split(':')
                    if item in ['nPeaks','nDebye']:
                        newback[1][item] = int(vals)
                    elif 'PWDR' in item:
                        newback[1][item] = eval(vals)
                    elif item in ['FixedPoints','debyeTerms','peaksList']:
                        newback[1][item] = []
                        S = File.readline()
                        while S and ':' not in S:
                            newback[1][item].append(eval(S[:-1]))
                            S = File.readline()
                        continue    #S already holds the next item line; unknown items just fall through
                    S = File.readline()
File.close()
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Background'),newback)
finally:
dlg.Destroy()
        if newback is None: return  #dialog cancelled
        CalcBack(G2frame.PatternId)
        G2plt.PlotPatterns(G2frame,plotType='PWDR')
        wx.CallLater(100,UpdateBackground,G2frame,newback)
def OnBkgFit(event):
def SetInstParms(Inst):
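            # returns the data type, a {name:value} dict of instrument parameters
            # and an (empty) vary list; profile refinement flags are cleared so
            # that only the background is fit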
dataType = Inst['Type'][0]
insVary = []
insNames = []
insVals = []
for parm in Inst:
insNames.append(parm)
insVals.append(Inst[parm][1])
if parm in ['U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha',
'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q',] and Inst[parm][2]:
Inst[parm][2] = False
# insVary.append(parm)
instDict = dict(zip(insNames,insVals))
instDict['X'] = max(instDict['X'],0.01)
instDict['Y'] = max(instDict['Y'],0.01)
if 'SH/L' in instDict:
instDict['SH/L'] = max(instDict['SH/L'],0.002)
return dataType,instDict,insVary
PatternId = G2frame.PatternId
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
# sort the points for convenience and then separate them; extend the range if needed
if 'FixedPoints' not in background[1]:
msg = ("You have not defined any fixed background points. "+
"Use the Fixed Points/Add menu item to define points that will be fit."+
'\n\nSee the "Fitting the Starting Background using Fixed Points" tutorial for more details.')
print (msg)
G2frame.ErrorDialog('No points',msg)
return
background[1]['FixedPoints'] = sorted(background[1]['FixedPoints'],key=lambda pair:pair[0])
X = [x for x,y in background[1]['FixedPoints']]
Y = [y for x,y in background[1]['FixedPoints']]
if X[0] > limits[0]:
X = [limits[0]] + X
Y = [Y[0]] + Y
if X[-1] < limits[1]:
X += [limits[1]]
Y += [Y[-1]]
# interpolate the fixed points onto the grid of data points within limits
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)[1]
xBeg = np.searchsorted(pwddata[0],limits[0])
xFin = np.searchsorted(pwddata[0],limits[1])
xdata = pwddata[0][xBeg:xFin]
ydata = si.interp1d(X,Y)(ma.getdata(xdata))
W = [1]*len(xdata)
Z = [0]*len(xdata)
# load instrument and background params
print (' NB: Any instrument parameter refinement flags will be cleared')
dataType,insDict,insVary = SetInstParms(inst)
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(background)
# how many background parameters are refined?
if len(bakVary)*1.5 > len(X):
msg = ("You are attempting to vary "+str(len(bakVary))+
" background terms with only "+str(len(X))+" background points"+
"\nAdd more points or reduce the number of terms")
print (msg)
G2frame.ErrorDialog('Too few points',msg)
return
wx.BeginBusyCursor()
try:
G2pwd.DoPeakFit('LSQ',[],background,limits,inst,inst2,
np.array((xdata,ydata,W,Z,Z,Z)),Z,prevVaryList=bakVary,controls=controls)
finally:
wx.EndBusyCursor()
# compute the background values and plot them
parmDict = {}
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(background)
parmDict.update(bakDict)
parmDict.update(insDict)
# Note that this generates a MaskedArrayFutureWarning, but these items are not always masked
pwddata[3][xBeg:xFin] *= 0.
pwddata[5][xBeg:xFin] *= 0.
pwddata[4][xBeg:xFin] = G2pwd.getBackground('',parmDict,bakType,dataType,xdata)[0]
G2plt.PlotPatterns(G2frame,plotType='PWDR')
# show the updated background values
wx.CallLater(100,UpdateBackground,G2frame,data)
def OnBkgClear(event):
if 'FixedPoints' not in data[1]:
return
else:
data[1]['FixedPoints'] = []
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnPeaksMove(event):
if not data[1]['nPeaks']:
G2frame.ErrorDialog('Error','No peaks to move')
return
Peaks = {'peaks':[],'sigDict':{}}
for peak in data[1]['peaksList']:
Peaks['peaks'].append([peak[0],0,peak[2],0,peak[4],0,peak[6],0])
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'),Peaks)
def OnMakeRDF(event):
dlg = RDFDialog(G2frame)
try:
if dlg.ShowModal() == wx.ID_OK:
RDFcontrols = dlg.GetSelection()
else:
return
finally:
dlg.Destroy()
PatternId = G2frame.PatternId
background = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)[1]
auxPlot = G2pwd.MakeRDF(RDFcontrols,background,inst,pwddata)
if '2' in platform.python_version_tuple()[0]:
superMinusOne = unichr(0xaf)+unichr(0xb9)
else:
superMinusOne = chr(0xaf)+chr(0xb9)
for plot in auxPlot:
XY = np.array(plot[:2])
if 'D(R)' in plot[2]:
xlabel = r'$R, \AA$'
ylabel = r'$D(R), arb. units$'
else:
xlabel = r'$Q,\AA$'+superMinusOne
ylabel = r'$I(Q)$'
G2plt.PlotXY(G2frame,[XY,],Title=plot[2],labelX=xlabel,labelY=ylabel,lines=True)
def BackSizer():
def OnNewType(event):
data[0][0] = bakType.GetValue()
def OnBakRef(event):
data[0][1] = bakRef.GetValue()
def OnBakTerms(event):
data[0][2] = int(bakTerms.GetValue())
M = len(data[0])
N = data[0][2]+3
item = data[0]
if N > M: #add terms
for i in range(M,N):
item.append(0.0)
elif N < M: #delete terms
for i in range(N,M):
del(item[-1])
G2frame.GPXtree.SetItemPyData(BackId,data)
wx.CallLater(100,UpdateBackground,G2frame,data)
def AfterChange(invalid,value,tc):
if invalid: return
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
backSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Background function: '),0,WACV)
bakType = wx.ComboBox(G2frame.dataWindow,value=data[0][0],
choices=Choices,style=wx.CB_READONLY|wx.CB_DROPDOWN)
bakType.Bind(wx.EVT_COMBOBOX, OnNewType)
topSizer.Add(bakType)
topSizer.Add((5,0),0)
bakRef = wx.CheckBox(G2frame.dataWindow,label=' Refine?')
bakRef.SetValue(bool(data[0][1]))
bakRef.Bind(wx.EVT_CHECKBOX, OnBakRef)
topSizer.Add(bakRef,0,WACV)
backSizer.Add(topSizer)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of coeff.: '),0,WACV)
bakTerms = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[0][2]),choices=[str(i+1) for i in range(36)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
bakTerms.Bind(wx.EVT_COMBOBOX,OnBakTerms)
topSizer.Add(bakTerms,0,WACV)
topSizer.Add((5,0),0)
backSizer.Add(topSizer)
backSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Background coefficients:'),0,WACV)
bakSizer = wx.FlexGridSizer(0,5,5,5)
for i,value in enumerate(data[0][3:]):
bakVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[0],i+3,nDig=(10,4),OnLeave=AfterChange)
bakSizer.Add(bakVal,0,WACV)
backSizer.Add(bakSizer)
return backSizer
def DebyeSizer():
def OnDebTerms(event):
data[1]['nDebye'] = int(debTerms.GetValue())
M = len(data[1]['debyeTerms'])
N = data[1]['nDebye']
if N > M: #add terms
for i in range(M,N):
data[1]['debyeTerms'].append([1.0,False,1.0,False,0.010,False])
elif N < M: #delete terms
for i in range(N,M):
del(data[1]['debyeTerms'][-1])
if N == 0:
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdateBackground,G2frame,data)
def KeyEditPeakGrid(event):
colList = debyeGrid.GetSelectedCols()
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
debyeGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if debyeTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(debyeGrid.GetNumberRows()): data[1]['debyeTerms'][row][col]=True
elif key == 78: #'N'
for row in range(debyeGrid.GetNumberRows()): data[1]['debyeTerms'][row][col]=False
def OnCellChange(event):
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
debSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Debye scattering: '),0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of terms: '),0,WACV)
debTerms = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[1]['nDebye']),choices=[str(i) for i in range(21)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
debTerms.Bind(wx.EVT_COMBOBOX,OnDebTerms)
topSizer.Add(debTerms,0,WACV)
topSizer.Add((5,0),0)
debSizer.Add(topSizer)
if data[1]['nDebye']:
debSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Debye diffuse terms:'),0,WACV)
rowLabels = []
for i in range(len(data[1]['debyeTerms'])): rowLabels.append(str(i))
colLabels = ['A','refine','R','refine','U','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
debyeTable = G2G.Table(data[1]['debyeTerms'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
debyeGrid = G2G.GSGrid(parent=G2frame.dataWindow)
debyeGrid.SetTable(debyeTable, True)
debyeGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
debyeGrid.Bind(wg.EVT_GRID_CELL_CHANGED,OnCellChange)
debyeGrid.AutoSizeColumns(False)
debSizer.Add(debyeGrid)
return debSizer
def PeaksSizer():
def OnPeaks(event):
data[1]['nPeaks'] = int(peaks.GetValue())
M = len(data[1]['peaksList'])
N = data[1]['nPeaks']
if N > M: #add terms
for i in range(M,N):
data[1]['peaksList'].append([1.0,False,1.0,False,0.10,False,0.10,False])
elif N < M: #delete terms
for i in range(N,M):
del(data[1]['peaksList'][-1])
if N == 0:
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
wx.CallAfter(UpdateBackground,G2frame,data)
def KeyEditPeakGrid(event):
colList = peaksGrid.GetSelectedCols()
if event.GetKeyCode() == wx.WXK_RETURN:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_CONTROL:
event.Skip(True)
elif event.GetKeyCode() == wx.WXK_SHIFT:
event.Skip(True)
elif colList:
peaksGrid.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if peaksTable.GetTypeName(0,col) == wg.GRID_VALUE_BOOL:
if key == 89: #'Y'
for row in range(peaksGrid.GetNumberRows()): data[1]['peaksList'][row][col]=True
elif key == 78: #'N'
for row in range(peaksGrid.GetNumberRows()): data[1]['peaksList'][row][col]=False
def OnCellChange(event):
CalcBack(G2frame.PatternId)
G2plt.PlotPatterns(G2frame,plotType='PWDR')
peaksSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Peaks in background: '),0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Number of peaks: '),0,WACV)
peaks = wx.ComboBox(G2frame.dataWindow,-1,value=str(data[1]['nPeaks']),choices=[str(i) for i in range(30)],
style=wx.CB_READONLY|wx.CB_DROPDOWN)
peaks.Bind(wx.EVT_COMBOBOX,OnPeaks)
topSizer.Add(peaks,0,WACV)
topSizer.Add((5,0),0)
peaksSizer.Add(topSizer)
G2frame.dataWindow.currentGrids = []
if data[1]['nPeaks']:
peaksSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Peak list:'),0,WACV)
rowLabels = []
for i in range(len(data[1]['peaksList'])): rowLabels.append(str(i))
colLabels = ['pos','refine','int','refine','sig','refine','gam','refine']
Types = [wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,3',wg.GRID_VALUE_BOOL,
wg.GRID_VALUE_FLOAT+':10,5',wg.GRID_VALUE_BOOL]
peaksTable = G2G.Table(data[1]['peaksList'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
peaksGrid = G2G.GSGrid(parent=G2frame.dataWindow)
peaksGrid.SetTable(peaksTable, True)
peaksGrid.Bind(wx.EVT_KEY_DOWN, KeyEditPeakGrid)
peaksGrid.Bind(wg.EVT_GRID_CELL_CHANGED,OnCellChange)
peaksGrid.AutoSizeColumns(False)
peaksSizer.Add(peaksGrid)
return peaksSizer
def BackFileSizer():
def OnBackPWDR(event):
data[1]['background PWDR'][0] = back.GetValue()
if data[1]['background PWDR'][0]:
curHist = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data[1]['background PWDR'][0])
if not Id:
G2G.G2MessageBox(G2frame,'Histogram not found -- how did this happen?','Missing histogram')
back.SetValue('')
data[1]['background PWDR'][0] = back.GetValue()
return
bkgHist = G2frame.GPXtree.GetItemPyData(Id)
if len(bkgHist[1][0]) != len(curHist[1][0]):
                    G2G.G2MessageBox(G2frame,'Histograms have different lengths','Mismatched histograms')
back.SetValue('')
data[1]['background PWDR'][0] = back.GetValue()
return
CalcBack()
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def AfterChange(invalid,value,tc):
if invalid: return
CalcBack()
G2plt.PlotPatterns(G2frame,plotType='PWDR')
fileSizer = wx.BoxSizer(wx.VERTICAL)
fileSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Fixed background file:'),0,WACV)
if 'background PWDR' not in data[1]:
data[1]['background PWDR'] = ['',-1.,False]
backSizer = wx.BoxSizer(wx.HORIZONTAL)
Choices = ['',]+G2gd.GetGPXtreeDataNames(G2frame,['PWDR',])
Source = G2frame.GPXtree.GetItemText(G2frame.PatternId)
Choices.pop(Choices.index(Source))
back = wx.ComboBox(parent=G2frame.dataWindow,value=data[1]['background PWDR'][0],choices=Choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
back.Bind(wx.EVT_COMBOBOX,OnBackPWDR)
backSizer.Add(back)
backSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' multiplier'),0,WACV)
backMult = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[1]['background PWDR'],1,nDig=(10,3),OnLeave=AfterChange)
backSizer.Add(backMult,0,WACV)
fileSizer.Add(backSizer)
return fileSizer
def CalcBack(PatternId=G2frame.PatternId):
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Limits'))[1]
inst,inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Instrument Parameters'))
backData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Background'))
dataType = inst['Type'][0]
insDict = {inskey:inst[inskey][1] for inskey in inst}
parmDict = {}
bakType,bakDict,bakVary = G2pwd.SetBackgroundParms(data)
parmDict.update(bakDict)
parmDict.update(insDict)
pwddata = G2frame.GPXtree.GetItemPyData(PatternId)
xBeg = np.searchsorted(pwddata[1][0],limits[0])
xFin = np.searchsorted(pwddata[1][0],limits[1])
fixBack = backData[1]['background PWDR']
try: #typically bad grid value or no fixed bkg file
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,fixBack[0])
fixData = G2frame.GPXtree.GetItemPyData(Id)
fixedBkg = {'_fixedVary':False,'_fixedMult':fixBack[1],'_fixedValues':fixData[1][1][xBeg:xFin]}
pwddata[1][4][xBeg:xFin] = G2pwd.getBackground('',parmDict,bakType,dataType,pwddata[1][0][xBeg:xFin],fixedBkg)[0]
except:
pass
# UpdateBackground execution starts here
if len(data) < 2: #add Debye diffuse & peaks scattering here
data.append({'nDebye':0,'debyeTerms':[],'nPeaks':0,'peaksList':[]})
if 'nPeaks' not in data[1]:
data[1].update({'nPeaks':0,'peaksList':[]})
G2frame.dataWindow.currentGrids = []
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.BackMenu)
G2frame.Bind(wx.EVT_MENU,OnBackCopy,id=G2G.wxID_BACKCOPY)
G2frame.Bind(wx.EVT_MENU,OnBackFlagCopy,id=G2G.wxID_BACKFLAGCOPY)
G2frame.Bind(wx.EVT_MENU,OnBackSave,id=G2G.wxID_BACKSAVE)
G2frame.Bind(wx.EVT_MENU,OnBackLoad,id=G2G.wxID_BACKLOAD)
G2frame.Bind(wx.EVT_MENU,OnPeaksMove,id=G2G.wxID_BACKPEAKSMOVE)
G2frame.Bind(wx.EVT_MENU,OnMakeRDF,id=G2G.wxID_MAKEBACKRDF)
G2frame.Bind(wx.EVT_MENU,OnBkgFit,id=G2frame.dataWindow.wxID_BackPts['Fit'])
G2frame.Bind(wx.EVT_MENU,OnBkgClear,id=G2frame.dataWindow.wxID_BackPts['Clear'])
BackId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Background')
Choices = ['chebyschev','cosine','Q^2 power series','Q^-2 power series','lin interpolate','inv interpolate','log interpolate']
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(BackSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(DebyeSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(PeaksSizer())
mainSizer.Add((0,5),0)
mainSizer.Add(BackFileSizer())
G2frame.dataWindow.SetDataSize()
################################################################################
##### Limits
################################################################################
def UpdateLimitsGrid(G2frame, data,plottype):
'''respond to selection of PWDR Limits data tree item.
'''
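    # data[0] holds the original data range, data[1] the limits now in use;
    # entries in data[2:] (if any) are excluded regions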
def AfterChange(invalid,value,tc):
if invalid: return
plottype = G2frame.GPXtree.GetItemText(G2frame.PatternId)[:4]
wx.CallAfter(G2plt.PlotPatterns,G2frame,newPlot=False,plotType=plottype) #unfortunately this resets the plot width
def LimitSizer():
limits = wx.FlexGridSizer(0,3,0,5)
labels = ['Tmin','Tmax']
for i in [0,1]:
limits.Add(wx.StaticText(G2frame.dataWindow,
label=' Original {} {:.4f}'.format(labels[i],data[0][i])),0,WACV)
limits.Add(wx.StaticText(G2frame.dataWindow,label=' New: '),0,WACV)
limits.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[1],i, \
min=data[0][0],max=data[0][1],nDig=(10,4),typeHint=float,OnLeave=AfterChange))
return limits
def ExclSizer():
def OnDelExcl(event):
Obj = event.GetEventObject()
item = Indx[Obj.GetId()]
del(data[item+2])
G2plt.PlotPatterns(G2frame,newPlot=False,plotType=plottype)
wx.CallAfter(UpdateLimitsGrid,G2frame,data,plottype)
Indx = {}
excl = wx.FlexGridSizer(0,3,0,5)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' From: '),0,WACV)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' To: '),0,WACV)
excl.Add(wx.StaticText(G2frame.dataWindow,label=' Delete?: '),0,WACV)
for Id,item in enumerate(data[2:]):
for i in [0,1]:
excl.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,item,i, \
min=data[0][0],max=data[0][1],nDig=(10,4),typeHint=float,OnLeave=AfterChange))
delExcl = wx.CheckBox(G2frame.dataWindow,label='')
Indx[delExcl.GetId()] = Id
delExcl.Bind(wx.EVT_CHECKBOX,OnDelExcl)
excl.Add(delExcl,0,WACV)
return excl
def OnAddExcl(event):
G2frame.ifGetExclude = True
print ('Add excluded region')
def OnLimitCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy limits from\n'+str(hst[5:])+' to...',
'Copy limits', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Limits'),copy.deepcopy(data))
finally:
dlg.Destroy()
def Draw():
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Data used in refinement'),0,WACV)
mainSizer.Add((5,5))
mainSizer.Add(LimitSizer())
if len(data)>2:
mainSizer.Add((0,5),0)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Excluded regions:'),0,WACV)
mainSizer.Add(ExclSizer())
G2frame.dataWindow.SetDataSize()
G2frame.ifGetExclude = False
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.LimitMenu)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Limits')
G2frame.Bind(wx.EVT_MENU,OnLimitCopy,id=G2G.wxID_LIMITCOPY)
G2frame.Bind(wx.EVT_MENU,OnAddExcl,id=G2G.wxID_ADDEXCLREGION)
Draw()
################################################################################
##### Instrument parameters
################################################################################
def UpdateInstrumentGrid(G2frame,data):
'''respond to selection of PWDR/SASD/REFD Instrument Parameters
data tree item.
'''
if 'Bank' not in data: #get it from name; absent for default parms selection
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
if 'Bank' in hst:
bank = int(hst.split('Bank')[1].split('_')[0])
data['Bank'] = [bank,bank,0]
else:
data['Bank'] = [1,1,0]
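    # each instrument parameter entry is [default value, current value, refine flag]
    # (the flag is absent for parameters that cannot be refined; see updateData)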
def keycheck(keys):
good = []
for key in keys:
if key in ['Type','Bank','U','V','W','X','Y','Z','SH/L','I(L2)/I(L1)','alpha',
'beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','Polariz.',
'Lam','Azimuth','2-theta','fltPath','difC','difA','difB','Zero','Lam1','Lam2']:
good.append(key)
return good
def updateData(inst,ref):
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[0]
for item in data:
try:
data[item] = [data[item][0],inst[item],ref[item]]
except KeyError:
try:
data[item] = [data[item][0],inst[item]]
except KeyError:
pass #skip 'Polariz.' for N-data
def RefreshInstrumentGrid(event,doAnyway=False):
if doAnyway or event.GetRow() == 1:
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'))
newpeaks = []
for peak in peaks['peaks']:
newpeaks.append(G2mth.setPeakparms(data,Inst2,peak[0],peak[2]))
peaks['peaks'] = newpeaks
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'),peaks)
def OnCalibrate(event):
Pattern = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)
xye = ma.array(ma.getdata(Pattern[1]))
cw = np.diff(xye[0])
IndexPeaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Index Peak List'))
if not len(IndexPeaks[0]):
G2frame.ErrorDialog('Can not calibrate','Index Peak List empty')
return
if not np.any(IndexPeaks[1]):
G2frame.ErrorDialog('Can not calibrate','Peak positions not refined')
return False
Ok = False
for peak in IndexPeaks[0]:
if peak[2] and peak[3]:
Ok = True
if not Ok:
G2frame.ErrorDialog('Can not calibrate','Index Peak List not indexed')
return
if G2pwd.DoCalibInst(IndexPeaks,data):
UpdateInstrumentGrid(G2frame,data)
XY = []
Sigs = []
for ip,peak in enumerate(IndexPeaks[0]):
if peak[2] and peak[3]:
binwid = cw[np.searchsorted(xye[0],peak[0])]
XY.append([peak[-1],peak[0],binwid])
Sigs.append(IndexPeaks[1][ip])
if len(XY):
XY = np.array(XY)
G2plt.PlotCalib(G2frame,data,XY,Sigs,newPlot=True)
else:
G2frame.ErrorDialog('Can not calibrate','Nothing selected for refinement')
def OnLoad(event):
'''Loads instrument parameters from a G2 .instprm file
in response to the Instrument Parameters-Operations/Load Profile menu
If instprm file has multiple banks each with header #Bank n: ..., this
finds matching bank no. to load - rejects nonmatches.
Note that similar code is found in ReadPowderInstprm (GSASII.py)
'''
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[0]
bank = data['Bank'][0]
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = []
newVals = []
Found = False
while S:
if S[0] == '#':
if Found:
break
if 'Bank' in S:
if bank == int(S.split(':')[0].split()[1]):
S = File.readline()
continue
else:
S = File.readline()
while S and '#Bank' not in S:
S = File.readline()
continue
else: #a non #Bank file
S = File.readline()
continue
Found = True
[item,val] = S[:-1].split(':')
newItems.append(item)
try:
newVals.append(float(val))
except ValueError:
newVals.append(val)
S = File.readline()
File.close()
if Found:
Inst,Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Instrument Parameters'))
if 'Bank' not in Inst: #patch for old .instprm files - may cause faults for TOF data
Inst['Bank'] = [1,1,0]
data = G2fil.makeInstDict(newItems,newVals,len(newVals)*[False,])
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Instrument Parameters'),[data,Inst2])
RefreshInstrumentGrid(event,doAnyway=True) #to get peaks updated
else:
G2frame.ErrorDialog('No match','Bank %d not in %s'%(bank,filename),G2frame)
UpdateInstrumentGrid(G2frame,data)
G2plt.PlotPeakWidths(G2frame)
finally:
dlg.Destroy()
def OnSave(event):
'''Respond to the Instrument Parameters Operations/Save Profile menu
item: writes current parameters to a .instprm file
It does not write Bank n: on # line & thus can be used any time w/o clash of bank nos.
'''
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .instprm
filename = os.path.splitext(filename)[0]+'.instprm'
File = open(filename,'w')
File.write("#GSAS-II instrument parameter file; do not add/delete items!\n")
for item in data:
File.write(item+':'+str(data[item][1])+'\n')
File.close()
print ('Instrument parameters saved to: '+filename)
finally:
dlg.Destroy()
def OnSaveAll(event):
        '''Respond to the Instrument Parameters Operations/"Save all Profile" menu
        item: writes the instrument parameters from the selected PWDR entries
        (which may span multiple banks) into a single .instprm file.
        Each block starts with "#Bank n: GSAS-II instrument..." where n is the bank number.
        '''
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
histList.insert(0,hst)
saveList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Save instrument parameters from',
'Save instrument parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
saveList.append(histList[i])
finally:
dlg.Destroy()
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .instprm
filename = os.path.splitext(filename)[0]+'.instprm'
File = open(filename,'w')
for hist in saveList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in inst: #patch
bank = 1
if 'Bank' in hist:
bank = int(hist.split('Bank')[1])
inst['Bank'] = [bank,bank,0]
bank = inst['Bank'][0]
File.write("#Bank %d: GSAS-II instrument parameter file; do not add/delete items!\n"%(bank))
for item in inst:
File.write(item+':'+str(inst[item][1])+'\n')
File.close()
finally:
dlg.Destroy()
def OnReset(event):
insVal.update(insDef)
updateData(insVal,insRef)
RefreshInstrumentGrid(event,doAnyway=True) #to get peaks updated
UpdateInstrumentGrid(G2frame,data)
G2plt.PlotPeakWidths(G2frame)
def OnInstFlagCopy(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
keys = list(data.keys())
try:
keys.remove('Source')
except ValueError:
pass
flags = dict(zip(keys,[data[key][2] for key in keys]))
instType = data['Type'][0]
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy inst ref. flags from\n'+hst[5:],
'Copy refinement flags', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
instData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in instData:
instData['Bank'] = [1,1,0]
if len(data) == len(instData) and instType == instData['Type'][0]: #don't mix data types or lam & lam1/lam2 parms!
for item in instData:
if item not in ['Source',]:
instData[item][2] = copy.copy(flags[item])
else:
print (item+' not copied - instrument parameters not commensurate')
def OnInstCopy(event):
#need fix for dictionary
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
copyData = copy.deepcopy(data)
del copyData['Azimuth'] #not to be copied!
instType = data['Type'][0]
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy inst params from\n'+hst,
'Copy parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
instData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Instrument Parameters'))[0]
if 'Bank' not in instData:
instData['Bank'] = [1,1,0]
if len(data) == len(instData) and instType == instData['Type'][0]: #don't mix data types or lam & lam1/lam2 parms!
instData.update(copyData)
else:
print (item+' not copied - instrument parameters not commensurate')
def AfterChange(invalid,value,tc):
if invalid: return
updateData(insVal,insRef)
def NewProfile(invalid,value,tc):
if invalid: return
updateData(insVal,insRef)
G2plt.PlotPeakWidths(G2frame)
def OnItemRef(event):
Obj = event.GetEventObject()
item = RefObj[Obj.GetId()]
insRef[item] = Obj.GetValue()
updateData(insVal,insRef)
    def OnCopy1Val(event):
        '''Select one instrument parameter value to edit and copy to many
        histograms; optionally the values may be edited in a table.
        '''
updateData(insVal,insRef)
G2G.SelectEdit1Var(G2frame,data,labelLst,elemKeysLst,dspLst,refFlgElem)
insVal.update({key:data[key][1] for key in instkeys})
insRef.update({key:data[key][2] for key in instkeys})
wx.CallAfter(MakeParameterWindow)
def lblWdef(lbl,dec,val):
'Label parameter showing the default value'
fmt = "%15."+str(dec)+"f"
return " " + lbl + " (" + (fmt % val).strip() + "): "
def RefineBox(item):
'Define a refine checkbox with binding'
#wid = wx.CheckBox(G2frame.dataWindow,label=' Refine? ')
wid = wx.CheckBox(G2frame.dataWindow,label='')
wid.SetValue(bool(insRef[item]))
RefObj[wid.GetId()] = item
wid.Bind(wx.EVT_CHECKBOX, OnItemRef)
return wid
def OnLamPick(event):
data['Source'][1] = lamType = event.GetEventObject().GetValue()
if 'P' in insVal['Type']:
insVal['Lam1'] = waves[lamType][0]
insVal['Lam2'] = waves[lamType][1]
elif 'S' in insVal['Type'] and 'synch' not in lamType:
insVal['Lam'] = meanwaves[lamType]
updateData(insVal,insRef)
        i,j = wx.__version__.split('.')[0:2]
        if int(i)+int(j)/10. > 2.8:
            # an immediate repaint crashes wxpython 2.9, so delay the rebuild
            wx.CallLater(100, MakeParameterWindow)
        else:
            wx.CallAfter(MakeParameterWindow)
def MakeParameterWindow():
'Displays the Instrument parameters in the dataWindow frame'
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
instSizer = wx.FlexGridSizer(0,3,5,5)
subSizer = wx.BoxSizer(wx.HORIZONTAL)
        if insVal['Bank'] is None: #patch
            insVal['Bank'] = 1
text = ' Histogram Type: %s Bank: %d'%(insVal['Type'],insVal['Bank'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,text),0,WACV)
mainSizer.Add(subSizer)
labelLst[:],elemKeysLst[:],dspLst[:],refFlgElem[:] = [],[],[],[]
if 'P' in insVal['Type']: #powder data
[instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt),0,WACV) for txt in [' Name (default)',' Value','Refine?']]
if 'C' in insVal['Type']: #constant wavelength
labelLst.append('Azimuth angle')
elemKeysLst.append(['Azimuth',1])
dspLst.append([10,2])
refFlgElem.append(None)
if 'Lam1' in insVal:
subSizer = wx.BoxSizer(wx.HORIZONTAL)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: '),0,WACV)
txt = '%7.2f'%(insVal['Azimuth'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Ka1/Ka2: '),0,WACV)
txt = u' %8.6f/%8.6f\xc5'%(insVal['Lam1'],insVal['Lam2'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
waveSizer = wx.BoxSizer(wx.HORIZONTAL)
waveSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Source type: '),0,WACV)
# PATCH?: for now at least, Source is not saved anywhere before here
if 'Source' not in data: data['Source'] = ['CuKa','?']
choice = ['TiKa','CrKa','FeKa','CoKa','CuKa','MoKa','AgKa']
lamPick = wx.ComboBox(G2frame.dataWindow,value=data['Source'][1],choices=choice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
lamPick.Bind(wx.EVT_COMBOBOX, OnLamPick)
waveSizer.Add(lamPick,0)
subSizer.Add(waveSizer,0)
mainSizer.Add(subSizer)
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef('I(L2)/I(L1)',4,insDef['I(L2)/I(L1)'])),0,WACV)
key = 'I(L2)/I(L1)'
labelLst.append(key)
elemKeysLst.append([key,1])
dspLst.append([10,4])
refFlgElem.append([key,2])
ratVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(ratVal,0)
instSizer.Add(RefineBox(key),0,WACV)
else: # single wavelength
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: '),0,WACV)
txt = '%7.2f'%(insVal['Azimuth'])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
instSizer.Add((5,5),0)
key = 'Lam'
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef[key])),0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,6),typeHint=float,OnLeave=AfterChange)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append([key,1])
dspLst.append([10,6])
instSizer.Add(waveVal,0,WACV)
refFlgElem.append([key,2])
instSizer.Add(RefineBox(key),0,WACV)
for item in ['Zero','Polariz.']:
if item in insDef:
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append([10,4])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,4,insDef[item])),0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
refFlgElem.append([item,2])
instSizer.Add(RefineBox(item),0,WACV)
for item in ['U','V','W','X','Y','Z','SH/L']:
nDig = (10,3)
if item == 'SH/L':
nDig = (10,5)
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append(nDig)
refFlgElem.append([item,2])
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,nDig[1],insDef[item])),0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=nDig,typeHint=float,OnLeave=NewProfile)
instSizer.Add(itemVal,0,WACV)
instSizer.Add(RefineBox(item),0,WACV)
elif 'T' in insVal['Type']: #time of flight (neutrons)
subSizer = wx.BoxSizer(wx.HORIZONTAL)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Flight path: '),0,WACV)
txt = '%8.3f'%(insVal['fltPath'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('flight path')
elemKeysLst.append(['fltPath',1])
dspLst.append([10,2])
refFlgElem.append(None)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' 2-theta: '),0,WACV)
txt = '%7.2f'%(insVal['2-theta'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('2-theta')
elemKeysLst.append(['2-theta',1])
dspLst.append([10,2])
refFlgElem.append(None)
if 'Pdabc' in Inst2:
Items = ['sig-0','sig-1','sig-2','sig-q','X','Y','Z']
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' difC: '),0,WACV)
txt = '%8.2f'%(insVal['difC'])
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,txt.strip()),0,WACV)
labelLst.append('difC')
elemKeysLst.append(['difC',1])
dspLst.append([10,2])
refFlgElem.append(None)
subSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' alpha, beta: fixed by table'),0,WACV)
else:
Items = ['difC','difA','difB','Zero','alpha','beta-0','beta-1','beta-q','sig-0','sig-1','sig-2','sig-q','X','Y','Z']
mainSizer.Add((5,5),0)
mainSizer.Add(subSizer)
mainSizer.Add((5,5),0)
for item in Items:
if item == '':
instSizer.Add((5,5),0)
instSizer.Add((5,5),0)
instSizer.Add((5,5),0)
continue
nDig = (10,3)
if 'beta' in item:
nDig = (12,6)
instSizer.Add(
wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,nDig[1],insDef[item])),
0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=nDig,typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append(nDig)
refFlgElem.append([item,2])
instSizer.Add(RefineBox(item),0,WACV)
elif 'PKS' in insVal['Type']: #peak positions only
key = 'Lam'
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef[key])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,key,nDig=(10,6),typeHint=float,OnLeave=AfterChange)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append([key,1])
dspLst.append([10,6])
instSizer.Add(waveVal,0,WACV)
refFlgElem.append([key,2])
for item in ['Zero',]:
if item in insDef:
labelLst.append(item)
elemKeysLst.append([item,1])
dspLst.append([10,4])
instSizer.Add(
wx.StaticText(G2frame.dataWindow,-1,lblWdef(item,4,insDef[item])),
0,WACV)
itemVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,item,nDig=(10,4),typeHint=float,OnLeave=AfterChange)
instSizer.Add(itemVal,0,WACV)
refFlgElem.append([item,2])
elif 'S' in insVal['Type']: #single crystal data
if 'C' in insVal['Type']: #constant wavelength
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef['Lam'])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,'Lam',nDig=(10,6),typeHint=float,OnLeave=AfterChange)
instSizer.Add(waveVal,0,WACV)
labelLst.append(u'Lam (\xc5)')
waveSizer = wx.BoxSizer(wx.HORIZONTAL)
waveSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Source type: '),0,WACV)
# PATCH?: for now at least, Source is not saved anywhere before here
if 'Source' not in data: data['Source'] = ['CuKa','?']
choice = ['synchrotron','TiKa','CrKa','FeKa','CoKa','CuKa','MoKa','AgKa']
lamPick = wx.ComboBox(G2frame.dataWindow,value=data['Source'][1],choices=choice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
lamPick.Bind(wx.EVT_COMBOBOX, OnLamPick)
waveSizer.Add(lamPick,0,WACV)
instSizer.Add(waveSizer,0,WACV)
elemKeysLst.append(['Lam',1])
dspLst.append([10,6])
refFlgElem.append(None)
else: #time of flight (neutrons)
pass #for now
elif insVal['Type'][0] in ['L','R',]:
if 'C' in insVal['Type']:
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,u' Lam (\xc5): (%10.6f)'%(insDef['Lam'])),
0,WACV)
waveVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,insVal,'Lam',nDig=(10,6),typeHint=float,OnLeave=AfterChange)
instSizer.Add(waveVal,0,WACV)
labelLst.append(u'Lam (\xc5)')
elemKeysLst.append(['Lam',1])
dspLst.append([10,6])
refFlgElem.append(None)
instSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Azimuth: %7.2f'%(insVal['Azimuth'])),0,WACV)
labelLst.append('Azimuth angle')
elemKeysLst.append(['Azimuth',1])
dspLst.append([10,2])
refFlgElem.append(None)
else: #time of flight (neutrons)
pass #for now
mainSizer.Add(instSizer,0)
G2frame.dataWindow.SetDataSize()
# end of MakeParameterWindow
# beginning of UpdateInstrumentGrid code
#patch: make sure all parameter items are lists
patched = 0
for key in data:
if type(data[key]) is tuple:
data[key] = list(data[key])
patched += 1
if patched: print (patched,' instrument parameters changed from tuples')
if 'Z' not in data:
data['Z'] = [0.0,0.0,False]
#end of patch
labelLst,elemKeysLst,dspLst,refFlgElem = [],[],[],[]
instkeys = keycheck(data.keys())
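    # each data[key] is a 3-element list [default value, current value, refine flag];
    # the insDef/insVal/insRef dicts below give each element its own name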
if 'P' in data['Type'][0]: #powder data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = dict(zip(instkeys,[data[key][2] for key in instkeys]))
if 'NC' in data['Type'][0]:
del(insDef['Polariz.'])
del(insVal['Polariz.'])
del(insRef['Polariz.'])
elif 'S' in data['Type'][0]: #single crystal data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
elif 'L' in data['Type'][0]: #low angle data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
elif 'R' in data['Type'][0]: #low angle data
insVal = dict(zip(instkeys,[data[key][1] for key in instkeys]))
insDef = dict(zip(instkeys,[data[key][0] for key in instkeys]))
insRef = {}
RefObj = {}
#These from Intl. Tables C, Table 4.2.2.1, p. 177-179
waves = {'CuKa':[1.54051,1.54433],'TiKa':[2.74841,2.75207],'CrKa':[2.28962,2.29351],
'FeKa':[1.93597,1.93991],'CoKa':[1.78892,1.79278],'MoKa':[0.70926,0.713543],
'AgKa':[0.559363,0.563775]}
# meanwaves computed as (2*Ka1+Ka2)/3
meanwaves = {'CuKa':1.54178,'TiKa':2.74963,'CrKa':2.29092,'FeKa':1.93728,
'CoKa':1.79021,'MoKa':0.71069,'AgKa':0.56083}
Inst2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Instrument Parameters'))[1]
G2gd.SetDataMenuBar(G2frame)
#patch
if 'P' in insVal['Type']: #powder data
if 'C' in insVal['Type']: #constant wavelength
if 'Azimuth' not in insVal:
insVal['Azimuth'] = 0.0
insDef['Azimuth'] = 0.0
insRef['Azimuth'] = False
# if 'T' in insVal['Type']:
# if 'difB' not in insVal:
# insVal['difB'] = 0.0
# insDef['difB'] = 0.0
# insRef['difB'] = False
#end of patch
if 'P' in insVal['Type']: #powder data menu commands
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.InstMenu)
G2frame.GetStatusBar().SetStatusText('NB: Azimuth is used for polarization only',1)
G2frame.Bind(wx.EVT_MENU,OnCalibrate,id=G2G.wxID_INSTCALIB)
G2frame.Bind(wx.EVT_MENU,OnLoad,id=G2G.wxID_INSTLOAD)
G2frame.Bind(wx.EVT_MENU,OnSave,id=G2G.wxID_INSTSAVE)
G2frame.Bind(wx.EVT_MENU,OnSaveAll,id=G2G.wxID_INSTSAVEALL)
G2frame.Bind(wx.EVT_MENU,OnReset,id=G2G.wxID_INSTPRMRESET)
G2frame.Bind(wx.EVT_MENU,OnInstCopy,id=G2G.wxID_INSTCOPY)
G2frame.Bind(wx.EVT_MENU,OnInstFlagCopy,id=G2G.wxID_INSTFLAGCOPY)
G2frame.Bind(wx.EVT_MENU,OnCopy1Val,id=G2G.wxID_INST1VAL)
elif 'L' in insVal['Type'] or 'R' in insVal['Type']: #SASD data menu commands
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.SASDInstMenu)
G2frame.Bind(wx.EVT_MENU,OnInstCopy,id=G2G.wxID_SASDINSTCOPY)
MakeParameterWindow()
################################################################################
##### Sample parameters
################################################################################
def UpdateSampleGrid(G2frame,data):
'''respond to selection of PWDR/SASD Sample Parameters
data tree item.
'''
def OnSampleSave(event):
'''Respond to the Sample Parameters Operations/Save menu
item: writes current parameters to a .samprm file
'''
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II sample parameters file', pth, '',
'sample parameter files (*.samprm)|*.samprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .samprm
filename = os.path.splitext(filename)[0]+'.samprm'
File = open(filename,'w')
File.write("#GSAS-II sample parameter file\n")
File.write("'Type':'"+str(data['Type'])+"'\n")
File.write("'Gonio. radius':"+str(data['Gonio. radius'])+"\n")
if data.get('InstrName'):
File.write("'InstrName':'"+str(data['InstrName'])+"'\n")
File.close()
finally:
dlg.Destroy()
    def OnSampleLoad(event):
        '''Loads sample parameters from a G2 .samprm file
        in response to the Sample Parameters-Operations/Load menu item.
        Note that similar code is found in ReadPowderInstprm (GSASII.py)
        '''
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II sample parameters file', pth, '',
'sample parameter files (*.samprm)|*.samprm',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = {}
while S:
if S[0] == '#':
S = File.readline()
continue
[item,val] = S[:-1].split(':')
                    newItems[item.strip("'")] = eval(val)   #NB: eval trusts the file; values were written by OnSampleSave
S = File.readline()
File.close()
data.update(newItems)
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId,'Sample Parameters'),data)
UpdateSampleGrid(G2frame,data)
finally:
dlg.Destroy()
def OnAllSampleLoad(event):
filename = ''
pth = G2G.GetImportPath(G2frame)
if not pth: pth = '.'
dlg = wx.FileDialog(G2frame, 'Choose multihistogram metadata text file', pth, '',
'metadata file (*.*)|*.*',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
S = File.readline()
newItems = []
itemNames = []
Comments = []
while S:
if S[0] == '#':
Comments.append(S)
S = File.readline()
continue
S = S.replace(',',' ').replace('\t',' ')
Stuff = S[:-1].split()
itemNames.append(Stuff[0])
newItems.append(Stuff[1:])
S = File.readline()
File.close()
finally:
dlg.Destroy()
if not filename:
G2frame.ErrorDialog('Nothing to do','No file selected')
return
dataDict = dict(zip(itemNames,newItems))
ifany = False
Controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls'))
Names = [' ','Phi','Chi','Omega','Time','Temperature','Pressure']
freeNames = {}
for name in ['FreePrm1','FreePrm2','FreePrm3']:
freeNames[Controls[name]] = name
Names.append(Controls[name])
#import imp
#imp.reload(G2G)
dlg = G2G.G2ColumnIDDialog( G2frame,' Choose multihistogram metadata columns:',
'Select columns',Comments,Names,np.array(newItems).T)
try:
if dlg.ShowModal() == wx.ID_OK:
colNames,newData = dlg.GetSelection()
dataDict = dict(zip(itemNames,newData.T))
for item in colNames:
if item != ' ':
ifany = True
finally:
dlg.Destroy()
if not ifany:
G2frame.ErrorDialog('Nothing to do','No columns identified')
return
histList = [G2frame.GPXtree.GetItemText(G2frame.PatternId),]
histList += GetHistsLikeSelected(G2frame)
colIds = {}
for i,name in enumerate(colNames):
if name != ' ':
colIds[name] = i
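        # update each selected histogram: find its row by file name (the 2nd
        # token of the tree label) and map FreePrm columns back to their
        # FreePrm1..3 keys via freeNames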
for hist in histList:
name = hist.split()[1] #this is file name
newItems = {}
for item in colIds:
key = freeNames.get(item,item)
newItems[key] = float(dataDict[name][colIds[item]])
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,hist)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(newItems)
UpdateSampleGrid(G2frame,data)
def OnSetScale(event):
if histName[:4] in ['REFD','PWDR']:
Scale = data['Scale'][0]
dlg = wx.MessageDialog(G2frame,'Rescale data by %.2f?'%(Scale),'Rescale data',wx.OK|wx.CANCEL)
try:
if dlg.ShowModal() == wx.ID_OK:
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,histName)
y,w = G2frame.GPXtree.GetItemPyData(pId)[1][1:3]
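                    # scale intensities; dividing w by Scale**2 keeps the
                    # weights consistent with w = 1/sigma**2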
y *= Scale
w /= Scale**2
data['Scale'][0] = 1.0
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=histName[:4],newPlot=True)
UpdateSampleGrid(G2frame,data)
return
        #SASD rescaling
histList = []
item, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while item:
name = G2frame.GPXtree.GetItemText(item)
if 'SASD' in name and name != histName:
histList.append(name)
item, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
if not len(histList): #nothing to copy to!
return
dlg = wx.SingleChoiceDialog(G2frame,'Select reference histogram for scaling',
'Reference histogram',histList)
try:
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
refHist = histList[sel]
finally:
dlg.Destroy()
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))
Profile = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[1]
Data = [Profile,Limits,data]
refId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,refHist)
refSample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,refId, 'Sample Parameters'))
refLimits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,refId, 'Limits'))
refProfile = G2frame.GPXtree.GetItemPyData(refId)[1]
refData = [refProfile,refLimits,refSample]
G2sasd.SetScale(Data,refData)
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=True)
UpdateSampleGrid(G2frame,data)
def OnRescaleAll(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
x0,y0,w0 = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[1][:3]
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
od = {'label_1':'Scaling range min','value_1':0.0,'label_2':'Scaling range max','value_2':10.}
dlg = G2G.G2MultiChoiceDialog(G2frame,
'Do scaling from\n'+str(hst[5:])+' to...','Rescale histograms', histList,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
Xmin = od['value_1']
Xmax = od['value_2']
iBeg = np.searchsorted(x0,Xmin)
iFin = np.searchsorted(x0,Xmax)
if iBeg > iFin:
wx.MessageBox('Wrong order for Xmin, Xmax','Error',style=wx.ICON_EXCLAMATION)
else:
sum0 = np.sum(y0[iBeg:iFin])
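                    # scale each selected histogram so its integrated intensity
                    # over [Xmin,Xmax] matches that of the current one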
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
xi,yi,wi = G2frame.GPXtree.GetItemPyData(Id)[1][:3]
sumi = np.sum(yi[iBeg:iFin])
if sumi:
Scale = sum0/sumi
yi *= Scale
wi /= Scale**2
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=histName[:4],newPlot=True)
def OnSampleCopy(event):
histType,copyNames = SetCopyNames(histName,data['Type'],
addNames = ['Omega','Chi','Phi','Gonio. radius','InstrName'])
copyDict = {}
for parm in copyNames:
copyDict[parm] = data[parm]
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample params from\n'+str(hst[5:])+' to...',
'Copy sample parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(copy.deepcopy(copyDict))
finally:
dlg.Destroy()
def OnSampleCopySelected(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
Controls = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
# Assemble a list of item labels
TextTable = {key:label for key,label,dig in
SetupSampleLabels(hst,data.get('Type'),Inst['Type'][0])}
# get flexible labels
TextTable.update({key:Controls[key] for key in Controls if key.startswith('FreePrm')})
# add a few extra
TextTable.update({'Type':'Diffractometer type','InstrName':'Instrument Name',})
# Assemble a list of dict entries that would be labeled in the Sample
# params data window (drop ranId and items not used).
keyList = [i for i in data.keys() if i in TextTable]
keyText = [TextTable[i] for i in keyList]
# sort both lists together, ordered by keyText
keyText, keyList = zip(*sorted(list(zip(keyText,keyList)))) # sort lists
selectedKeys = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Select which sample parameters\nto copy',
'Select sample parameters', keyText)
try:
if dlg.ShowModal() == wx.ID_OK:
selectedKeys = [keyList[i] for i in dlg.GetSelections()]
finally:
dlg.Destroy()
if not selectedKeys: return # nothing to copy
copyDict = {}
for parm in selectedKeys:
copyDict[parm] = data[parm]
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample params from\n'+str(hst[5:])+' to...',
'Copy sample parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
sampleData.update(copy.deepcopy(copyDict))
finally:
dlg.Destroy()
G2plt.PlotPatterns(G2frame,plotType=hst[:4],newPlot=False)
def OnSampleFlagCopy(event):
histType,copyNames = SetCopyNames(histName,data['Type'])
flagDict = {}
for parm in copyNames:
flagDict[parm] = data[parm][1]
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample ref. flags from\n'+str(hst[5:])+' to...',
'Copy sample flags', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
sampleData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Sample Parameters'))
for name in copyNames:
sampleData[name][1] = copy.copy(flagDict[name])
finally:
dlg.Destroy()
def OnHistoChange():
'''Called when the histogram type is changed to refresh the window
'''
#wx.CallAfter(UpdateSampleGrid,G2frame,data)
wx.CallLater(100,UpdateSampleGrid,G2frame,data)
def SetNameVal():
inst = instNameVal.GetValue()
data['InstrName'] = inst.strip()
def OnNameVal(event):
event.Skip()
wx.CallAfter(SetNameVal)
def AfterChange(invalid,value,tc):
if invalid:
return
if tc.key == 0 and 'SASD' in histName: #a kluge for Scale!
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=True)
elif tc.key == 'Thick':
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnMaterial(event):
Obj = event.GetEventObject()
Id = Info[Obj.GetId()]
data['Materials'][Id]['Name'] = Obj.GetValue()
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnVolFrac(invalid,value,tc):
Id = Info[tc.GetId()]
        data['Materials'][not Id]['VolFrac'] = 1.-value     #the two volume fractions must sum to 1.
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def OnCopy1Val(event):
'Select one value to copy to many histograms and optionally allow values to be edited in a table'
G2G.SelectEdit1Var(G2frame,data,labelLst,elemKeysLst,dspLst,refFlgElem)
wx.CallAfter(UpdateSampleGrid,G2frame,data)
def SearchAllComments(value,tc,*args,**kwargs):
'''Called when the label for a FreePrm is changed: the comments for all PWDR
histograms are searched for a "label=value" pair that matches the label (case
is ignored) and the values are then set to this value, if it can be converted
to a float.
'''
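        # e.g. typing 'Temperature' as a FreePrm label would match a Comments
        # line such as 'Temperature=295' (hypothetical) and set that FreePrm
        # value to 295.0 in every PWDR histogram that has such a line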
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while Id:
name = G2frame.GPXtree.GetItemText(Id)
if 'PWDR' in name:
Comments = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Comments'))
Sample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'Sample Parameters'))
for i,item in enumerate(Comments):
itemSp = item.split('=')
if value.lower() == itemSp[0].lower():
try:
Sample[tc.key] = float(itemSp[1])
                        except (ValueError,IndexError):
print('"{}" has an invalid value in Comments from {}'
.format(item.strip(),name))
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
wx.CallLater(100,UpdateSampleGrid,G2frame,data)
######## DEBUG #######################################################
#import GSASIIpwdGUI
#reload(GSASIIpwdGUI)
#reload(G2gd)
######################################################################
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
histName = G2frame.GPXtree.GetItemText(G2frame.PatternId)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.SampleMenu)
#G2frame.SetLabel(G2frame.GetLabel().split('||')[0]+' || '+'Sample Parameters')
G2frame.Bind(wx.EVT_MENU, OnSetScale, id=G2G.wxID_SETSCALE)
G2frame.Bind(wx.EVT_MENU, OnSampleCopy, id=G2G.wxID_SAMPLECOPY)
G2frame.Bind(wx.EVT_MENU, OnSampleCopySelected, id=G2G.wxID_SAMPLECOPYSOME)
G2frame.Bind(wx.EVT_MENU, OnSampleFlagCopy, id=G2G.wxID_SAMPLEFLAGCOPY)
G2frame.Bind(wx.EVT_MENU, OnSampleSave, id=G2G.wxID_SAMPLESAVE)
G2frame.Bind(wx.EVT_MENU, OnSampleLoad, id=G2G.wxID_SAMPLELOAD)
G2frame.Bind(wx.EVT_MENU, OnCopy1Val, id=G2G.wxID_SAMPLE1VAL)
G2frame.Bind(wx.EVT_MENU, OnAllSampleLoad, id=G2G.wxID_ALLSAMPLELOAD)
G2frame.Bind(wx.EVT_MENU, OnRescaleAll, id=G2G.wxID_RESCALEALL)
if histName[:4] in ['SASD','REFD','PWDR']:
G2frame.dataWindow.SetScale.Enable(True)
Controls = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
#patch
if 'ranId' not in data:
data['ranId'] = ran.randint(0,sys.maxsize)
    if 'Gonio. radius' not in data:
        data['Gonio. radius'] = 200.0
    if 'Omega' not in data:
        data.update({'Omega':0.0,'Chi':0.0,'Phi':0.0})
if 'Azimuth' not in data:
data['Azimuth'] = 0.0
if type(data['Temperature']) is int:
data['Temperature'] = float(data['Temperature'])
if 'Time' not in data:
data['Time'] = 0.0
if 'FreePrm1' not in Controls:
Controls['FreePrm1'] = 'Sample humidity (%)'
if 'FreePrm2' not in Controls:
Controls['FreePrm2'] = 'Sample voltage (V)'
if 'FreePrm3' not in Controls:
Controls['FreePrm3'] = 'Applied load (MN)'
if 'FreePrm1' not in data:
data['FreePrm1'] = 0.
if 'FreePrm2' not in data:
data['FreePrm2'] = 0.
if 'FreePrm3' not in data:
data['FreePrm3'] = 0.
if 'SurfRoughA' not in data and 'PWDR' in histName:
data['SurfRoughA'] = [0.,False]
data['SurfRoughB'] = [0.,False]
if 'Trans' not in data and 'SASD' in histName:
data['Trans'] = 1.0
if 'SlitLen' not in data and 'SASD' in histName:
data['SlitLen'] = 0.0
if 'Shift' not in data:
data['Shift'] = [0.0,False]
if 'Transparency' not in data:
data['Transparency'] = [0.0,False]
data['InstrName'] = data.get('InstrName','')
#patch end
labelLst,elemKeysLst,dspLst,refFlgElem = [],[],[],[]
parms = SetupSampleLabels(histName,data.get('Type'),Inst['Type'][0])
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add((-1,-1),1,WACV|wx.EXPAND)
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Sample and Experimental Parameters'))
# add help button to bring up help web page
helpkey = G2frame.dataWindow.helpKey
topSizer.Add((30,-1))
topSizer.Add(G2G.HelpButton(G2frame.dataWindow,helpIndex=helpkey))
topSizer.Add((-1,-1),1,WACV|wx.EXPAND)
mainSizer.Add(topSizer,0,WACV|wx.EXPAND)
nameSizer = wx.BoxSizer(wx.HORIZONTAL)
nameSizer.Add(wx.StaticText(G2frame.dataWindow,wx.ID_ANY,' Instrument Name '),0,WACV)
nameSizer.Add((-1,-1),1,WACV)
instNameVal = wx.TextCtrl(G2frame.dataWindow,wx.ID_ANY,data['InstrName'],
size=(200,-1),style=wx.TE_PROCESS_ENTER)
nameSizer.Add(instNameVal)
instNameVal.Bind(wx.EVT_CHAR,OnNameVal)
mainSizer.Add(nameSizer,0,WACV)
mainSizer.Add((5,5),0)
labelLst.append('Instrument Name')
elemKeysLst.append(['InstrName'])
dspLst.append(None)
refFlgElem.append(None)
if 'PWDR' in histName:
nameSizer = wx.BoxSizer(wx.HORIZONTAL)
nameSizer.Add(wx.StaticText(G2frame.dataWindow,wx.ID_ANY,' Diffractometer type: '),
0,WACV)
if 'T' in Inst['Type'][0]:
choices = ['Debye-Scherrer',]
else:
choices = ['Debye-Scherrer','Bragg-Brentano',]
histoType = G2G.G2ChoiceButton(G2frame.dataWindow,choices,
strLoc=data,strKey='Type',
onChoice=OnHistoChange)
nameSizer.Add(histoType)
mainSizer.Add(nameSizer,0,WACV)
mainSizer.Add((5,5),0)
parmSizer = wx.FlexGridSizer(0,2,5,0)
for key,lbl,nDig in parms:
labelLst.append(lbl.strip().strip(':').strip())
dspLst.append(nDig)
if 'list' in str(type(data[key])):
parmRef = G2G.G2CheckBox(G2frame.dataWindow,' '+lbl,data[key],1)
parmSizer.Add(parmRef,0,WACV|wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data[key],0,
nDig=nDig,typeHint=float,OnLeave=AfterChange)
elemKeysLst.append([key,0])
refFlgElem.append([key,1])
else:
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' '+lbl),
0,WACV|wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,key,
typeHint=float,OnLeave=AfterChange)
elemKeysLst.append([key])
refFlgElem.append(None)
parmSizer.Add(parmVal,0,WACV)
Info = {}
for key in ('FreePrm1','FreePrm2','FreePrm3'):
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,Controls,key,typeHint=str,
notBlank=False,OnLeave=SearchAllComments)
parmSizer.Add(parmVal,1,wx.EXPAND)
parmVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,key,typeHint=float)
parmSizer.Add(parmVal,0,WACV)
labelLst.append(Controls[key])
dspLst.append(None)
elemKeysLst.append([key])
refFlgElem.append(None)
mainSizer.Add(parmSizer,0)
mainSizer.Add((0,5),0)
if histName[:4] in ['SASD',]:
rho = [0.,0.]
anomrho = [0.,0.]
mu = 0.
subSizer = wx.FlexGridSizer(0,4,5,5)
Substances = G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Substances'))
for Id,item in enumerate(data['Materials']):
subSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Material: '),0,WACV)
matsel = wx.ComboBox(G2frame.dataWindow,value=item['Name'],choices=list(Substances['Substances'].keys()),
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Info[matsel.GetId()] = Id
matsel.Bind(wx.EVT_COMBOBOX,OnMaterial)
subSizer.Add(matsel,0,WACV)
subSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Volume fraction: '),0,WACV)
volfrac = G2G.ValidatedTxtCtrl(G2frame.dataWindow,item,'VolFrac',
min=0.,max=1.,nDig=(10,3),typeHint=float,OnLeave=OnVolFrac)
subSizer.Add(volfrac,0,WACV)
try:
material = Substances['Substances'][item['Name']]
except KeyError:
print('ERROR - missing substance: '+item['Name'])
material = Substances['Substances']['vacuum']
mu += item['VolFrac']*material.get('XAbsorption',0.)
rho[Id] = material['Scatt density']
anomrho[Id] = material.get('XAnom density',0.)
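            # contrast terms are (delta-rho)**2 for the normal and anomalous
            # scattering-length densities of the two materials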
data['Contrast'] = [(rho[1]-rho[0])**2,(anomrho[1]-anomrho[0])**2]
mainSizer.Add(subSizer,0)
conSizer = wx.BoxSizer(wx.HORIZONTAL)
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Contrast: %10.2f '%(data['Contrast'][0])),0,WACV)
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Anom. Contrast: %10.2f '%(data['Contrast'][1])),0,WACV)
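        # Beer-Lambert attenuation: calculated transmission = exp(-mu*t), with mu
        # the volume-fraction-weighted absorption and t the sample thickness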
mut = mu*data['Thick']
conSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Transmission (calc): %10.3f '%(np.exp(-mut))),0,WACV)
mainSizer.Add(conSizer,0)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Indexing Peaks
################################################################################
def UpdateIndexPeaksGrid(G2frame, data):
'''respond to selection of PWDR Index Peak List data
tree item.
'''
bravaisSymb = ['Fm3m','Im3m','Pm3m','R3-H','P6/mmm','I4/mmm',
'P4/mmm','Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm','C2/m','P2/m','C1','P1']
IndexId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Index Peak List')
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
limitId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits')
Limits = G2frame.GPXtree.GetItemPyData(limitId)
def RefreshIndexPeaksGrid(event):
r,c = event.GetRow(),event.GetCol()
peaks = G2frame.IndexPeaksTable.GetData()
if c == 2:
peaks[r][c] = not peaks[r][c]
G2frame.IndexPeaksTable.SetData(peaks)
G2frame.indxPeaks.ForceRefresh()
if 'PKS' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2plt.PlotPowderLines(G2frame)
else:
G2plt.PlotPatterns(G2frame,plotType='PWDR')
def OnReload(event):
peaks = []
sigs = []
Peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Peak List'))
for ip,peak in enumerate(Peaks['peaks']):
dsp = G2lat.Pos2dsp(Inst,peak[0])
peaks.append([peak[0],peak[2],True,False,0,0,0,dsp,0.0]) #SS?
try:
sig = Peaks['sigDict']['pos'+str(ip)]
except KeyError:
sig = 0.
sigs.append(sig)
data = [peaks,sigs]
G2frame.GPXtree.SetItemPyData(IndexId,data)
UpdateIndexPeaksGrid(G2frame,data)
def OnSave(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose Index peaks csv file', pth, '',
'indexing peaks file (*.csv)|*.csv',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
filename = os.path.splitext(filename)[0]+'.csv'
File = open(filename,'w')
names = 'h,k,l,position,intensity,d-Obs,d-calc\n'
File.write(names)
fmt = '%d,%d,%d,%.4f,%.1f,%.5f,%.5f\n'
for refl in data[0]:
if refl[3]:
File.write(fmt%(refl[4],refl[5],refl[6],refl[0],refl[1],refl[7],refl[8]))
File.close()
finally:
dlg.Destroy()
def KeyEditPickGrid(event):
colList = G2frame.indxPeaks.GetSelectedCols()
data = G2frame.GPXtree.GetItemPyData(IndexId)
        if event.GetKeyCode() in (wx.WXK_RETURN, wx.WXK_CONTROL, wx.WXK_SHIFT):
            event.Skip(True)
elif colList:
G2frame.indxPeaks.ClearSelection()
key = event.GetKeyCode()
for col in colList:
if G2frame.IndexPeaksTable.GetColLabelValue(col) in ['use',]:
if key == 89: #'Y'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col]=True
elif key == 78: #'N'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col]=False
elif key == 83: # 'S'
for row in range(G2frame.IndexPeaksTable.GetNumberRows()): data[0][row][col] = not data[0][row][col]
if 'PWD' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.IndPeaksMenu)
G2frame.Bind(wx.EVT_MENU, OnReload, id=G2G.wxID_INDXRELOAD)
G2frame.Bind(wx.EVT_MENU, OnSave, id=G2G.wxID_INDEXSAVE)
G2frame.dataWindow.IndexPeaks.Enable(False)
G2frame.IndexPeaksTable = []
if len(data[0]):
G2frame.dataWindow.IndexPeaks.Enable(True)
Unit = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List'))
if Unit:
if len(Unit) == 4: #patch
Unit.append({})
if len(Unit) == 5: #patch
Unit.append({})
controls,bravais,cellist,dmin,ssopt,magcells = Unit
if 'T' in Inst['Type'][0]: #TOF - use other limit!
dmin = G2lat.Pos2dsp(Inst,Limits[1][0])
else:
dmin = G2lat.Pos2dsp(Inst,Limits[1][1])
G2frame.HKL = []
if ssopt.get('Use',False):
cell = controls[6:12]
A = G2lat.cell2A(cell)
ibrav = bravaisSymb.index(controls[5])
spc = controls[13]
SGData = G2spc.SpcGroup(spc)[1]
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'])[1]
Vec = ssopt['ModVec']
maxH = ssopt['maxH']
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,Vec,maxH,A)
G2frame.HKL = np.array(G2frame.HKL)
data[0] = G2indx.IndexSSPeaks(data[0],G2frame.HKL)[1]
else: #select cell from table - no SS
for i,cell in enumerate(cellist):
if cell[-2]:
ibrav = cell[2]
A = G2lat.cell2A(cell[3:9])
G2frame.HKL = G2lat.GenHBravais(dmin,ibrav,A)
for hkl in G2frame.HKL:
hkl.insert(4,G2lat.Dsp2pos(Inst,hkl[3]))
G2frame.HKL = np.array(G2frame.HKL)
data[0] = G2indx.IndexPeaks(data[0],G2frame.HKL)[1]
break
rowLabels = []
for i in range(len(data[0])): rowLabels.append(str(i+1))
colLabels = ['position','intensity','use','indexed','h','k','l','d-obs','d-calc']
Types = [wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_FLOAT+':10,1',]+2*[wg.GRID_VALUE_BOOL,]+ \
3*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_FLOAT+':10,5',]
if len(data[0]) and len(data[0][0]) > 9:
colLabels = ['position','intensity','use','indexed','h','k','l','m','d-obs','d-calc']
Types = [wg.GRID_VALUE_FLOAT+':10,4',wg.GRID_VALUE_FLOAT+':10,1',]+2*[wg.GRID_VALUE_BOOL,]+ \
4*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_FLOAT+':10,5',]
G2frame.GPXtree.SetItemPyData(IndexId,data)
G2frame.IndexPeaksTable = G2G.Table(data[0],rowLabels=rowLabels,colLabels=colLabels,types=Types)
G2frame.dataWindow.currentGrids = []
G2frame.indxPeaks = G2G.GSGrid(parent=G2frame.dataWindow)
G2frame.indxPeaks.SetTable(G2frame.IndexPeaksTable, True)
G2frame.indxPeaks.SetScrollRate(10,10)
XY = []
Sigs = []
for r in range(G2frame.indxPeaks.GetNumberRows()):
for c in range(G2frame.indxPeaks.GetNumberCols()):
if c == 2:
G2frame.indxPeaks.SetReadOnly(r,c,isReadOnly=False)
else:
G2frame.indxPeaks.SetReadOnly(r,c,isReadOnly=True)
if data[0][r][2] and data[0][r][3]:
XY.append([data[0][r][-1],data[0][r][0]])
try:
sig = data[1][r]
except IndexError:
sig = 0.
Sigs.append(sig)
G2frame.indxPeaks.Bind(wg.EVT_GRID_CELL_LEFT_CLICK, RefreshIndexPeaksGrid)
G2frame.indxPeaks.Bind(wx.EVT_KEY_DOWN, KeyEditPickGrid)
G2frame.indxPeaks.AutoSizeColumns(False)
if len(XY):
XY = np.array(XY)
G2plt.PlotCalib(G2frame,Inst,XY,Sigs,newPlot=True)
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(G2frame.indxPeaks,0,wx.ALL|wx.EXPAND,1)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Unit cells
################################################################################
def UpdateUnitCellsGrid(G2frame, data):
'''respond to selection of PWDR Unit Cells data tree item.
'''
G2frame.ifGetExclude = False
UnitCellsId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List')
SPGlist = G2spc.spglist
bravaisSymb = ['Fm3m','Im3m','Pm3m','R3-H','P6/mmm','I4/mmm','P4/mmm',
'Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm','I2/m','C2/m','P2/m','P1','C1']
spaceGroups = ['F m 3 m','I m 3 m','P m 3 m','R 3 m','P 6/m m m','I 4/m m m',
'P 4/m m m','F m m m','I m m m','A m m m','B m m m','C m m m','P m m m','I 2/m','C 2/m','P 2/m','P -1','C -1']
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))[1]
if 'C' in Inst['Type'][0] or 'PKS' in Inst['Type'][0]:
wave = G2mth.getWave(Inst)
dmin = G2lat.Pos2dsp(Inst,Limits[1])
else:
difC = Inst['difC'][1]
dmin = G2lat.Pos2dsp(Inst,Limits[0])
def SetLattice(controls):
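        # impose the symmetry constraints of the selected Bravais lattice on the
        # cell parameters, then recompute the cell volume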
ibrav = bravaisSymb.index(controls[5])
if controls[5] in ['Fm3m','Im3m','Pm3m']:
controls[7] = controls[8] = controls[6]
controls[9] = controls[10] = controls[11] = 90.
        elif controls[5] in ['R3-H','P6/mmm','I4/mmm','P4/mmm']:
controls[7] = controls[6]
controls[9] = controls[10] = controls[11] = 90.
if controls[5] in ['R3-H','P6/mmm']:
controls[11] = 120.
elif controls[5] in ['Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm']:
controls[9] = controls[10] = controls[11] = 90.
elif controls[5] in ['C2/m','P2/m','I2/m']:
controls[9] = controls[11] = 90. # b unique
controls[12] = G2lat.calc_V(G2lat.cell2A(controls[6:12]))
return ibrav
def OnNcNo(event):
controls[2] = NcNo.GetValue()
def OnIfX20(event):
G2frame.ifX20 = x20.GetValue()
def OnBravais(event):
Obj = event.GetEventObject()
bravais[bravList.index(Obj.GetId())] = Obj.GetValue()
# wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnZeroVar(event):
controls[0] = zeroVar.GetValue()
def OnSSopt(event):
if controls[5] in ['Fm3m','Im3m','Pm3m']:
SSopt.SetValue(False)
G2frame.ErrorDialog('Cubic lattice','Incommensurate superlattice not possible with a cubic lattice')
return
ssopt['Use'] = SSopt.GetValue()
if 'ssSymb' not in ssopt:
ssopt.update({'ssSymb':'(abg)','ModVec':[0.1,0.1,0.1],'maxH':1})
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnSelMG(event):
ssopt['ssSymb'] = selMG.GetValue()
Vec = ssopt['ModVec']
modS = G2spc.splitSSsym(ssopt['ssSymb'])[0]
ssopt['ModVec'] = G2spc.SSGModCheck(Vec,modS)[0]
        print (' Selecting: '+controls[13]+ssopt['ssSymb']+' maxH: '+str(ssopt['maxH']))
OnHklShow(event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnModVal(invalid,value,tc):
OnHklShow(tc.event)
def OnMoveMod(event):
Obj = event.GetEventObject()
ObjId = Obj.GetId()
Id,valObj = Indx[ObjId]
move = Obj.GetValue()*0.01
Obj.SetValue(0)
value = min(0.98,max(-0.98,float(valObj.GetValue())+move))
valObj.SetValue('%.4f'%(value))
ssopt['ModVec'][Id] = value
OnHklShow(event)
def OnMaxMH(event):
ssopt['maxH'] = int(maxMH.GetValue())
        print (' Selecting: '+controls[13]+ssopt['ssSymb']+' maxH: '+str(ssopt['maxH']))
OnHklShow(event)
def OnButton(xpos,ypos):
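        # map a click on the modulation-vector search map onto the two free
        # components of the modulation vector for the current superspace symbol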
modSym = ssopt['ssSymb'].split(')')[0]+')'
if modSym in ['(a0g)','(a1/2g)']:
ssopt['ModVec'][0] = xpos
ssopt['ModVec'][2] = ypos
elif modSym in ['(0bg)','(1/2bg)']:
ssopt['ModVec'][1] = xpos
ssopt['ModVec'][2] = ypos
elif modSym in ['(ab0)','(ab1/2)']:
ssopt['ModVec'][0] = xpos
ssopt['ModVec'][1] = ypos
vec = ssopt['ModVec']
print(' Trying: %s %s modulation vector = %.3f %.3f %.3f'%(controls[13],ssopt['ssSymb'],vec[0],vec[1],vec[2]))
OnHklShow(None)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnFindOneMV(event):
Peaks = np.copy(peaks[0])
print (' Trying: ',controls[13],ssopt['ssSymb'], ' maxH: 1')
dlg = wx.ProgressDialog('Elapsed time','Modulation vector search',
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE)
try:
ssopt['ModVec'],result = G2indx.findMV(Peaks,controls,ssopt,Inst,dlg)
if len(result[0]) == 2:
G2plt.PlotXYZ(G2frame,result[2],1./result[3],labelX='a',labelY='g',
newPlot=True,Title='Modulation vector search',buttonHandler=OnButton)
finally:
dlg.Destroy()
OnHklShow(event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnFindMV(event):
best = 1.
bestSS = ''
for ssSym in ssChoice:
ssopt['ssSymb'] = ssSym
Peaks = np.copy(peaks[0])
ssopt['ModVec'] = G2spc.SSGModCheck(ssopt['ModVec'],G2spc.splitSSsym(ssSym)[0],True)[0]
print (' Trying: '+controls[13]+ssSym+ ' maxH: 1')
ssopt['ModVec'],result = G2indx.findMV(Peaks,controls,ssopt,Inst,dlg=None)
OnHklShow(event)
if result[1] < best:
bestSS = ssSym
best = result[1]
ssopt['ssSymb'] = bestSS
ssopt['ModVec'],result = G2indx.findMV(Peaks,controls,ssopt,Inst,dlg=None)
if len(result[0]) == 2:
G2plt.PlotXYZ(G2frame,result[2],1./result[3],labelX='a',labelY='g',
newPlot=True,Title='Modulation vector search')
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnBravSel(event):
brav = bravSel.GetString(bravSel.GetSelection())
controls[5] = brav
controls[13] = SPGlist[brav][0]
ssopt['Use'] = False
wx.CallLater(100,UpdateUnitCellsGrid,G2frame,data)
def OnSpcSel(event):
controls[13] = spcSel.GetString(spcSel.GetSelection())
ssopt['SGData'] = G2spc.SpcGroup(controls[13])[1]
ssopt['Use'] = False
G2frame.dataWindow.RefineCell.Enable(True)
OnHklShow(event)
wx.CallLater(100,UpdateUnitCellsGrid,G2frame,data)
def SetCellValue(Obj,ObjId,value):
if controls[5] in ['Fm3m','Im3m','Pm3m']:
controls[6] = controls[7] = controls[8] = value
controls[9] = controls[10] = controls[11] = 90.0
Obj.SetValue(controls[6])
elif controls[5] in ['R3-H','P6/mmm','I4/mmm','P4/mmm']:
if ObjId == 0:
controls[6] = controls[7] = value
Obj.SetValue(controls[6])
else:
controls[8] = value
Obj.SetValue(controls[8])
controls[9] = controls[10] = controls[11] = 90.0
if controls[5] in ['R3-H','P6/mmm']:
controls[11] = 120.
elif controls[5] in ['Fmmm','Immm','Cmmm','Pmmm']:
controls[6+ObjId] = value
Obj.SetValue(controls[6+ObjId])
controls[9] = controls[10] = controls[11] = 90.0
elif controls[5] in ['I2/m','C2/m','P2/m']:
controls[9] = controls[11] = 90.0
if ObjId != 3:
controls[6+ObjId] = value
Obj.SetValue(controls[6+ObjId])
else:
controls[10] = value
Obj.SetValue(controls[10])
        else:
            controls[6+ObjId] = value
            Obj.SetValue(controls[6+ObjId])
controls[12] = G2lat.calc_V(G2lat.cell2A(controls[6:12]))
volVal.SetValue("%.3f"%(controls[12]))
def OnMoveCell(event):
Obj = event.GetEventObject()
ObjId = cellList.index(Obj.GetId())
valObj = valDict[Obj.GetId()]
inc = float(shiftChoices[shiftSel.GetSelection()][:-1])
move = Obj.GetValue() # +1 or -1
Obj.SetValue(0)
value = float(valObj.GetValue()) * (1. + move*inc/100.)
SetCellValue(valObj,ObjId//2,value)
OnHklShow(event)
def OnExportCells(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose Indexing Result csv file', pth, '',
'indexing result file (*.csv)|*.csv',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
filename = os.path.splitext(filename)[0]+'.csv'
File = open(filename,'w')
names = 'M20,X20,Bravais,a,b,c,alpha,beta,gamma,volume\n'
File.write(names)
fmt = '%.2f,%d,%s,%.4f,%.4f,%.4f,%.2f,%.2f,%.2f,%.3f\n'
for cell in cells:
File.write(fmt%(cell[0],cell[1],bravaisSymb[cell[2]], cell[3],cell[4],cell[5], cell[6],cell[7],cell[8],cell[9]))
File.close()
finally:
dlg.Destroy()
def OnCellChange(invalid,value,tc):
if invalid:
return
SetCellValue(tc,Info[tc.GetId()],value)
OnHklShow(tc.event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnHklShow(event):
PatternId = G2frame.PatternId
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Index Peak List'))
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Unit Cells List'))
# recompute dmin in case limits were changed
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))[1]
if 'C' in Inst['Type'][0] or 'PKS' in Inst['Type'][0]:
dmin = G2lat.Pos2dsp(Inst,Limits[1])
else:
dmin = G2lat.Pos2dsp(Inst,Limits[0])
cell = controls[6:12]
A = G2lat.cell2A(cell)
spc = controls[13]
SGData = ssopt.get('SGData',G2spc.SpcGroup(spc)[1])
Symb = SGData['SpGrp']
M20 = X20 = 0.
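        # M20 is the de Wolff figure of merit and X20 the count of unindexed
        # lines, both as computed in G2indx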
if ssopt.get('Use',False) and ssopt.get('ssSymb',''):
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'])[1]
if SSGData is None:
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'][:-1])[1] #skip trailing 's' for mag.
Symb = SSGData['SSpGrp']
Vec = ssopt['ModVec']
maxH = ssopt['maxH']
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,Vec,maxH,A)
if len(peaks[0]):
peaks = [G2indx.IndexSSPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #keep esds from peak fit
M20,X20 = G2indx.calc_M20SS(peaks[0],G2frame.HKL)
else:
G2frame.HKL = G2pwd.getHKLpeak(dmin,SGData,A,Inst)
if len(peaks[0]):
peaks = [G2indx.IndexPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #keep esds from peak fit
M20,X20 = G2indx.calc_M20(peaks[0],G2frame.HKL)
G2frame.HKL = np.array(G2frame.HKL)
if len(G2frame.HKL):
print (' new M20,X20: %.2f %d, fraction found: %.3f for %s' \
%(M20,X20,float(len(peaks[0]))/len(G2frame.HKL),Symb))
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Index Peak List'),peaks)
if 'PKS' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2plt.PlotPowderLines(G2frame)
else:
G2plt.PlotPatterns(G2frame)
def OnSortCells(event):
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(UnitCellsId)
c = event.GetCol()
if colLabels[c] == 'M20':
cells = G2indx.sortM20(cells)
elif colLabels[c] in ['X20','Bravais','a','b','c','alpha','beta','gamma','Volume']:
if c == 1:
c += 1 #X20 before Use
cells = G2indx.sortCells(cells,c-1) #an extra column (Use) not in cells
else:
return
data = [controls,bravais,cells,dmin,ssopt,magcells]
G2frame.GPXtree.SetItemPyData(UnitCellsId,data)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def CopyUnitCell(event):
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(UnitCellsId)
controls = controls[:5]+10*[0.,]
if len(cells):
for Cell in cells:
if Cell[-2]:
break
cell = Cell[2:9]
controls[4] = 1
controls[5] = bravaisSymb[cell[0]]
controls[6:13] = cell[1:8]
controls[13] = spaceGroups[bravaisSymb.index(controls[5])]
G2frame.dataWindow.RefineCell.Enable(True)
elif magcells:
for phase in magcells:
if phase['Use']:
break
SGData = phase['SGData']
controls[4] = 1
controls[5] = (SGData['SGLatt']+SGData['SGLaue']).replace('-','')
if controls[5][1:] == 'm3': controls[5] += 'm'
if 'P3' in controls[5] or 'P-3' in controls[5]: controls[5] = 'P6/mmm'
if 'R' in controls[5]: controls[5] = 'R3-H'
controls[6:13] = phase['Cell']
controls[13] = SGData['SpGrp']
ssopt['SGData'] = SGData
data = [controls,bravais,cells,dminx,ssopt,magcells]
G2frame.dataWindow.RunSubGroups.Enable(True)
G2frame.GPXtree.SetItemPyData(UnitCellsId,data)
OnHklShow(None)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def LoadUnitCell(event):
UnitCellsId = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List')
data = G2frame.GPXtree.GetItemPyData(UnitCellsId)
if len(data) < 5:
data.append({})
controls,bravais,cells,dminx,ssopt = data[:5]
magcells = [] #clear away old mag cells list (if any)
controls = controls[:14]+[['0','0','0',' ',' ',' '],[],]
data = controls,bravais,cells,dminx,ssopt,magcells
G2frame.GPXtree.SetItemPyData(UnitCellsId,data)
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Phases')
if not pId: return
Phases = []
item, cookie = G2frame.GPXtree.GetFirstChild(pId)
while item:
pName = G2frame.GPXtree.GetItemText(item)
Phase = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,pId,pName))
if not Phase['General']['SGData'].get('SGFixed',False):
Phases.append(G2frame.GPXtree.GetItemText(item))
item, cookie = G2frame.GPXtree.GetNextChild(pId, cookie)
if not len(Phases):
            wx.MessageBox('NB: Magnetic phases from mcif files are not suitable for this purpose,\n because their space group symbols do not match their operators',
                caption='No usable space groups',style=wx.ICON_EXCLAMATION)
return
pNum = G2G.ItemSelector(Phases,G2frame,'Select phase',header='Phase')
if pNum is None: return
Phase = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,pId,Phases[pNum]))
Phase['magPhases'] = G2frame.GPXtree.GetItemText(G2frame.PatternId) #use as reference for recovering possible phases
Cell = Phase['General']['Cell']
SGData = Phase['General']['SGData']
if 'SGGray' not in SGData:
SGData['SGGray'] = False
if Phase['General']['Type'] == 'nuclear' and 'MagSpGrp' in SGData:
SGData = G2spc.SpcGroup(SGData['SpGrp'])[1]
G2frame.dataWindow.RunSubGroups.Enable(True)
ssopt.update({'Use':False,'ssSymb':'(abg)','ModVec':[0.1,0.1,0.1],'maxH':1})
if 'SuperSg' in Phase['General'] or SGData.get('SGGray',False):
ssopt.update({'SGData':SGData,'ssSymb':Phase['General']['SuperSg'],'ModVec':Phase['General']['SuperVec'][0],'Use':True,'maxH':1})
ssopt['ssSymb'] = ssopt['ssSymb'].replace(',','')
ssSym = ssopt['ssSymb']
if SGData.get('SGGray',False):
ssSym = ssSym[:-1]
if ssSym not in G2spc.SSChoice(SGData):
ssSym = ssSym.split(')')[0]+')000'
ssopt['ssSymb'] = ssSym
wx.MessageBox('Super space group '+SGData['SpGrp']+ssopt['ssSymb']+' not valid;\n It is set to '+ssSym,
caption='Unusable super space group',style=wx.ICON_EXCLAMATION)
G2frame.dataWindow.RunSubGroups.Enable(False)
SpGrp = SGData['SpGrp']
if 'mono' in SGData['SGSys']:
SpGrp = G2spc.fixMono(SpGrp)
            if SpGrp is None:
wx.MessageBox('Monoclinic '+SGData['SpGrp']+' not usable here',caption='Unusable space group',style=wx.ICON_EXCLAMATION)
return
controls[13] = SpGrp
controls[4] = 1
controls[5] = (SGData['SGLatt']+SGData['SGLaue']).replace('-','')
if controls[5][1:] == 'm3': controls[5] += 'm'
if 'P3' in controls[5] or 'P-3' in controls[5]: controls[5] = 'P6/mmm'
if 'R' in controls[5]: controls[5] = 'R3-H'
controls[6:13] = Cell[1:8]
cx,ct,cs,cia = Phase['General']['AtomPtrs']
controls[15] = [atom[:cx+3] for atom in Phase['Atoms']]
if 'N' in Inst['Type'][0]:
if not ssopt.get('Use',False):
G2frame.dataWindow.RunSubGroupsMag.Enable(True)
# G2frame.dataWindow.RunSubGroups.Enable(True)
data = controls,bravais,cells,dminx,ssopt,magcells
G2frame.GPXtree.SetItemPyData(UnitCellsId,data)
G2frame.dataWindow.RefineCell.Enable(True)
OnHklShow(None)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def ImportUnitCell(event):
controls,bravais,cells,dminx,ssopt = G2frame.GPXtree.GetItemPyData(UnitCellsId)[:5]
reqrdr = G2frame.dataWindow.ReImportMenuId.get(event.GetId())
rdlist = G2frame.OnImportGeneric(reqrdr,
G2frame.ImportPhaseReaderlist,'phase')
if len(rdlist) == 0: return
rd = rdlist[0]
Cell = rd.Phase['General']['Cell']
SGData = rd.Phase['General']['SGData']
if '1 1' in SGData['SpGrp']:
            wx.MessageBox('Monoclinic '+SGData['SpGrp']+' not usable here',caption='Unusable space group',style=wx.ICON_EXCLAMATION)
return
controls[4] = 1
controls[5] = (SGData['SGLatt']+SGData['SGLaue']).replace('-','')
if controls[5][1:] == 'm3': controls[5] += 'm'
if 'P3' in controls[5] or 'P-3' in controls[5]: controls[5] = 'P6/mmm'
if 'R' in controls[5]: controls[5] = 'R3-H'
controls[6:13] = Cell[1:8]
controls[13] = SGData['SpGrp']
# G2frame.GPXtree.SetItemPyData(UnitCellsId,[controls,bravais,cells,dmin,ssopt])
# G2frame.dataWindow.RunSubGroups.Enable(True)
G2frame.dataWindow.RefineCell.Enable(True)
OnHklShow(None)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def RefineCell(event):
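        '''Least-squares refinement of the unit cell (with zero offset and, for modulated
        structures, the modulation vector) against the Index Peak List; the result is
        added at the top of the cells table
        '''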
def cellPrint(ibrav,A):
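            'print the cell parameters & volume for the given Bravais lattice symbol'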
cell = G2lat.A2cell(A)
Vol = G2lat.calc_V(A)
if ibrav in ['Fm3m','Im3m','Pm3m']:
print (" %s%10.6f" % ('a =',cell[0]))
elif ibrav in ['R3-H','P6/mmm','I4/mmm','P4/mmm']:
print (" %s%10.6f %s%10.6f %s%12.3f" % ('a =',cell[0],' c =',cell[2],' volume =',Vol))
            elif ibrav in ['Fmmm','Immm','Ammm','Bmmm','Cmmm','Pmmm']:
print (" %s%10.6f %s%10.6f %s%10.6f %s%12.3f" % ('a =',cell[0],'b =',cell[1],'c =',cell[2],' volume =',Vol))
elif ibrav in ['C2/m','P2/m']:
print (" %s%10.6f %s%10.6f %s%10.6f %s%8.3f %s%12.3f" % ('a =',cell[0],'b =',cell[1],'c =',cell[2],'beta =',cell[4],' volume =',Vol))
else:
print (" %s%10.6f %s%10.6f %s%10.6f" % ('a =',cell[0],'b =',cell[1],'c =',cell[2]))
print (" %s%8.3f %s%8.3f %s%8.3f %s%12.3f" % ('alpha =',cell[3],'beta =',cell[4],'gamma =',cell[5],' volume =',Vol))
def vecPrint(Vec):
print (' %s %10.5f %10.5f %10.5f'%('Modulation vector:',Vec[0],Vec[1],Vec[2]))
PatternId = G2frame.PatternId
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Index Peak List'))
print (' Refine cell')
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Unit Cells List'))
cell = controls[6:12]
A = G2lat.cell2A(cell)
ibrav = bravaisSymb.index(controls[5])
# if not controls[13]: controls[13] = SPGlist[controls[5]][0] #don't know if this is needed?
SGData = G2spc.SpcGroup(controls[13])[1]
if 'C' in Inst['Type'][0] or 'PKS' in Inst['Type'][0]:
if ssopt.get('Use',False):
vecFlags = [True if x in ssopt['ssSymb'] else False for x in ['a','b','g']]
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'])[1]
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,ssopt['ModVec'],ssopt['maxH'],A)
peaks = [G2indx.IndexSSPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #put peak fit esds back in peaks
Lhkl,M20,X20,Aref,Vec,Zero = \
G2indx.refinePeaksZSS(peaks[0],wave,Inst,SGData,SSGData,ssopt['maxH'],ibrav,A,ssopt['ModVec'],vecFlags,controls[1],controls[0])
else:
G2frame.HKL = G2pwd.getHKLpeak(dmin,SGData,A,Inst)
peaks = [G2indx.IndexPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #put peak fit esds back in peaks
Lhkl,M20,X20,Aref,Zero = G2indx.refinePeaksZ(peaks[0],wave,ibrav,A,controls[1],controls[0])
else:
if ssopt.get('Use',False):
vecFlags = [True if x in ssopt['ssSymb'] else False for x in ['a','b','g']]
SSGData = G2spc.SSpcGroup(SGData,ssopt['ssSymb'])[1]
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,ssopt['ModVec'],ssopt['maxH'],A)
peaks = [G2indx.IndexSSPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #put peak fit esds back in peaks
Lhkl,M20,X20,Aref,Vec,Zero = \
G2indx.refinePeaksTSS(peaks[0],difC,Inst,SGData,SSGData,ssopt['maxH'],ibrav,A,ssopt['ModVec'],vecFlags,controls[1],controls[0])
else:
G2frame.HKL = G2pwd.getHKLpeak(dmin,SGData,A,Inst)
peaks = [G2indx.IndexPeaks(peaks[0],G2frame.HKL)[1],peaks[1]] #put peak fit esds back in peaks
Lhkl,M20,X20,Aref,Zero = G2indx.refinePeaksT(peaks[0],difC,ibrav,A,controls[1],controls[0])
controls[1] = Zero
controls[6:12] = G2lat.A2cell(Aref)
controls[12] = G2lat.calc_V(Aref)
cells = G2frame.GPXtree.GetItemPyData(UnitCellsId)[2]
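        # each entry in cells: [M20,X20,ibrav,a,b,c,alpha,beta,gamma,volume,use,keep]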
for cell in cells:
cell[-2] = False
cells.insert(0,[M20,X20,ibrav]+controls[6:13]+[True,False])
if ssopt.get('Use',False):
ssopt['ModVec'] = Vec
G2frame.HKL = G2pwd.getHKLMpeak(dmin,Inst,SGData,SSGData,ssopt['ModVec'],ssopt['maxH'],A)
else:
G2frame.HKL = G2pwd.getHKLpeak(dmin,SGData,A,Inst)
G2frame.HKL = np.array(G2frame.HKL)
data = [controls,bravais,cells,dmin,ssopt,magcells]
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Unit Cells List'),data)
print (" %s%10.3f" % ('refinement M20 = ',M20))
print (' unindexed lines = %d'%X20)
cellPrint(controls[5],Aref)
ip = 4
if ssopt.get('Use',False):
vecPrint(Vec)
ip = 5
for hkl in G2frame.HKL:
hkl[ip] = G2lat.Dsp2pos(Inst,hkl[ip-1])+controls[1]
G2frame.HKL = np.array(G2frame.HKL)
if 'PKS' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2plt.PlotPowderLines(G2frame)
else:
G2plt.PlotPatterns(G2frame)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnIndexPeaks(event):
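        '''Autoindex the Index Peak List over the selected Bravais lattices;
        previously "kept" cells are retained and merged into the sorted results
        '''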
PatternId = G2frame.PatternId
print ('Peak Indexing')
keepcells = []
try:
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Unit Cells List'))
for cell in cells:
if cell[11]:
cell[10] = False #clear selection flag on keepers
keepcells.append(cell)
except IndexError:
pass
except ValueError:
G2frame.ErrorDialog('Error','Need to set controls in Unit Cell List first')
return
if ssopt.get('Use',False):
G2frame.ErrorDialog('Super lattice error','Indexing not available for super lattices')
return
if True not in bravais:
G2frame.ErrorDialog('Error','No Bravais lattices selected')
return
if not len(peaks[0]):
G2frame.ErrorDialog('Error','Index Peak List is empty')
return
if len(peaks[0][0]) > 9:
G2frame.ErrorDialog('Error','You need to reload Index Peaks List first')
return
G2frame.dataWindow.CopyCell.Enable(False)
G2frame.dataWindow.RefineCell.Enable(False)
dlg = wx.ProgressDialog("Generated reflections",'0 '+" cell search for "+bravaisNames[ibrav],101,
            # style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_SKIP|wx.PD_CAN_ABORT) #doesn't work in 32 bit versions
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
try:
OK,dmin,newcells = G2indx.DoIndexPeaks(peaks[0],controls,bravais,dlg,G2frame.ifX20)
finally:
dlg.Destroy()
cells = keepcells+newcells
cells = G2indx.sortM20(cells)
if OK:
cells[0][10] = True #select best M20
data = [controls,bravais,cells,dmin,ssopt,magcells]
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Unit Cells List'),data)
bestCell = cells[0]
if bestCell[0] > 10.:
G2frame.HKL = G2lat.GenHBravais(dmin,bestCell[2],G2lat.cell2A(bestCell[3:9]))
for hkl in G2frame.HKL:
hkl.insert(4,G2lat.Dsp2pos(Inst,hkl[3])+controls[1])
G2frame.HKL = np.array(G2frame.HKL)
if 'PKS' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2plt.PlotPowderLines(G2frame)
else:
G2plt.PlotPatterns(G2frame)
G2frame.dataWindow.CopyCell.Enable(True)
G2frame.dataWindow.IndexPeaks.Enable(True)
G2frame.dataWindow.MakeNewPhase.Enable(True)
G2frame.ifX20 = True
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def RefreshUnitCellsGrid(event):
'responds when "use" is pressed in index table; generates/plots reflections'
data = G2frame.GPXtree.GetItemPyData(UnitCellsId)
cells,dminx = data[2:4]
r,c = event.GetRow(),event.GetCol()
if cells:
if event.GetEventObject().GetColLabelValue(c) == 'use':
for i in range(len(cells)):
cells[i][-2] = False
UnitCellsTable.SetValue(i,c,False)
UnitCellsTable.SetValue(r,c,True)
gridDisplay.ForceRefresh()
cells[r][-2] = True
ibrav = cells[r][2]
A = G2lat.cell2A(cells[r][3:9])
G2frame.HKL = G2lat.GenHBravais(dmin,ibrav,A)
for hkl in G2frame.HKL:
hkl.insert(4,G2lat.Dsp2pos(Inst,hkl[3])+controls[1])
G2frame.HKL = np.array(G2frame.HKL)
if 'PKS' in G2frame.GPXtree.GetItemText(G2frame.PatternId):
G2plt.PlotPowderLines(G2frame)
else:
G2plt.PlotPatterns(G2frame)
elif event.GetEventObject().GetColLabelValue(c) == 'Keep':
if UnitCellsTable.GetValue(r,c):
UnitCellsTable.SetValue(r,c,False)
cells[r][c] = False
else:
cells[r][c] = True
UnitCellsTable.SetValue(r,c,True)
gridDisplay.ForceRefresh()
G2frame.GPXtree.SetItemPyData(UnitCellsId,data)
KeyList = []
def ClearCurrentShowNext():
KeepShowNext(False)
KeyList += [['j',ClearCurrentShowNext,'Show next Mag. Spc. Group, clear keep flag on current']]
def KeepCurrentShowNext():
KeepShowNext(True)
KeyList += [['k',KeepCurrentShowNext,'Show next Mag. Spc. Group, keep current']]
def KeepShowNext(KeepCurrent=True):
'''Show next "keep" item in Magnetic Space Group list, possibly resetting the
keep flag for the current displayed cell
'''
for i in range(len(magcells)): # find plotted setting
if magcells[i]['Use']: break
else:
            return # no cell has the Try flag set
if not KeepCurrent: # clear current
magcells[i]['Keep'] = False
MagCellsTable.SetValue(i,2,False)
keeps = [j for j in range(i+1,len(magcells)) if magcells[j]['Keep']]
if not keeps:
if not KeepCurrent: magDisplay.ForceRefresh()
return # no remaining Keep-flagged entries
        nxt = keeps[0]
        # update table
        magcells[i]['Use'] = False
        MagCellsTable.SetValue(i,1,False)
        magcells[nxt]['Use'] = True
        MagCellsTable.SetValue(nxt,1,True)
        # get SG info and plot
        SGData = magcells[nxt]['SGData']
        A = G2lat.cell2A(magcells[nxt]['Cell'][:6])
        G2frame.HKL = G2pwd.getHKLpeak(1.0,SGData,A,Inst)
        G2plt.PlotPatterns(G2frame,extraKeys=KeyList)
        magDisplay.ForceRefresh()
        # change Scroll to display new setting
        xscroll = G2frame.dataWindow.GetScrollPos(wx.HORIZONTAL)
        yscroll = magDisplay.CellToRect(nxt,1)[1]/G2frame.dataWindow.GetScrollPixelsPerUnit()[1]
        G2frame.dataWindow.Scroll(xscroll,yscroll)
def RefreshMagCellsGrid(event):
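        '''respond to a click in the magnetic/subgroup cells table: show symmetry operators
        (Space Gp), select & plot a cell (Try), toggle Keep, list unique atoms (Uniq), or
        show the conjugacy/supergroup lists (nConj/nSup)
        '''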
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(UnitCellsId)
r,c = event.GetRow(),event.GetCol()
rLab = magDisplay.GetRowLabelValue(r)
br = baseList[r]
phase = phaseDict[br]
pname = '(%s) %s'%(rLab,phase['Name'])
if magcells:
if c == 0:
mSGData = phase['SGData']
text,table = G2spc.SGPrint(mSGData,AddInv=True)
if 'magAtms' in phase:
msg = 'Magnetic space group information'
text[0] = ' Magnetic Space Group: '+mSGData['MagSpGrp']
text[3] = ' The magnetic lattice point group is '+mSGData['MagPtGp']
OprNames,SpnFlp = G2spc.GenMagOps(mSGData)
G2G.SGMagSpinBox(G2frame.dataWindow,msg,text,table,mSGData['SGCen'],OprNames,
mSGData['SpnFlp'],False).Show()
else:
msg = 'Space Group Information'
G2G.SGMessageBox(G2frame.dataWindow,msg,text,table).Show()
elif c == 1:
for i in range(len(magcells)):
magcells[i]['Use'] = False
for i in range(len(baseList)):
MagCellsTable.SetValue(i,c,False)
MagCellsTable.SetValue(r,c,True)
magDisplay.ForceRefresh()
phase['Use'] = True
mSGData = phase['SGData']
A = G2lat.cell2A(phase['Cell'][:6])
G2frame.HKL = G2pwd.getHKLpeak(1.0,mSGData,A,Inst)
G2plt.PlotPatterns(G2frame,extraKeys=KeyList)
elif c == 2:
if MagCellsTable.GetValue(r,c):
MagCellsTable.SetValue(r,c,False)
phase['Keep'] = False
else:
phase['Keep'] = True
MagCellsTable.SetValue(r,c,True)
magDisplay.ForceRefresh()
            elif c == 3:
maxequiv = magcells[0].get('maxequiv',100)
mSGData = phase['SGData']
Uvec = phase['Uvec']
Trans = phase['Trans']
ifMag = False
if 'magAtms' in phase:
ifMag = True
allmom = phase.get('allmom',False)
magAtms = phase.get('magAtms','')
mAtoms = TestMagAtoms(phase,magAtms,SGData,Uvec,Trans,allmom,maxequiv)
else:
mAtoms = TestAtoms(phase,controls[15],SGData,Uvec,Trans,maxequiv)
Atms = []
AtCods = []
atMxyz = []
for ia,atom in enumerate(mAtoms):
atom[0] += '_%d'%ia
SytSym,Mul,Nop,dupDir = G2spc.SytSym(atom[2:5],mSGData)
Atms.append(atom[:2]+['',]+atom[2:5])
AtCods.append('1')
if 'magAtms' in phase:
MagSytSym = G2spc.MagSytSym(SytSym,dupDir,mSGData)
CSI = G2spc.GetCSpqinel(mSGData['SpnFlp'],dupDir)
atMxyz.append([MagSytSym,CSI[0]])
else:
CSI = G2spc.GetCSxinel(SytSym)
atMxyz.append([SytSym,CSI[0]])
G2phsG.UseMagAtomDialog(G2frame,pname,Atms,AtCods,atMxyz,ifMag=ifMag,ifOK=True).ShowModal()
elif c in [4,5]:
if 'altList' not in phase: return
if c == 4:
title = 'Conjugacy list for '+pname
items = phase['altList']
elif c == 5:
title = 'Super groups list for '+pname
items = phase['supList']
if not items[0]:
wx.MessageBox(pname+' is a maximal subgroup',caption='Super group is parent',style=wx.ICON_INFORMATION)
return
SubCellsDialog(G2frame,title,controls,SGData,items,phaseDict).ShowModal()
data = [controls,bravais,cells,dminx,ssopt,magcells]
G2frame.GPXtree.SetItemPyData(UnitCellsId,data)
def OnRefreshKeep(event):
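        '''respond to a double click on the Keep column label: set test options &
        retest all magnetic cells, refreshing their atom counts & Keep flags
        '''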
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(UnitCellsId)
c = event.GetCol()
E,SGData = G2spc.SpcGroup(controls[13])
if c == 2:
testAtoms = ['',]+list(set([atom[1] for atom in controls[15]]))
ifMag = False
maxequiv = magcells[0]['maxequiv']
maximal = False
if 'magAtms' in magcells[0]:
ifMag = True
allmom = magcells[0]['allmom']
magAtms = magcells[0]['magAtms']
dlg = G2G.MultiDataDialog(G2frame,title='Keep options',
prompts=['max unique','test for mag. atoms','all have moment','only maximal subgroups',],
values=[maxequiv,'',allmom,False],limits=[[1,100],testAtoms,[True,False],[True,False]],
formats=['%d','choice','bool','bool'])
else:
dlg = G2G.MultiDataDialog(G2frame,title='Keep options',
prompts=['max unique','only maximal subgroups',],
values=[maxequiv,False],limits=[[1,100],[True,False],],
formats=['%d','bool',])
if dlg.ShowModal() == wx.ID_OK:
if ifMag:
maxequiv,atype,allmom,maximal = dlg.GetValues()
magAtms = [atom for atom in controls[15] if atom[1] == atype]
else:
maxequiv,maximal = dlg.GetValues()
dlg = wx.ProgressDialog('Setting Keep flags','Processing '+magcells[0]['Name'],len(magcells),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME)
for ip,phase in enumerate(magcells):
dlg.Update(ip,newmsg='Processing '+phase['Name'])
Uvec = phase['Uvec']
Trans = phase['Trans']
if ifMag:
phase['nAtoms'] = len(TestMagAtoms(phase,magAtms,SGData,Uvec,Trans,allmom,maxequiv,maximal))
else:
phase['nAtoms'] = len(TestAtoms(phase,controls[15],SGData,Uvec,Trans,maxequiv,maximal))
dlg.Destroy()
data = controls,bravais,cells,dminx,ssopt,magcells
G2frame.GPXtree.SetItemPyData(UnitCellsId,data)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def MakeNewPhase(event):
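        'make a new phase tree entry from the selected ("use"-flagged) cell & current space group'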
if not G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases'):
sub = G2frame.GPXtree.AppendItem(parent=G2frame.root,text='Phases')
else:
sub = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
PhaseName = ''
dlg = wx.TextEntryDialog(None,'Enter a name for this phase','Phase Name Entry','New phase',
style=wx.OK)
try:
if dlg.ShowModal() == wx.ID_OK:
PhaseName = dlg.GetValue()
cells = G2frame.GPXtree.GetItemPyData(UnitCellsId)[2]
for Cell in cells:
if Cell[-2]:
break
cell = Cell[2:10]
sub = G2frame.GPXtree.AppendItem(parent=sub,text=PhaseName)
E,SGData = G2spc.SpcGroup(controls[13])
G2frame.GPXtree.SetItemPyData(sub, \
G2obj.SetNewPhase(Name=PhaseName,SGData=SGData,cell=cell[1:],Super=ssopt))
G2frame.GetStatusBar().SetStatusText('Change space group from '+str(controls[13])+' if needed',1)
finally:
dlg.Destroy()
def OnMagSel(event):
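        'respond to the "Magnetic?" checkbox: build or remove the magnetic space group information in SGData'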
Obj = event.GetEventObject()
if Obj.GetValue():
SGData['SGSpin'] = [1,]*len(SGData['SGSpin'])
GenSym,GenFlg,BNSsym = G2spc.GetGenSym(SGData)
SGData['GenSym'] = GenSym
SGData['GenFlg'] = GenFlg
OprNames,SpnFlp = G2spc.GenMagOps(SGData)
SGData['SpnFlp'] = SpnFlp
SGData['MagSpGrp'] = G2spc.MagSGSym(SGData)
else:
del SGData['MagSpGrp']
ssopt['SGData'] = SGData
OnHklShow(None)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnSpinOp(event):
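        'respond to a red/black spin choice for a generator: regenerate the magnetic operators & symbol'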
Obj = event.GetEventObject()
isym = Indx[Obj.GetId()]+1
spCode = {'red':-1,'black':1}
SGData['SGSpin'][isym] = spCode[Obj.GetValue()]
G2spc.CheckSpin(isym,SGData)
GenSym,GenFlg,BNSsym = G2spc.GetGenSym(SGData)
SGData['GenSym'] = GenSym
SGData['GenFlg'] = GenFlg
OprNames,SpnFlp = G2spc.GenMagOps(SGData)
SGData['SpnFlp'] = SpnFlp
SGData['MagSpGrp'] = G2spc.MagSGSym(SGData)
OnHklShow(None)
def OnBNSlatt(event):
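        'respond to a BNS lattice centering selection: reset spins & rebuild the magnetic space group with the BNS centering applied'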
Obj = event.GetEventObject()
SGData.update(G2spc.SpcGroup(SGData['SpGrp'])[1])
BNSlatt = Obj.GetValue()
if '_' in BNSlatt:
SGData['BNSlattsym'] = [BNSlatt,BNSsym[BNSlatt]]
else:
SGData['BNSlattsym'] = [SGData['SGLatt'],[0.,0.,0.]]
SGData['SGSpin'] = [1,]*len(SGData['SGSpin'])
GenSym,GenFlg = G2spc.GetGenSym(SGData)[:2]
SGData['GenSym'] = GenSym
SGData['GenFlg'] = GenFlg
SGData['MagSpGrp'] = G2spc.MagSGSym(SGData)
G2spc.ApplyBNSlatt(SGData,SGData['BNSlattsym'])
OprNames,SpnFlp = G2spc.GenMagOps(SGData)
SGData['SpnFlp'] = SpnFlp
OnHklShow(None)
def OnShowSpins(event):
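        'show the magnetic space group operators & spin flips in a dialog'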
msg = 'Magnetic space group information'
text,table = G2spc.SGPrint(SGData,AddInv=True)
text[0] = ' Magnetic Space Group: '+SGData['MagSpGrp']
text[3] = ' The magnetic lattice point group is '+SGData['MagPtGp']
G2G.SGMagSpinBox(G2frame.dataWindow,msg,text,table,SGData['SGCen'],OprNames,
SGData['SpnFlp'],False).Show()
def TransformUnitCell(event):
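        'transform the current cell & space group via the phase transform dialog and update the indexing controls'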
Trans = np.eye(3)
Uvec = np.zeros(3)
Vvec = np.zeros(3)
ifMag = False
Type = 'nuclear'
BNSlatt = ''
E,SGData = G2spc.SpcGroup(controls[13])
phase = {'General':{'Name':'','Type':Type,'Cell':['',]+controls[6:13],'SGData':SGData}}
dlg = G2phsG.TransformDialog(G2frame,phase,Trans,Uvec,Vvec,ifMag,BNSlatt)
try:
if dlg.ShowModal() == wx.ID_OK:
newPhase,Trans,Uvec,Vvec,ifMag,ifConstr,Common = dlg.GetSelection()
sgData = newPhase['General']['SGData']
controls[5] = sgData['SGLatt']+sgData['SGLaue']
controls[13] = sgData['SpGrp']
ssopt['SGData'] = sgData
controls[6:13] = newPhase['General']['Cell'][1:8]
else:
return
finally:
dlg.Destroy()
OnHklShow(None)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnLatSym(event):
        'Run the Bilbao PSEUDOLATTICE cell search'
# look up a space group matching Bravais lattice (should not matter which one)
bravaisSPG = {'Fm3m':225,'Im3m':229,'Pm3m':221,'R3-H':146,'P6/mmm':191,
'I4/mmm':139,'P4/mmm':123,'Fmmm':69,'Immm':71,
'Cmmm':65,'Pmmm':47,'C2/m':12,'P2/m':10,'P1':2}
pUCid = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List')
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(pUCid)
sgNum = bravaisSPG.get(controls[5],0)
if sgNum < 1:
wx.MessageBox('Sorry, only standard cell settings are allowed, please transform axes',caption='Bilbao requires standard settings',style=wx.ICON_EXCLAMATION)
return
cell = controls[6:12]
tolerance = 5.
dlg = G2G.SingleFloatDialog(G2frame,'Tolerance',
'Enter angular tolerance for search',5.0,[.1,30.],"%.1f")
if dlg.ShowModal() == wx.ID_OK:
tolerance = dlg.GetValue()
dlg.Destroy()
else:
dlg.Destroy()
return
import SUBGROUPS as kSUB
wx.BeginBusyCursor()
wx.MessageBox(''' For use of PSEUDOLATTICE, please cite:
Bilbao Crystallographic Server I: Databases and crystallographic computing programs,
M. I. Aroyo, J. M. Perez-Mato, C. Capillas, E. Kroumova, S. Ivantchev, G. Madariaga, A. Kirov & H. Wondratschek
Z. Krist. 221, 1, 15-27 (2006).
doi:10.1524/zkri.2006.221.1.15''',
caption='Bilbao PSEUDOLATTICE',style=wx.ICON_INFORMATION)
page = kSUB.subBilbaoCheckLattice(sgNum,cell,tolerance)
wx.EndBusyCursor()
if not page: return
# while cells: cells.pop() # cells.clear() is much cleaner but not Py2
for i,(cell,mat) in enumerate(kSUB.parseBilbaoCheckLattice(page)):
cells.append([])
cells[-1] += [mat,0,16]
cells[-1] += cell
cells[-1] += [G2lat.calc_V(G2lat.cell2A(cell)),False,False]
G2frame.GPXtree.SetItemPyData(pUCid,data)
G2frame.OnFileSave(event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnRunSubs(event):
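        '''Run the Bilbao SUBGROUPS web service for the chosen propagation vector(s)
        and fill the subgroup cells (magcells) list from the results
        '''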
import SUBGROUPS as kSUB
G2frame.dataWindow.RunSubGroupsMag.Enable(False)
pUCid = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List')
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(pUCid)
E,SGData = G2spc.SpcGroup(controls[13])
Kx = [' ','0','1/2','-1/2','1/3','-1/3','2/3','1']
Ky = [' ','0','1/2','1/3','2/3','1']
Kz = [' ','0','1/2','3/2','1/3','2/3','1']
        kvec = [['0','0','0'],[' ',' ',' '],[' ',' ',' ']]
dlg = G2G.MultiDataDialog(G2frame,title='SUBGROUPS options',prompts=[' k-vector 1',' k-vector 2',' k-vector 3', \
' Use whole star',' Filter by','preserve axes','max unique'],
values=kvec+[False,'',True,100],
limits=[[Kx[1:],Ky[1:],Kz[1:]],[Kx,Ky,Kz],[Kx,Ky,Kz],[True,False],['',' Landau transition',' Only maximal subgroups',],
[True,False],[1,100]],
formats=[['choice','choice','choice'],['choice','choice','choice'],['choice','choice','choice'],'bool','choice',
'bool','%d',])
if dlg.ShowModal() == wx.ID_OK:
magcells = []
newVals = dlg.GetValues()
kvec[:9] = newVals[0]+newVals[1]+newVals[2]+[' ',]
nkvec = kvec.index(' ')
star = newVals[3]
filterby = newVals[4]
keepaxes = newVals[5]
maxequiv = newVals[6]
if 'maximal' in filterby:
maximal = True
Landau = False
elif 'Landau' in filterby:
maximal = False
Landau = True
else:
maximal = False
Landau = False
if nkvec not in [0,3,6,9]:
wx.MessageBox('Error: check your propagation vector(s)',
caption='Bilbao SUBGROUPS setup error',style=wx.ICON_EXCLAMATION)
return
if nkvec in [6,9] and Landau:
wx.MessageBox('Error, multi k-vectors & Landau not compatible',
caption='Bilbao SUBGROUPS setup error',style=wx.ICON_EXCLAMATION)
return
wx.BeginBusyCursor()
wx.MessageBox(''' For use of SUBGROUPS, please cite:
Symmetry-Based Computational Tools for Magnetic Crystallography,
J.M. Perez-Mato, S.V. Gallego, E.S. Tasci, L. Elcoro, G. de la Flor, and M.I. Aroyo
Annu. Rev. Mater. Res. 2015. 45,217-48.
doi: 10.1146/annurev-matsci-070214-021008''',caption='Bilbao SUBGROUPS',style=wx.ICON_INFORMATION)
SubGroups,baseList = kSUB.GetNonStdSubgroups(SGData,kvec[:9],star,Landau)
# SUBGROUPS,baseList = kMAG.GetNonStdSubgroups(SGData,kvec[:9],star,Landau,maximal)
wx.EndBusyCursor()
if SubGroups is None:
wx.MessageBox('Check your internet connection?',caption='Bilbao SUBGROUPS error',style=wx.ICON_EXCLAMATION)
return
if not SubGroups:
if Landau:
wx.MessageBox('No results from SUBGROUPS, multi k-vectors & Landau not compatible',
caption='Bilbao SUBGROUPS error',style=wx.ICON_EXCLAMATION)
else:
wx.MessageBox('No results from SUBGROUPS, check your propagation vector(s)',
caption='Bilbao SUBGROUPS error',style=wx.ICON_EXCLAMATION)
return
controls[14] = kvec[:9]
try:
controls[16] = baseList
except IndexError:
controls.append(baseList)
dlg = wx.ProgressDialog('SUBGROUPS results','Processing '+SubGroups[0][0],len(SubGroups),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME)
for ir,result in enumerate(SubGroups):
dlg.Update(ir,newmsg='Processing '+result[0])
Trans = np.array(eval(result[1][0]))
Uvec = np.array(eval(result[1][1]))
phase = G2lat.makeBilbaoPhase(result,Uvec,Trans)
phase['gid'] = result[2]
phase['altList'] = result[3]
phase['supList'] = eval(result[4])
RVT = None
if keepaxes:
RVT = G2lat.FindNonstandard(controls,phase)
if RVT is not None:
result,Uvec,Trans = RVT
phase.update(G2lat.makeBilbaoPhase(result,Uvec,Trans))
phase['Cell'] = G2lat.TransformCell(controls[6:12],Trans)
phase['maxequiv'] = maxequiv
phase['nAtoms'] = len(TestAtoms(phase,controls[15],SGData,Uvec,Trans,maxequiv,maximal))
magcells.append(phase)
dlg.Destroy()
magcells[0]['Use'] = True
SGData = magcells[0]['SGData']
A = G2lat.cell2A(magcells[0]['Cell'][:6])
G2frame.HKL = G2pwd.getHKLpeak(1.0,SGData,A,Inst)
G2plt.PlotPatterns(G2frame,extraKeys=KeyList)
data = [controls,bravais,cells,dmin,ssopt,magcells]
G2frame.GPXtree.SetItemPyData(pUCid,data)
G2frame.OnFileSave(event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
def OnRunSubsMag(event):
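        '''Run the Bilbao k-SUBGROUPSMAG web service for the chosen propagation vector(s)
        and fill the magnetic subgroup cells (magcells) list from the results
        '''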
import SUBGROUPS as kSUB
G2frame.dataWindow.RunSubGroups.Enable(False)
pUCid = G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Unit Cells List')
controls,bravais,cells,dminx,ssopt,magcells = G2frame.GPXtree.GetItemPyData(pUCid)
E,SGData = G2spc.SpcGroup(controls[13])
atoms = list(set([atom[1] for atom in controls[15]]))
testAtoms = ['',]+[atom for atom in atoms if len(G2elem.GetMFtable([atom,],[2.0,]))]
Kx = [' ','0','1/2','-1/2','1/3','-1/3','2/3','1']
Ky = [' ','0','1/2','1/3','2/3','1']
Kz = [' ','0','1/2','3/2','1/3','2/3','1']
        kvec = [['0','0','0'],[' ',' ',' '],[' ',' ',' ']]
dlg = G2G.MultiDataDialog(G2frame,title='k-SUBGROUPSMAG options',prompts=[' k-vector 1',' k-vector 2',' k-vector 3', \
' Use whole star',' Filter by','preserve axes','test for mag. atoms','all have moment','max unique'],
values=kvec+[False,'',True,'',False,100],
limits=[[Kx[1:],Ky[1:],Kz[1:]],[Kx,Ky,Kz],[Kx,Ky,Kz],[True,False],['',' Landau transition',' Only maximal subgroups',],
[True,False],testAtoms,[True,False],[1,100]],
formats=[['choice','choice','choice'],['choice','choice','choice'],['choice','choice','choice'],'bool','choice',
'bool','choice','bool','%d',])
if dlg.ShowModal() == wx.ID_OK:
magcells = []
newVals = dlg.GetValues()
kvec[:9] = newVals[0]+newVals[1]+newVals[2]+[' ',]
nkvec = kvec.index(' ')
star = newVals[3]
filterby = newVals[4]
keepaxes = newVals[5]
atype = newVals[6]
allmom = newVals[7]
maxequiv = newVals[8]
if 'maximal' in filterby:
maximal = True
Landau = False
elif 'Landau' in filterby:
maximal = False
Landau = True
else:
maximal = False
Landau = False
if nkvec not in [0,3,6,9]:
wx.MessageBox('Error: check your propagation vector(s)',
caption='Bilbao k-SUBGROUPSMAG setup error',style=wx.ICON_EXCLAMATION)
return
if nkvec in [6,9] and Landau:
wx.MessageBox('Error, multi k-vectors & Landau not compatible',
caption='Bilbao k-SUBGROUPSMAG setup error',style=wx.ICON_EXCLAMATION)
return
magAtms = [atom for atom in controls[15] if atom[1] == atype]
wx.BeginBusyCursor()
wx.MessageBox(''' For use of k-SUBGROUPSMAG, please cite:
Symmetry-Based Computational Tools for Magnetic Crystallography,
J.M. Perez-Mato, S.V. Gallego, E.S. Tasci, L. Elcoro, G. de la Flor, and M.I. Aroyo
Annu. Rev. Mater. Res. 2015. 45,217-48.
doi: 10.1146/annurev-matsci-070214-021008''',caption='Bilbao k-SUBGROUPSMAG',style=wx.ICON_INFORMATION)
MAXMAGN,baseList = kSUB.GetNonStdSubgroupsmag(SGData,kvec[:9],star,Landau)
wx.EndBusyCursor()
if MAXMAGN is None:
wx.MessageBox('Check your internet connection?',caption='Bilbao k-SUBGROUPSMAG error',style=wx.ICON_EXCLAMATION)
return
if not MAXMAGN:
if Landau:
wx.MessageBox('No results from k-SUBGROUPSMAG, multi k-vectors & Landau not compatible',
caption='Bilbao k-SUBGROUPSMAG error',style=wx.ICON_EXCLAMATION)
else:
wx.MessageBox('No results from k-SUBGROUPSMAG, check your propagation vector(s)',
caption='Bilbao k-SUBGROUPSMAG error',style=wx.ICON_EXCLAMATION)
return
controls[14] = kvec[:9]
try:
controls[16] = baseList
except IndexError:
controls.append(baseList)
dlg = wx.ProgressDialog('k-SUBGROUPSMAG results','Processing '+MAXMAGN[0][0],len(MAXMAGN),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME)
for ir,result in enumerate(MAXMAGN):
# result is SPGP,BNS,MV,itemList,altList,superList
dlg.Update(ir,newmsg='Processing '+result[0])
Trans = np.array(eval(result[2][0]))
Uvec = np.array(eval(result[2][1]))
phase = G2lat.makeBilbaoPhase(result[:2],Uvec,Trans,True)
phase['gid'] = result[3]
phase['altList'] = result[4]
phase['supList'] = eval(result[5])
RVT = None
if keepaxes:
RVT = G2lat.FindNonstandard(controls,phase)
if RVT is not None:
result,Uvec,Trans = RVT
phase.update(G2lat.makeBilbaoPhase(result,Uvec,Trans,True))
phase['Cell'] = G2lat.TransformCell(controls[6:12],Trans)
phase['aType'] = atype
phase['allmom'] = allmom
phase['magAtms'] = magAtms
phase['maxequiv'] = maxequiv
phase['nAtoms'] = len(TestMagAtoms(phase,magAtms,SGData,Uvec,Trans,allmom,maxequiv,maximal))
magcells.append(phase)
dlg.Destroy()
magcells[0]['Use'] = True
SGData = magcells[0]['SGData']
A = G2lat.cell2A(magcells[0]['Cell'][:6])
G2frame.HKL = G2pwd.getHKLpeak(1.0,SGData,A,Inst)
G2plt.PlotPatterns(G2frame,extraKeys=KeyList)
data = [controls,bravais,cells,dmin,ssopt,magcells]
G2frame.GPXtree.SetItemPyData(pUCid,data)
G2frame.OnFileSave(event)
wx.CallAfter(UpdateUnitCellsGrid,G2frame,data)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.IndexMenu)
G2frame.GetStatusBar().SetStatusText('')
G2frame.Bind(wx.EVT_MENU, OnIndexPeaks, id=G2G.wxID_INDEXPEAKS)
G2frame.Bind(wx.EVT_MENU, OnRunSubs, id=G2G.wxID_RUNSUB)
G2frame.Bind(wx.EVT_MENU, OnRunSubsMag, id=G2G.wxID_RUNSUBMAG)
G2frame.Bind(wx.EVT_MENU, OnLatSym, id=G2G.wxID_LATSYM)
G2frame.Bind(wx.EVT_MENU, CopyUnitCell, id=G2G.wxID_COPYCELL)
G2frame.Bind(wx.EVT_MENU, LoadUnitCell, id=G2G.wxID_LOADCELL)
G2frame.Bind(wx.EVT_MENU, ImportUnitCell, id=G2G.wxID_IMPORTCELL)
G2frame.Bind(wx.EVT_MENU, TransformUnitCell, id=G2G.wxID_TRANSFORMCELL)
G2frame.Bind(wx.EVT_MENU, RefineCell, id=G2G.wxID_REFINECELL)
G2frame.Bind(wx.EVT_MENU, MakeNewPhase, id=G2G.wxID_MAKENEWPHASE)
G2frame.Bind(wx.EVT_MENU, OnExportCells, id=G2G.wxID_EXPORTCELLS)
if len(data) < 6:
data.append([])
controls,bravais,cells,dminx,ssopt,magcells = data
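    # controls layout as used in this module: [0] refine zero flag, [1] zero offset,
    # [2] max Nc/Nobs, [3] start volume, [5] Bravais lattice symbol, [6:12] cell parameters,
    # [12] cell volume, [13] space group, [14] propagation vectors, [15] atom list, [16] subgroup base list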
if len(controls) < 13: #add cell volume if missing
controls.append(G2lat.calc_V(G2lat.cell2A(controls[6:12])))
if len(controls) < 14: #add space group if missing
controls.append(spaceGroups[bravaisSymb.index(controls[5])])
if len(controls) < 15:
controls.append(list(range(1,len(magcells)+1)))
while len(bravais) < 17:
bravais += [0,]
SGData = ssopt.get('SGData',G2spc.SpcGroup(controls[13])[1])
G2frame.GPXtree.SetItemPyData(UnitCellsId,data) #update with volume
bravaisNames = ['Cubic-F','Cubic-I','Cubic-P','Trigonal-R','Trigonal/Hexagonal-P',
'Tetragonal-I','Tetragonal-P','Orthorhombic-F','Orthorhombic-I','Orthorhombic-A',
'Orthorhombic-B','Orthorhombic-C','Orthorhombic-P',
'Monoclinic-I','Monoclinic-C','Monoclinic-P','Triclinic','Triclinic',]
cellGUIlist = [[[0,1,2],4,zip([" Unit cell: a = "," Vol = "],[(10,5),"%.3f"],[True,False],[0,0])],
[[3,4,5,6],6,zip([" Unit cell: a = "," c = "," Vol = "],[(10,5),(10,5),"%.3f"],[True,True,False],[0,2,0])],
[[7,8,9,10,11,12],8,zip([" Unit cell: a = "," b = "," c = "," Vol = "],[(10,5),(10,5),(10,5),"%.3f"],
[True,True,True,False],[0,1,2,0])],
[[13,14,15],10,zip([" Unit cell: a = "," b = "," c = "," beta = "," Vol = "],
[(10,5),(10,5),(10,5),(10,3),"%.3f"],[True,True,True,True,False],[0,1,2,4,0])],
[[16,17],8,zip([" Unit cell: a = "," b = "," c = "," alpha = "," beta = "," gamma = "," Vol = "],
[(10,5),(10,5),(10,5),(10,3),(10,3),(10,3),"%.3f"],
[True,True,True,True,True,True,False],[0,1,2,3,4,5,0])]]
G2frame.dataWindow.IndexPeaks.Enable(False)
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Index Peak List'))
if peaks:
G2frame.dataWindow.IndexPeaks.Enable(True)
G2frame.dataWindow.RefineCell.Enable(False)
if controls[12] > 1.0 and len(peaks[0]): #if a "real" volume (i.e. not default) and peaks
G2frame.dataWindow.RefineCell.Enable(True)
G2frame.dataWindow.CopyCell.Enable(False)
G2frame.dataWindow.MakeNewPhase.Enable(False)
G2frame.dataWindow.ExportCells.Enable(False)
if cells:
G2frame.dataWindow.CopyCell.Enable(True)
G2frame.dataWindow.MakeNewPhase.Enable(True)
G2frame.dataWindow.ExportCells.Enable(True)
elif magcells:
G2frame.dataWindow.CopyCell.Enable(True)
if G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Phases'):
G2frame.dataWindow.LoadCell.Enable(True)
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Indexing controls: '),0,WACV)
mainSizer.Add((5,5),0)
littleSizer = wx.FlexGridSizer(0,5,5,5)
littleSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Max Nc/Nobs '),0,WACV)
NcNo = wx.SpinCtrl(G2frame.dataWindow)
NcNo.SetRange(2,8)
NcNo.SetValue(controls[2])
NcNo.Bind(wx.EVT_SPINCTRL,OnNcNo)
littleSizer.Add(NcNo,0,WACV)
littleSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Start Volume '),0,WACV)
startVol = G2G.ValidatedTxtCtrl(G2frame.dataWindow,controls,3,typeHint=int,min=25)
littleSizer.Add(startVol,0,WACV)
x20 = wx.CheckBox(G2frame.dataWindow,label='Use M20/(X20+1)?')
x20.SetValue(G2frame.ifX20)
x20.Bind(wx.EVT_CHECKBOX,OnIfX20)
littleSizer.Add(x20,0,WACV)
mainSizer.Add(littleSizer,0)
mainSizer.Add((5,5),0)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Select Bravais Lattices for indexing: '),
0,WACV)
mainSizer.Add((5,5),0)
littleSizer = wx.FlexGridSizer(0,5,5,5)
bravList = []
bravs = zip(bravais,bravaisNames)
for brav,bravName in bravs:
bravCk = wx.CheckBox(G2frame.dataWindow,label=bravName)
bravList.append(bravCk.GetId())
bravCk.SetValue(brav)
bravCk.Bind(wx.EVT_CHECKBOX,OnBravais)
littleSizer.Add(bravCk,0,WACV)
mainSizer.Add(littleSizer,0)
mainSizer.Add((-1,10),0)
littleSizer = wx.BoxSizer(wx.HORIZONTAL)
littleSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Cell Test && Refinement: '),0,WACV)
littleSizer.Add((5,5),0)
hklShow = wx.Button(G2frame.dataWindow,label="Show hkl positions")
hklShow.Bind(wx.EVT_BUTTON,OnHklShow)
littleSizer.Add(hklShow,0,WACV)
littleSizer.Add(wx.StaticText(G2frame.dataWindow,label=' cell step ',style=wx.ALIGN_RIGHT),0,WACV|wx.ALIGN_RIGHT)
shiftChoices = [ '0.01%','0.05%','0.1%','0.5%', '1.0%','2.5%','5.0%']
shiftSel = wx.Choice(G2frame.dataWindow,choices=shiftChoices)
shiftSel.SetSelection(3)
littleSizer.Add(shiftSel)
mainSizer.Add(littleSizer,0)
mainSizer.Add((5,5),0)
littleSizer = wx.BoxSizer(wx.HORIZONTAL)
littleSizer.Add(wx.StaticText(G2frame.dataWindow,label=" Bravais \n lattice ",style=wx.ALIGN_CENTER),0,WACV,5)
bravSel = wx.Choice(G2frame.dataWindow,choices=bravaisSymb,size=(75,-1))
bravSel.SetSelection(bravaisSymb.index(controls[5]))
bravSel.Bind(wx.EVT_CHOICE,OnBravSel)
littleSizer.Add(bravSel,0,WACV)
littleSizer.Add(wx.StaticText(G2frame.dataWindow,label=" Space \n group ",style=wx.ALIGN_CENTER),0,WACV,5)
spcSel = wx.Choice(G2frame.dataWindow,choices=SPGlist[controls[5]],size=(100,-1))
spcSel.SetSelection(SPGlist[controls[5]].index(controls[13]))
spcSel.Bind(wx.EVT_CHOICE,OnSpcSel)
littleSizer.Add(spcSel,0,WACV)
if ssopt.get('Use',False): #zero for super lattice doesn't work!
controls[0] = False
else:
littleSizer.Add(wx.StaticText(G2frame.dataWindow,label=" Zero offset "),0,WACV)
zero = G2G.ValidatedTxtCtrl(G2frame.dataWindow,controls,1,nDig=(10,4),typeHint=float,
min=-5.,max=5.,size=(50,-1))
littleSizer.Add(zero,0,WACV)
zeroVar = wx.CheckBox(G2frame.dataWindow,label="Refine?")
zeroVar.SetValue(controls[0])
zeroVar.Bind(wx.EVT_CHECKBOX,OnZeroVar)
littleSizer.Add(zeroVar,0,WACV)
SSopt = wx.CheckBox(G2frame.dataWindow,label="Modulated?")
SSopt.SetValue(ssopt.get('Use',False))
SSopt.Bind(wx.EVT_CHECKBOX,OnSSopt)
littleSizer.Add(SSopt,0,WACV)
if 'N' in Inst['Type'][0]:
MagSel = wx.CheckBox(G2frame.dataWindow,label="Magnetic?")
MagSel.SetValue('MagSpGrp' in SGData)
MagSel.Bind(wx.EVT_CHECKBOX,OnMagSel)
littleSizer.Add(MagSel,0,WACV)
mainSizer.Add(littleSizer,0)
mainSizer.Add((5,5),0)
if 'N' in Inst['Type'][0]:
neutSizer = wx.BoxSizer(wx.HORIZONTAL)
if 'MagSpGrp' in SGData:
Indx = {}
GenSym,GenFlg,BNSsym = G2spc.GetGenSym(SGData)
SGData['GenSym'] = GenSym
SGData['SGGray'] = False
neutSizer.Add(wx.StaticText(G2frame.dataWindow,label=' BNS lattice: '),0,WACV)
BNSkeys = [SGData['SGLatt'],]+list(BNSsym.keys())
BNSkeys.sort()
try: #this is an ugly kluge - bug in wx.ComboBox
if SGData['BNSlattsym'][0][2] in ['a','b','c']:
BNSkeys.reverse()
except:
pass
BNS = wx.ComboBox(G2frame.dataWindow,value=SGData['BNSlattsym'][0],
choices=BNSkeys,style=wx.CB_READONLY|wx.CB_DROPDOWN)
BNS.Bind(wx.EVT_COMBOBOX,OnBNSlatt)
neutSizer.Add(BNS,0,WACV)
spinColor = ['black','red']
spCode = {-1:'red',1:'black'}
for isym,sym in enumerate(GenSym[1:]):
neutSizer.Add(wx.StaticText(G2frame.dataWindow,label=' %s: '%(sym.strip())),0,WACV)
spinOp = wx.ComboBox(G2frame.dataWindow,value=spCode[SGData['SGSpin'][isym+1]],choices=spinColor,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[spinOp.GetId()] = isym
spinOp.Bind(wx.EVT_COMBOBOX,OnSpinOp)
neutSizer.Add(spinOp,0,WACV)
OprNames,SpnFlp = G2spc.GenMagOps(SGData)
SGData['SpnFlp'] = SpnFlp
showSpins = wx.Button(G2frame.dataWindow,label=' Show spins?')
showSpins.Bind(wx.EVT_BUTTON,OnShowSpins)
neutSizer.Add(showSpins,0,WACV)
mainSizer.Add(neutSizer,0)
mainSizer.Add((5,5),0)
ibrav = SetLattice(controls)
for cellGUI in cellGUIlist:
if ibrav in cellGUI[0]:
useGUI = cellGUI
cellList = []
valDict = {}
Info = {}
littleSizer = wx.FlexGridSizer(0,min(6,useGUI[1]),5,5)
for txt,fmt,ifEdit,Id in useGUI[2]:
littleSizer.Add(wx.StaticText(G2frame.dataWindow,label=txt,style=wx.ALIGN_RIGHT),0,WACV|wx.ALIGN_RIGHT)
if ifEdit: #a,b,c,etc.
cellVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,controls,6+Id,nDig=fmt,OnLeave=OnCellChange)
Info[cellVal.GetId()] = Id
valSizer = wx.BoxSizer(wx.HORIZONTAL)
valSizer.Add(cellVal,0,WACV)
cellSpin = wx.SpinButton(G2frame.dataWindow,style=wx.SP_VERTICAL,size=wx.Size(20,20))
cellSpin.SetValue(0)
cellSpin.SetRange(-1,1)
cellSpin.Bind(wx.EVT_SPIN, OnMoveCell)
valSizer.Add(cellSpin,0,WACV)
littleSizer.Add(valSizer,0,WACV)
cellList.append(cellVal.GetId())
cellList.append(cellSpin.GetId())
valDict[cellSpin.GetId()] = cellVal
else: #volume
volVal = wx.TextCtrl(G2frame.dataWindow,value=(fmt%(controls[12])),style=wx.TE_READONLY)
volVal.SetBackgroundColour(VERY_LIGHT_GREY)
littleSizer.Add(volVal,0,WACV)
mainSizer.Add(littleSizer,0)
if ssopt.get('Use',False): #super lattice display
indChoice = ['1','2','3','4',]
if 'MagSpGrp' in SGData: #limit to one for magnetic SS for now
indChoice = ['1',]
SpSg = controls[13]
SGData = G2spc.SpcGroup(SpSg)[1]
ssChoice = G2spc.SSChoice(SGData)
if ssopt['ssSymb'] not in ssChoice:
ssopt['ssSymb'] = ssopt['ssSymb'][:-1]
ssSizer = wx.BoxSizer(wx.HORIZONTAL)
        ssSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Super space group: '+SpSg+' '),0,WACV)
selMG = wx.ComboBox(G2frame.dataWindow,value=ssopt['ssSymb'],
choices=ssChoice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
selMG.Bind(wx.EVT_COMBOBOX, OnSelMG)
ssSizer.Add(selMG,0,WACV)
ssSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Mod. vector: '),0,WACV)
modS = G2spc.splitSSsym(ssopt['ssSymb'])[0]
ssopt['ModVec'],ifShow = G2spc.SSGModCheck(ssopt['ModVec'],modS)
Indx = {}
for i,[val,show] in enumerate(zip(ssopt['ModVec'],ifShow)):
if show:
valSizer = wx.BoxSizer(wx.HORIZONTAL)
modVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,ssopt['ModVec'],i,
min=-.98,max=.98,nDig=(10,4),typeHint=float,OnLeave=OnModVal)
valSizer.Add(modVal,0,WACV)
modSpin = wx.SpinButton(G2frame.dataWindow,style=wx.SP_VERTICAL,size=wx.Size(20,20))
modSpin.SetValue(0)
modSpin.SetRange(-1,1)
modSpin.Bind(wx.EVT_SPIN, OnMoveMod)
valSizer.Add(modSpin,0,WACV)
ssSizer.Add(valSizer,0,WACV)
Indx[modVal.GetId()] = i
Indx[modSpin.GetId()] = [i,modVal]
else:
modVal = wx.TextCtrl(G2frame.dataWindow,value=('%.3f'%(val)),
size=wx.Size(50,20),style=wx.TE_READONLY)
modVal.SetBackgroundColour(VERY_LIGHT_GREY)
ssSizer.Add(modVal,0,WACV)
ssSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Max. M: '),0,WACV)
maxMH = wx.ComboBox(G2frame.dataWindow,value=str(ssopt['maxH']),
choices=indChoice,style=wx.CB_READONLY|wx.CB_DROPDOWN)
maxMH.Bind(wx.EVT_COMBOBOX, OnMaxMH)
ssSizer.Add(maxMH,0,WACV)
findMV = wx.Button(G2frame.dataWindow,label="Find mod. vec.?")
findMV.Bind(wx.EVT_BUTTON,OnFindOneMV)
ssSizer.Add(findMV,0,WACV)
findallMV = wx.Button(G2frame.dataWindow,label="Try all?")
findallMV.Bind(wx.EVT_BUTTON,OnFindMV)
ssSizer.Add(findallMV,0,WACV)
mainSizer.Add(ssSizer,0)
G2frame.dataWindow.currentGrids = []
if cells:
mode = 0
try: # for Cell sym, 1st entry is cell xform matrix
len(cells[0][0])
mode = 1
except:
pass
if mode:
mainSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label='\n Cell symmetry search:'),0,WACV)
colLabels = ['use']
Types = [wg.GRID_VALUE_BOOL]
else:
mainSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label='\n Indexing Result:'),0,WACV)
colLabels = ['M20','X20','use','Bravais']
Types = [wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_NUMBER,
wg.GRID_VALUE_BOOL,wg.GRID_VALUE_STRING]
rowLabels = []
colLabels += ['a','b','c','alpha','beta','gamma','Volume','Keep']
Types += (3*[wg.GRID_VALUE_FLOAT+':10,5',]+
3*[wg.GRID_VALUE_FLOAT+':10,3',]+
[wg.GRID_VALUE_FLOAT+':10,2',wg.GRID_VALUE_BOOL])
table = []
for cell in cells:
rowLabels.append('')
if mode:
row = [cell[-2]]+cell[3:10]+[cell[11],]
else:
row = cell[0:2]+[cell[-2]]+[bravaisSymb[cell[2]]]+cell[3:10]+[cell[11],]
if cell[-2]:
A = G2lat.cell2A(cell[3:9])
G2frame.HKL = G2lat.GenHBravais(dmin,cell[2],A)
for hkl in G2frame.HKL:
hkl.insert(4,G2lat.Dsp2pos(Inst,hkl[3])+controls[1])
G2frame.HKL = np.array(G2frame.HKL)
table.append(row)
UnitCellsTable = G2G.Table(table,rowLabels=rowLabels,colLabels=colLabels,types=Types)
gridDisplay = G2G.GSGrid(G2frame.dataWindow)
gridDisplay.SetTable(UnitCellsTable, True)
G2frame.dataWindow.CopyCell.Enable(True)
gridDisplay.Bind(wg.EVT_GRID_CELL_LEFT_CLICK,RefreshUnitCellsGrid)
gridDisplay.Bind(wg.EVT_GRID_LABEL_LEFT_DCLICK,OnSortCells)
gridDisplay.SetRowLabelSize(0)
gridDisplay.AutoSizeColumns(False)
for r in range(gridDisplay.GetNumberRows()):
for c in range(gridDisplay.GetNumberCols()):
if c == 2:
gridDisplay.SetReadOnly(r,c,isReadOnly=False)
else:
gridDisplay.SetReadOnly(r,c,isReadOnly=True)
mainSizer.Add(gridDisplay,0,WACV)
if magcells and len(controls) > 16:
itemList = [phase.get('gid',ip+1) for ip,phase in enumerate(magcells)]
phaseDict = dict(zip(itemList,magcells))
G2frame.dataWindow.CopyCell.Enable(False)
kvec1 = ','.join(controls[14][:3])
kvec2 = ','.join(controls[14][3:6])
kvec3 = ','.join(controls[14][6:])
baseList = controls[16]
if 'magAtms' in magcells[0]:
G2frame.dataWindow.RunSubGroupsMag.Enable(True)
Label = '\n Magnetic subgroup cells from Bilbao k-SUBGROUPSMAG for %s; kvec1=(%s)'%(controls[13],kvec1)
else:
G2frame.dataWindow.RunSubGroups.Enable(True)
Label = '\n Subgroup cells from Bilbao SUBGROUPS for %s; kvec1=(%s)'%(controls[13],kvec1)
if ' ' not in kvec2:
Label += ', kvec2=(%s)' % kvec2
if ' ' not in kvec3:
Label += ', kvec3=(%s)' % kvec3
Label += ':'
mainSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=Label),0,WACV)
rowLabels = [str(i+1) for i in range(len(baseList))]
colLabels = ['Space Gp','Try','Keep','Uniq','nConj','nSup','Trans','Vec','a','b','c','alpha','beta','gamma','Volume']
Types = [wg.GRID_VALUE_STRING,]+2*[wg.GRID_VALUE_BOOL,]+3*[wg.GRID_VALUE_LONG,]+2*[wg.GRID_VALUE_STRING,]+ \
3*[wg.GRID_VALUE_FLOAT+':10,5',]+3*[wg.GRID_VALUE_FLOAT+':10,3',]+[wg.GRID_VALUE_FLOAT+':10,2']
table = []
for ip in baseList:
phase = phaseDict[ip]
natms = phase.get('nAtoms',1)
try:
nConj = len(phase['altList'])
nSup = len(phase['supList'])
except KeyError:
nConj = 0
nSup = 0
cell = list(phase['Cell'])
trans = G2spc.Trans2Text(phase['Trans'])
vec = G2spc.Latt2text([phase['Uvec'],])
row = [phase['Name'],phase['Use'],phase['Keep'],natms,nConj,nSup,trans,vec]+cell
table.append(row)
MagCellsTable = G2G.Table(table,rowLabels=rowLabels,colLabels=colLabels,types=Types)
G2frame.GetStatusBar().SetStatusText(
'Double click Keep to refresh Keep flags; click Space Gp to see sym. ops., Uniq to see unique atoms list; Try to trigger K & J keys on plot',1)
magDisplay = G2G.GSGrid(G2frame.dataWindow)
magDisplay.SetTable(MagCellsTable, True)
magDisplay.Bind(wg.EVT_GRID_CELL_LEFT_CLICK,RefreshMagCellsGrid)
magDisplay.Bind(wg.EVT_GRID_LABEL_LEFT_DCLICK,OnRefreshKeep)
magDisplay.AutoSizeColumns(False)
for r in range(magDisplay.GetNumberRows()):
for c in range(magDisplay.GetNumberCols()):
if c in [1,2]:
magDisplay.SetReadOnly(r,c,isReadOnly=False)
else:
magDisplay.SetReadOnly(r,c,isReadOnly=True)
mainSizer.Add(magDisplay,0,WACV)
G2frame.dataWindow.SetDataSize()
################################################################################
##### Reflection list
################################################################################
def UpdateReflectionGrid(G2frame,data,HKLF=False,Name=''):
'''respond to selection of PWDR Reflections data tree item by displaying
a table of reflections in the data window.
'''
Controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root, 'Controls'))
dMin = 0.05
if 'UsrReject' in Controls:
dMin = Controls['UsrReject'].get('MinD',0.05)
def OnPlot1DHKL(event):
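        '''Plots the reflections as a 1D stick pattern
        '''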
phaseName = G2frame.RefList
if phaseName not in ['Unknown',]:
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
phaseId = G2gd.GetGPXtreeItemId(G2frame,pId,phaseName)
General = G2frame.GPXtree.GetItemPyData(phaseId)['General']
Super = General.get('Super',0)
else:
Super = 0
        if 'list' in str(type(data)): #single crystal data is 2 dicts in a list
refList = data[1]['RefList']
else: #powder data is a dict of dicts; each same structure as SC 2nd dict
if 'RefList' in data[phaseName]:
refList = np.array(data[phaseName]['RefList'])
else:
wx.MessageBox('No reflection list - do Refine first',caption='Reflection plotting')
return
G2plt.Plot1DSngl(G2frame,newPlot=True,hklRef=refList,Super=Super,Title=phaseName)
def OnPlotHKL(event):
'''Plots a layer of reflections
'''
phaseName = G2frame.RefList
if phaseName not in ['Unknown',]:
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
phaseId = G2gd.GetGPXtreeItemId(G2frame,pId,phaseName)
General = G2frame.GPXtree.GetItemPyData(phaseId)['General']
Super = General.get('Super',0)
SuperVec = General.get('SuperVec',[])
else:
Super = 0
SuperVec = []
        if 'list' in str(type(data)): #single crystal data is 2 dicts in a list
refList = data[1]['RefList']
else: #powder data is a dict of dicts; each same structure as SC 2nd dict
if 'RefList' in data[phaseName]:
refList = np.array(data[phaseName]['RefList'])
else:
wx.MessageBox('No reflection list - do Refine first',caption='Reflection plotting')
return
FoMax = np.max(refList.T[8+Super])
Hmin = np.array([int(np.min(refList.T[0])),int(np.min(refList.T[1])),int(np.min(refList.T[2]))])
Hmax = np.array([int(np.max(refList.T[0])),int(np.max(refList.T[1])),int(np.max(refList.T[2]))])
controls = {'Type' : 'Fo','ifFc' : True,'HKLmax' : Hmax,'HKLmin' : Hmin,
'FoMax' : FoMax,'Zone' : '001','Layer' : 0,'Scale' : 1.0,'Super':Super,'SuperVec':SuperVec}
G2plt.PlotSngl(G2frame,newPlot=True,Data=controls,hklRef=refList,Title=phaseName)
def OnPlot3DHKL(event):
'''Plots the reflections in 3D
'''
phaseName = G2frame.RefList
Super = 0
SuperVec = []
if phaseName not in ['Unknown',]:
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
phaseId = G2gd.GetGPXtreeItemId(G2frame,pId,phaseName)
General = G2frame.GPXtree.GetItemPyData(phaseId)['General']
if General.get('Modulated',False):
Super = 1
SuperVec = General['SuperVec']
        if 'list' in str(type(data)): #single crystal data is 2 dicts in a list
refList = data[1]['RefList']
else: #powder data is a dict of dicts; each same structure as SC 2nd dict
if 'RefList' in data[phaseName]:
refList = np.array(data[phaseName]['RefList'])
else:
wx.MessageBox('No reflection list - do Refine first',caption='Reflection plotting')
return
refList.T[3+Super] = np.where(refList.T[4+Super]<dMin,-refList.T[3+Super],refList.T[3+Super])
FoMax = np.max(refList.T[8+Super])
Hmin = np.array([int(np.min(refList.T[0])),int(np.min(refList.T[1])),int(np.min(refList.T[2]))])
Hmax = np.array([int(np.max(refList.T[0])),int(np.max(refList.T[1])),int(np.max(refList.T[2]))])
Vpoint = np.array([int(np.mean(refList.T[0])),int(np.mean(refList.T[1])),int(np.mean(refList.T[2]))])
controls = {'Type':'Fosq','Iscale':False,'HKLmax':Hmax,'HKLmin':Hmin,'Zone':False,'viewKey':'L',
'FoMax' : FoMax,'Scale' : 1.0,'Drawing':{'viewPoint':[Vpoint,[]],'default':Vpoint[:],
'backColor':[0,0,0],'depthFog':False,'Zclip':10.0,'cameraPos':10.,'Zstep':0.05,'viewUp':[0,1,0],
'Scale':1.0,'oldxy':[],'viewDir':[0,0,1]},'Super':Super,'SuperVec':SuperVec}
G2plt.Plot3DSngl(G2frame,newPlot=True,Data=controls,hklRef=refList,Title=phaseName)
def MakeReflectionTable(phaseName):
'''Returns a wx.grid table (G2G.Table) containing a list of all reflections
for a phase.
'''
Super = 0
if phaseName not in ['Unknown',]:
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Phases')
            if pId: # pId is absent when the phase section is missing from the file (unusual)
phaseId = G2gd.GetGPXtreeItemId(G2frame,pId,phaseName)
if phaseId: #is phase deleted?
General = G2frame.GPXtree.GetItemPyData(phaseId)['General']
if General.get('Modulated',False):
Super = 1
rowLabels = []
if HKLF:
refList = data[1]['RefList']
refs = refList
else:
if len(data) > 1:
G2frame.dataWindow.SelectPhase.Enable(True)
try: #patch for old reflection lists
if not len(data[phaseName]):
return None
refList = np.array(data[phaseName]['RefList'])
I100 = refList.T[8+Super]*refList.T[11+Super]
except TypeError:
refList = np.array([refl[:11+Super] for refl in data[phaseName]])
I100 = refList.T[8+Super]*np.array([refl[11+Super] for refl in data[phaseName]])
Imax = np.max(I100)
if Imax:
I100 *= 100.0/Imax
if 'C' in Inst['Type'][0]:
refs = np.vstack((refList.T[:15+Super],I100)).T
elif 'T' in Inst['Type'][0]:
refs = np.vstack((refList.T[:18+Super],I100)).T
rowLabels = [str(i) for i in range(len(refs))]
Types = (4+Super)*[wg.GRID_VALUE_LONG,]+4*[wg.GRID_VALUE_FLOAT+':10,4',]+ \
2*[wg.GRID_VALUE_FLOAT+':10,2',]+[wg.GRID_VALUE_FLOAT+':10,3',]+ \
[wg.GRID_VALUE_FLOAT+':10,3',]
if HKLF:
colLabels = ['H','K','L','flag','d','Fosq','sig','Fcsq','FoTsq','FcTsq','phase','ExtC',]
if 'T' in Inst['Type'][0]:
colLabels = ['H','K','L','flag','d','Fosq','sig','Fcsq','FoTsq','FcTsq','phase','ExtC','wave','tbar']
Types += 2*[wg.GRID_VALUE_FLOAT+':10,3',]
if Super:
colLabels.insert(3,'M')
else:
if 'C' in Inst['Type'][0]:
colLabels = ['H','K','L','mul','d','pos','sig','gam','Fosq','Fcsq','phase','Icorr','Prfo','Trans','ExtP','I100']
Types += 4*[wg.GRID_VALUE_FLOAT+':10,3',]
elif 'T' in Inst['Type'][0]:
colLabels = ['H','K','L','mul','d','pos','sig','gam','Fosq','Fcsq','phase','Icorr','alp','bet','wave','Prfo','Abs','Ext','I100']
Types += 7*[wg.GRID_VALUE_FLOAT+':10,3',]
if Super:
colLabels.insert(3,'M')
refs.T[3+Super] = np.where(refs.T[4+Super]<dMin,-refs.T[3+Super],refs.T[3+Super])
return G2G.Table(refs,rowLabels=rowLabels,colLabels=colLabels,types=Types)
def ShowReflTable(phaseName):
'''Posts a table of reflections for a phase, creating the table
if needed using MakeReflectionTable
'''
def setBackgroundColors(im,it):
for r in range(G2frame.refTable[phaseName].GetNumberRows()):
if HKLF:
if float(G2frame.refTable[phaseName].GetCellValue(r,3+im)) <= 0.:
G2frame.refTable[phaseName].SetCellBackgroundColour(r,3+im,wx.RED)
Fosq = float(G2frame.refTable[phaseName].GetCellValue(r,5+im))
Fcsq = float(G2frame.refTable[phaseName].GetCellValue(r,7+im))
sig = float(G2frame.refTable[phaseName].GetCellValue(r,6+im))
rat = 11.
if sig:
rat = abs(Fosq-Fcsq)/sig
if rat > 10.:
G2frame.refTable[phaseName].SetCellBackgroundColour(r,7+im,wx.RED)
elif rat > 3.0:
G2frame.refTable[phaseName].SetCellBackgroundColour(r,7+im,wx.Colour(255,255,0))
else: #PWDR
if float(G2frame.refTable[phaseName].GetCellValue(r,12+im+itof)) < 0.:
G2frame.refTable[phaseName].SetCellBackgroundColour(r,12+im+itof,wx.RED)
if float(G2frame.refTable[phaseName].GetCellValue(r,3+im)) < 0:
G2frame.refTable[phaseName].SetCellBackgroundColour(r,8+im,wx.RED)
if not HKLF and not len(data[phaseName]):
return #deleted phase?
G2frame.RefList = phaseName
if HKLF:
G2frame.GetStatusBar().SetStatusText('abs(DF)/sig > 10 red; > 3 yellow; flag:>0 twin no., 0 sp.gp absent, -1 user rejected, -2 Rfree',1)
else:
G2frame.GetStatusBar().SetStatusText('Prfo < 0. in red; if excluded Fosq in red & mul < 0',1)
itof = 0
if HKLF:
im = data[1].get('Super',0)
else:
if 'T' in data[phaseName].get('Type',''):
itof = 3
im = data[phaseName].get('Super',0)
# has this table already been displayed?
if G2frame.refTable[phaseName].GetTable() is None:
PeakTable = MakeReflectionTable(phaseName)
G2frame.refTable[phaseName].SetTable(PeakTable, True)
G2frame.refTable[phaseName].EnableEditing(False)
G2frame.refTable[phaseName].SetMargins(0,0)
G2frame.refTable[phaseName].AutoSizeColumns(False)
setBackgroundColors(im,itof)
if HKLF:
refList = np.array([refl[:6+im] for refl in data[1]['RefList']])
else:
refList = np.array([refl[:6+im] for refl in data[phaseName]['RefList']])
G2frame.HKL = np.vstack((refList.T)).T #build for plots
# raise the tab (needed for 1st use and from OnSelectPhase)
for PageNum in range(G2frame.refBook.GetPageCount()):
if phaseName == G2frame.refBook.GetPageText(PageNum):
G2frame.refBook.SetSelection(PageNum)
break
else:
print (phaseName)
print (phases)
raise Exception("how did we not find a phase name?")
def OnPageChanged(event):
'''Respond to a press on a phase tab by displaying the reflections. This
routine is needed because the reflection table may not have been created yet.
'''
G2frame.refBook.SetSize(G2frame.dataWindow.GetClientSize()) #TODO -almost right
page = event.GetSelection()
phaseName = G2frame.refBook.GetPageText(page)
ShowReflTable(phaseName)
def OnSelectPhase(event):
'''For PWDR, selects a phase with a selection box. Called from menu.
'''
if len(phases) < 2: return
dlg = wx.SingleChoiceDialog(G2frame,'Select','Phase',phases)
try:
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelection()
ShowReflTable(phases[sel])
finally:
dlg.Destroy()
if not data:
print ('No phases, no reflections')
return
if HKLF:
G2frame.RefList = 1
phaseName = IsHistogramInAnyPhase(G2frame,Name)
if not phaseName:
phaseName = 'Unknown'
phases = [phaseName]
else:
phaseName = G2frame.RefList
phases = list(data.keys())
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.ReflMenu)
if HKLF:
G2frame.Bind(wx.EVT_MENU, OnPlotHKL, id=G2G.wxID_PWDHKLPLOT)
G2frame.Bind(wx.EVT_MENU, OnPlot1DHKL, id=G2G.wxID_1DHKLSTICKPLOT)
G2frame.Bind(wx.EVT_MENU, OnPlot3DHKL, id=G2G.wxID_PWD3DHKLPLOT)
G2frame.dataWindow.SelectPhase.Enable(False)
else:
G2frame.Bind(wx.EVT_MENU, OnSelectPhase, id=G2G.wxID_SELECTPHASE)
G2frame.Bind(wx.EVT_MENU, OnPlot1DHKL, id=G2G.wxID_1DHKLSTICKPLOT)
G2frame.Bind(wx.EVT_MENU, OnPlotHKL, id=G2G.wxID_PWDHKLPLOT)
G2frame.Bind(wx.EVT_MENU, OnPlot3DHKL, id=G2G.wxID_PWD3DHKLPLOT)
G2frame.dataWindow.SelectPhase.Enable(False)
G2frame.dataWindow.ClearData()
G2frame.refBook = G2G.GSNoteBook(parent=G2frame.dataWindow)
G2frame.dataWindow.GetSizer().Add(G2frame.refBook,1,wx.ALL|wx.EXPAND,1)
G2frame.refTable = {}
G2frame.dataWindow.currentGrids = []
for tabnum,phase in enumerate(phases):
if isinstance(data,list): #single crystal HKLF
G2frame.refTable[phase] = G2G.GSGrid(parent=G2frame.refBook)
G2frame.refBook.AddPage(G2frame.refTable[phase],phase)
G2frame.refTable[phase].SetScrollRate(10,10) # reflection grids (inside tab) need scroll bars
elif len(data[phase]): #else dict for PWDR
G2frame.refTable[phase] = G2G.GSGrid(parent=G2frame.refBook)
G2frame.refBook.AddPage(G2frame.refTable[phase],phase)
G2frame.refTable[phase].SetScrollRate(10,10) # as above
else: #cleanup deleted phase reflection lists
del data[phase]
if len(data):
G2frame.RefList = list(data.keys())[0]
phaseName = G2frame.RefList
else:
G2frame.RefList = ''
phaseName = ''
if phaseName: ShowReflTable(phaseName)
G2frame.refBook.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CHANGED, OnPageChanged)
G2frame.dataWindow.SetDataSize()
################################################################################
##### SASD/REFD Substances
################################################################################
def UpdateSubstanceGrid(G2frame,data):
'''respond to selection of SASD/REFD Substance data tree item.
'''
import Substances as substFile
def LoadSubstance(name):
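        '''Load the named substance from the Substances file into the data tree entry;
        computes the missing one of volume or density and the scattering/contrast quantities
        '''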
subst = substFile.Substances[name]
ElList = subst['Elements'].keys()
for El in ElList:
Info = G2elem.GetAtomInfo(El.strip().capitalize())
Info.update(subst['Elements'][El])
data['Substances'][name]['Elements'][El] = Info
if 'Volume' in subst:
data['Substances'][name]['Volume'] = subst['Volume']
data['Substances'][name]['Density'] = \
G2mth.Vol2Den(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])
elif 'Density' in subst:
data['Substances'][name]['Density'] = subst['Density']
data['Substances'][name]['Volume'] = \
G2mth.Den2Vol(data['Substances'][name]['Elements'],data['Substances'][name]['Density'])
else:
data['Substances'][name]['Volume'] = G2mth.El2EstVol(data['Substances'][name]['Elements'])
data['Substances'][name]['Density'] = \
G2mth.Vol2Den(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])
if 'X' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
elif 'NC' in Inst['Type'][0]:
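            # NB: El & Info here refer to the last element processed in the loop above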
isotopes = list(Info['Isotopes'].keys())
isotopes.sort()
data['Substances'][name]['Elements'][El]['Isotope'] = isotopes[-1]
data['Substances'][name]['Scatt density'] = \
G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
data['Substances'][name]['XAnom density'] = recontrst
data['Substances'][name]['XAbsorption'] = absorb
data['Substances'][name]['XImag density'] = imcontrst
def OnReloadSubstances(event):
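        'recompute scattering densities & contrast for all substances using the current wavelength'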
for name in data['Substances'].keys():
if name not in ['vacuum','unit scatter']:
if 'X' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
elif 'NC' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
data['Substances'][name]['XAnom density'] = recontrst
data['Substances'][name]['XAbsorption'] = absorb
data['Substances'][name]['XImag density'] = imcontrst
UpdateSubstanceGrid(G2frame,data)
def OnLoadSubstance(event):
names = list(substFile.Substances.keys())
names.sort()
dlg = wx.SingleChoiceDialog(G2frame, 'Which substance?', 'Select substance', names, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
name = names[dlg.GetSelection()]
else:
return
finally:
dlg.Destroy()
        data['Substances'][name] = {'Elements':{},'Volume':1.0,'Density':1.0,
            'Scatt density':0.0,'XAnom density':0.0,'XAbsorption':0.0,'XImag density':0.0}
LoadSubstance(name)
UpdateSubstanceGrid(G2frame,data)
def OnCopySubstance(event):
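        '''Copy the substance list to other selected histograms, recomputing
        the contrast terms with each target histogram's own wavelength.
        '''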
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy substances from\n'+hst[5:]+' to...',
'Copy substances', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'Instrument Parameters'))[0]
wave = G2mth.getWave(Inst)
ndata = copy.deepcopy(data)
for name in ndata['Substances'].keys():
if name not in ['vacuum','unit scatter']:
if 'X' in Inst['Type'][0]:
recontrst,absorb,imcontrst = G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
elif 'NC' in Inst['Type'][0]:
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
ndata['Substances'][name]['XAnom density'] = recontrst
ndata['Substances'][name]['XAbsorption'] = absorb
ndata['Substances'][name]['XImag density'] = imcontrst
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Substances'),ndata)
def OnAddSubstance(event):
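        '''Create a new, empty substance and prompt for its elemental
        composition; the entry is deleted again if no absorption was computed,
        i.e. no elements were added.
        '''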
dlg = wx.TextEntryDialog(None,'Enter a name for this substance','Substance Name Entry','New substance',
style=wx.OK|wx.CANCEL)
if dlg.ShowModal() == wx.ID_OK:
Name = dlg.GetValue()
data['Substances'][Name] = {'Elements':{},'Volume':1.0,'Density':1.0,
'Scatt density':0.0,'XAnom density':0.,'XAbsorption':0.,'XImag density':0.}
AddElement(Name)
        else:
            dlg.Destroy()
            return
        dlg.Destroy()
if not data['Substances'][Name]['XAbsorption']:
del data['Substances'][Name]
UpdateSubstanceGrid(G2frame,data)
def OnDeleteSubstance(event):
TextList = []
for name in data['Substances']:
if name not in ['vacuum','unit scatter']:
TextList += [name,]
if not TextList:
return
dlg = wx.SingleChoiceDialog(G2frame, 'Which substance?', 'Select substance to delete', TextList, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
name = TextList[dlg.GetSelection()]
else:
return
finally:
dlg.Destroy()
del(data['Substances'][name])
UpdateSubstanceGrid(G2frame,data)
def OnAddElement(event):
TextList = []
for name in data['Substances']:
if name not in ['vacuum','unit scatter']:
TextList += [name,]
if not TextList:
return
dlg = wx.SingleChoiceDialog(G2frame, 'Which substance?', 'Select substance', TextList, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
name = TextList[dlg.GetSelection()]
AddElement(name)
else:
return
finally:
dlg.Destroy()
UpdateSubstanceGrid(G2frame,data)
def AddElement(name):
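        '''Add one or more elements to substance *name* via the periodic table
        picker, assign a default isotope and update the volume, density and
        scattering densities.
        '''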
ElList = list(data['Substances'][name]['Elements'].keys())
dlg = G2elemGUI.PickElements(G2frame,ElList)
if dlg.ShowModal() == wx.ID_OK:
for El in dlg.Elem:
El = El.strip().capitalize()
Info = G2elem.GetAtomInfo(El)
Info.update({'Num':1.})
data['Substances'][name]['Elements'][El] = Info
isotopes = list(Info['Isotopes'].keys())
isotopes.sort()
data['Substances'][name]['Elements'][El]['Isotope'] = isotopes[-1]
data['Substances'][name]['Volume'] = G2mth.El2EstVol(data['Substances'][name]['Elements'])
data['Substances'][name]['Density'] = \
G2mth.Vol2Den(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])
if 'X' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
elif 'NC' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
data['Substances'][name]['XAnom density'] = recontrst
data['Substances'][name]['XAbsorption'] = absorb
data['Substances'][name]['XImag density'] = imcontrst
        dlg.Destroy()
def OnDeleteElement(event):
TextList = []
for name in data['Substances']:
if name not in ['vacuum','unit scatter']:
TextList += [name,]
if not TextList:
return
dlg = wx.SingleChoiceDialog(G2frame, 'Which substance?', 'Select substance', TextList, wx.CHOICEDLG_STYLE)
try:
if dlg.ShowModal() == wx.ID_OK:
name = TextList[dlg.GetSelection()]
else:
return
finally:
dlg.Destroy()
ElList = list(data['Substances'][name]['Elements'].keys())
if len(ElList):
DE = G2elemGUI.DeleteElement(G2frame,ElList)
if DE.ShowModal() == wx.ID_OK:
El = DE.GetDeleteElement().strip().upper()
del(data['Substances'][name]['Elements'][El])
data['Substances'][name]['Volume'] = G2mth.El2EstVol(data['Substances'][name]['Elements'])
data['Substances'][name]['Density'] = \
G2mth.Vol2Den(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])
if 'X' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
elif 'NC' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
data['Substances'][name]['XAnom density'] = recontrst
data['Substances'][name]['XAbsorption'] = absorb
data['Substances'][name]['XImag density'] = imcontrst
UpdateSubstanceGrid(G2frame,data)
def SubstSizer():
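        '''Assemble the sizer that displays every substance with editable
        element counts, isotope choices (neutron data), volume and density.
        '''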
def OnNum(invalid,value,tc):
if invalid: return
name,El,keyId = Indx[tc.GetId()]
data['Substances'][name]['Volume'] = G2mth.El2EstVol(data['Substances'][name]['Elements'])
data['Substances'][name]['Density'] = \
G2mth.Vol2Den(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])
if 'X' in Inst['Type'][0]:
recontrst,absorb,imcontrst = G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
elif 'NC' in Inst['Type'][0]:
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
data['Substances'][name]['XAnom density'] = recontrst
data['Substances'][name]['XAbsorption'] = absorb
data['Substances'][name]['XImag density'] = imcontrst
wx.CallAfter(UpdateSubstanceGrid,G2frame,data)
def OnVolDen(invalid,value,tc):
if invalid: return
name,keyId = Indx[tc.GetId()]
            if keyId == 'Volume':
                data['Substances'][name]['Density'] = \
                    G2mth.Vol2Den(data['Substances'][name]['Elements'],value)
            elif keyId == 'Density':
                data['Substances'][name]['Volume'] = \
                    G2mth.Den2Vol(data['Substances'][name]['Elements'],value)
if 'X' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.XScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
elif 'NC' in Inst['Type'][0]:
data['Substances'][name]['Scatt density'] = \
G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'])[0]
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
data['Substances'][name]['XAnom density'] = recontrst
data['Substances'][name]['XAbsorption'] = absorb
data['Substances'][name]['XImag density'] = imcontrst
wx.CallAfter(UpdateSubstanceGrid,G2frame,data)
def OnIsotope(event):
Obj = event.GetEventObject()
El,name = Indx[Obj.GetId()]
data['Substances'][name]['Elements'][El]['Isotope'] = Obj.GetValue()
recontrst,absorb,imcontrst = G2mth.NCScattDen(data['Substances'][name]['Elements'],data['Substances'][name]['Volume'],wave)
data['Substances'][name]['XAnom density'] = recontrst
data['Substances'][name]['XAbsorption'] = absorb
data['Substances'][name]['XImag density'] = imcontrst
wx.CallAfter(UpdateSubstanceGrid,G2frame,data)
Indx = {}
substSizer = wx.BoxSizer(wx.VERTICAL)
substSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Substance list: wavelength: %.5fA'%(wave)),
0,WACV)
for name in data['Substances']:
G2G.HorizontalLine(substSizer,G2frame.dataWindow)
substSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Data for '+name+':'),
0,WACV)
if name == 'vacuum':
substSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Not applicable'),
0,WACV)
elif name == 'unit scatter':
substSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Scattering density,f: %.3f *10%scm%s'%(data['Substances'][name]['Scatt density'],Pwr10,Pwrm2)),0,WACV)
else:
elSizer = wx.FlexGridSizer(0,8,5,5)
Substance = data['Substances'][name]
Elems = Substance['Elements']
for El in Elems: #do elements as pull downs for isotopes for neutrons
elSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' '+El+': '),
0,WACV)
num = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Substances'][name]['Elements'][El],'Num',
nDig=(10,2,'f'),typeHint=float,OnLeave=OnNum)
Indx[num.GetId()] = [name,El,'Num']
elSizer.Add(num,0,WACV)
if 'N' in Inst['Type'][0]:
elSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Isotope: '),0,WACV)
isotopes = list(Elems[El]['Isotopes'].keys())
isotope = wx.ComboBox(G2frame.dataWindow,choices=isotopes,value=Elems[El].get('Isotope','Nat. Abund.'),
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[isotope.GetId()] = [El,name]
isotope.Bind(wx.EVT_COMBOBOX,OnIsotope)
elSizer.Add(isotope,0,WACV)
substSizer.Add(elSizer,0)
vdsSizer = wx.FlexGridSizer(0,4,5,5)
vdsSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Volume: '),
0,WACV)
vol = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Substances'][name],'Volume',nDig=(10,2),typeHint=float,OnLeave=OnVolDen)
Indx[vol.GetId()] = [name,'Volume']
vdsSizer.Add(vol,0,WACV)
vdsSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' Density: '),
0,WACV)
den = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Substances'][name],'Density',nDig=(10,2),typeHint=float,OnLeave=OnVolDen)
Indx[den.GetId()] = [name,'Density']
vdsSizer.Add(den,0,WACV)
substSizer.Add(vdsSizer,0)
denSizer = wx.FlexGridSizer(0,2,0,0)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Scattering density,f'),0,WACV)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=': %.3f *10%scm%s'%(Substance['Scatt density'],Pwr10,Pwrm2)),0,WACV)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=" Real density,f+f'"),0,WACV)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=': %.3f *10%scm%s'%(Substance['XAnom density'],Pwr10,Pwrm2)),0,WACV)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Imaginary density,f"'),0,WACV)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=': %.3g *10%scm%s'%(Substance['XImag density'],Pwr10,Pwrm2)),0,WACV)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Absorption'),0,WACV)
denSizer.Add(wx.StaticText(G2frame.dataWindow,label=': %.3g cm%s'%(Substance['XAbsorption'],Pwrm1)),0,WACV)
substSizer.Add(denSizer)
return substSizer
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
wave = G2mth.getWave(Inst)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.SubstanceMenu)
G2frame.Bind(wx.EVT_MENU, OnLoadSubstance, id=G2G.wxID_LOADSUBSTANCE)
G2frame.Bind(wx.EVT_MENU, OnReloadSubstances, id=G2G.wxID_RELOADSUBSTANCES)
G2frame.Bind(wx.EVT_MENU, OnAddSubstance, id=G2G.wxID_ADDSUBSTANCE)
G2frame.Bind(wx.EVT_MENU, OnCopySubstance, id=G2G.wxID_COPYSUBSTANCE)
G2frame.Bind(wx.EVT_MENU, OnDeleteSubstance, id=G2G.wxID_DELETESUBSTANCE)
G2frame.Bind(wx.EVT_MENU, OnAddElement, id=G2G.wxID_ELEMENTADD)
G2frame.Bind(wx.EVT_MENU, OnDeleteElement, id=G2G.wxID_ELEMENTDELETE)
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(SubstSizer(),0)
G2frame.dataWindow.SetDataSize()
################################################################################
##### SASD Models
################################################################################
def UpdateModelsGrid(G2frame,data):
'''respond to selection of SASD Models data tree item.
'''
#patches
if 'Current' not in data:
data['Current'] = 'Size dist.'
if 'logBins' not in data['Size']:
data['Size']['logBins'] = True
if 'MinMaxDiam' in data['Size']:
data['Size']['MinDiam'] = 50.
data['Size']['MaxDiam'] = 10000.
del data['Size']['MinMaxDiam']
if isinstance(data['Size']['MaxEnt']['Sky'],float):
data['Size']['MaxEnt']['Sky'] = -3
if 'Power' not in data['Size']['IPG']:
data['Size']['IPG']['Power'] = -1
if 'Matrix' not in data['Particle']:
data['Particle']['Matrix'] = {'Name':'vacuum','VolFrac':[0.0,False]}
if 'BackFile' not in data:
data['BackFile'] = ''
if 'Pair' not in data:
data['Pair'] = {'Method':'Moore','MaxRadius':100.,'NBins':100,'Errors':'User','Result':[],
'Percent error':2.5,'Background':[0,False],'Distribution':[],'Moore':10,'Dist G':100.,}
if 'Shapes' not in data:
data['Shapes'] = {'outName':'run','NumAA':100,'Niter':1,'AAscale':1.0,'Symm':1,'bias-z':0.0,
'inflateV':1.0,'AAglue':0.0,'pdbOut':False,'boxStep':4.0}
if 'boxStep' not in data['Shapes']:
data['Shapes']['boxStep'] = 4.0
plotDefaults = {'oldxy':[0.,0.],'Quaternion':[0.,0.,0.,1.],'cameraPos':150.,'viewDir':[0,0,1],}
#end patches
def RefreshPlots(newPlot=False):
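        '''Redraw the currently selected plot tab if it shows the SASD
        pattern, the size distribution or the pair distribution.
        '''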
PlotText = G2frame.G2plotNB.nb.GetPageText(G2frame.G2plotNB.nb.GetSelection())
if 'Powder' in PlotText:
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=newPlot)
elif 'Size' in PlotText:
G2plt.PlotSASDSizeDist(G2frame)
elif 'Pair' in PlotText:
G2plt.PlotSASDPairDist(G2frame)
def OnAddModel(event):
if data['Current'] == 'Particle fit':
material = 'vacuum'
if len(data['Particle']['Levels']):
material = data['Particle']['Levels'][-1]['Controls']['Material']
data['Particle']['Levels'].append({
'Controls':{'FormFact':'Sphere','DistType':'LogNormal','Material':material,
'FFargs':{},'SFargs':{},'NumPoints':50,'Cutoff':0.01,'Contrast':0.0,
'SlitSmear':[0.0,False],'StrFact':'Dilute'}, #last 2 not used - future?
'LogNormal':{'Volume':[0.05,False],'Mean':[1000.,False],'StdDev':[0.5,False],'MinSize':[10.,False],},
'Gaussian':{'Volume':[0.05,False],'Mean':[1000.,False],'StdDev':[300.,False],},
'LSW':{'Volume':[0.05,False],'Mean':[1000.0,False],},
'Schulz-Zimm':{'Volume':[0.05,False],'Mean':[1000.,False],'StdDev':[300.,False],},
'Unified':{'G':[1.e3,False],'Rg':[100,False],'B':[1.e-5,False],'P':[4,False],'Cutoff':[1e-5,False],},
'Porod':{'B':[1.e-4,False],'P':[4,False],'Cutoff':[1e-5,False],},
'Monodisperse':{'Volume':[0.05,False],'Radius':[100,False],}, #OK for spheres
'Bragg':{'PkInt':[100,False],'PkPos':[0.2,False],
'PkSig':[10,False],'PkGam':[10,False],}, #reasonable 31A peak
})
G2sasd.ModelFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnCopyModel(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy models from\n'+hst[5:]+' to...',
'Copy models', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
newdata = copy.deepcopy(data)
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Models'),newdata)
if newdata['BackFile']:
Profile = G2frame.GPXtree.GetItemPyData(Id)[1]
BackId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,newdata['BackFile'])
BackSample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,BackId, 'Sample Parameters'))
Profile[5] = BackSample['Scale'][0]*G2frame.GPXtree.GetItemPyData(BackId)[1][1]
UpdateModelsGrid(G2frame,newdata)
wx.CallAfter(UpdateModelsGrid,G2frame,data)
RefreshPlots(True)
def OnCopyFlags(event):
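        '''Copy only the refinement flags (not the parameter values) of the
        current model to the models of other selected histograms.
        '''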
thisModel = copy.deepcopy(data)
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy sample ref. flags from\n'+str(hst[5:])+' to...',
'Copy sample flags', histList)
distChoice = ['LogNormal','Gaussian','LSW','Schulz-Zimm','Bragg','Unified',
'Porod','Monodisperse',]
parmOrder = ['Volume','Radius','Mean','StdDev','G','Rg','B','P',
'Cutoff','PkInt','PkPos','PkSig','PkGam','VolFr','Dist',]
try:
if dlg.ShowModal() == wx.ID_OK:
result = dlg.GetSelections()
for i in result:
item = histList[i]
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
newModel = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'Models'))
newModel['Back'][1] = copy.copy(thisModel['Back'][1])
for ilev,level in enumerate(newModel['Particle']['Levels']):
for form in level:
if form in distChoice:
thisForm = thisModel['Particle']['Levels'][ilev][form]
for item in parmOrder:
if item in thisForm:
level[form][item][1] = copy.copy(thisForm[item][1])
elif form == 'Controls':
thisForm = thisModel['Particle']['Levels'][ilev][form]['SFargs']
for item in parmOrder:
if item in thisForm:
level[form]['SFargs'][item][1] = copy.copy(thisForm[item][1])
finally:
dlg.Destroy()
def OnFitModelAll(event):
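        '''Run a sequential SASD model fit over the selected data sets,
        optionally copying results forward and/or reversing the order; results
        are stored in a 'Sequential SASD results' tree entry.
        '''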
choices = G2gd.GetGPXtreeDataNames(G2frame,['SASD',])
od = {'label_1':'Copy to next','value_1':False,'label_2':'Reverse order','value_2':False}
dlg = G2G.G2MultiChoiceDialog(G2frame, 'Sequential SASD refinement',
'Select dataset to include',choices,extraOpts=od)
names = []
if dlg.ShowModal() == wx.ID_OK:
for sel in dlg.GetSelections():
names.append(choices[sel])
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Sequential SASD results')
if Id:
SeqResult = G2frame.GPXtree.GetItemPyData(Id)
else:
                Id = G2frame.GPXtree.AppendItem(parent=G2frame.root,text='Sequential SASD results')
                SeqResult = {'SeqPseudoVars':{},'SeqParFitEqList':[]}
SeqResult['histNames'] = names
else:
dlg.Destroy()
return
dlg.Destroy()
dlg = wx.ProgressDialog('SASD Sequential fit','Data set name = '+names[0],len(names),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
wx.BeginBusyCursor()
if od['value_2']:
names.reverse()
JModel = None
try:
for i,name in enumerate(names):
print (' Sequential fit for '+name)
GoOn = dlg.Update(i,newmsg='Data set name = '+name)[0]
if not GoOn:
break
sId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name)
if i and od['value_1']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Models'),JModel)
IProfDict,IProfile = G2frame.GPXtree.GetItemPyData(sId)[:2]
IModel = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Models'))
ISample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Sample Parameters'))
ILimits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Limits'))
IfOK,result,varyList,sig,Rvals,covMatrix,parmDict,Msg = G2sasd.ModelFit(IProfile,IProfDict,ILimits,ISample,IModel)
JModel = copy.deepcopy(IModel)
if not IfOK:
                    G2frame.ErrorDialog('Failed sequential refinement for data '+name,
                        ' Msg: '+Msg+'\nYou need to rethink your selection of parameters\n'+ \
                        ' Model restored to previous version for '+name)
SeqResult['histNames'] = names[:i]
dlg.Destroy()
break
else:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Models'),copy.deepcopy(IModel))
G2sasd.ModelFxn(IProfile,IProfDict,ILimits,ISample,IModel)
SeqResult[name] = {'variables':result[0],'varyList':varyList,'sig':sig,'Rvals':Rvals,
'covMatrix':covMatrix,'title':name,'parmDict':parmDict}
else:
dlg.Destroy()
print (' ***** Small angle sequential refinement successful *****')
finally:
wx.EndBusyCursor()
G2frame.GPXtree.SetItemPyData(Id,SeqResult)
G2frame.GPXtree.SelectItem(Id)
def OnFitModel(event):
if data['Current'] == 'Size dist.':
if not any(Sample['Contrast']):
G2frame.ErrorDialog('No contrast; your sample is a vacuum!',
'You need to define a scattering substance!\n'+ \
' Do Substances and then Sample parameters')
return
G2sasd.SizeDistribution(Profile,ProfDict,Limits,Sample,data)
G2plt.PlotSASDSizeDist(G2frame)
RefreshPlots(True)
elif data['Current'] == 'Particle fit':
SaveState()
Results = G2sasd.ModelFit(Profile,ProfDict,Limits,Sample,data)
if not Results[0]:
G2frame.ErrorDialog('Failed refinement',
' Msg: '+Results[-1]+'\nYou need to rethink your selection of parameters\n'+ \
' Model restored to previous version')
G2sasd.ModelFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
wx.CallAfter(UpdateModelsGrid,G2frame,data)
elif data['Current'] == 'Pair distance':
SaveState()
G2sasd.PairDistFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
G2plt.PlotSASDPairDist(G2frame)
wx.CallAfter(UpdateModelsGrid,G2frame,data)
elif data['Current'] == 'Shapes':
SaveState()
            wx.MessageBox(''' For use of SHAPES, please cite:
    A New Algorithm for the Reconstruction of Protein Molecular Envelopes
    from X-ray Solution Scattering Data,
    J. Badger, J. Appl. Crystallogr. 2019, 52, xxx-xxx.
    doi: 10.1107/S1600576719009774''',
                caption='Program Shapes',style=wx.ICON_INFORMATION)
dlg = wx.ProgressDialog('Running SHAPES','Cycle no.: 0 of 160',161,
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME)
data['Pair']['Result'] = [] #clear old results (if any) for now
data['Pair']['Result'] = G2shapes.G2shapes(Profile,ProfDict,Limits,data,dlg)
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnUnDo(event):
DoUnDo()
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Models'))
G2frame.dataWindow.SasdUndo.Enable(False)
UpdateModelsGrid(G2frame,data)
G2sasd.ModelFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
def DoUnDo():
print ('Undo last refinement')
file = open(G2frame.undosasd,'rb')
PatternId = G2frame.PatternId
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Models'),cPickle.load(file))
print (' Models recovered')
file.close()
def SaveState():
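        '''Pickle the current Models tree entry to a scratch file so that the
        last refinement can be undone.
        '''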
G2frame.undosasd = os.path.join(G2frame.dirname,'GSASIIsasd.save')
file = open(G2frame.undosasd,'wb')
PatternId = G2frame.PatternId
for item in ['Models']:
cPickle.dump(G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId,item)),file,1)
file.close()
G2frame.dataWindow.SasdUndo.Enable(True)
def OnSelectFit(event):
data['Current'] = fitSel.GetValue()
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnCheckBox(event):
Obj = event.GetEventObject()
item,ind = Indx[Obj.GetId()]
item[ind] = Obj.GetValue()
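    # OnIntVal validates integer entries; Indx maps a widget Id to
    # [container, key, minimum allowed value] and invalid input restores
    # the previous value.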
def OnIntVal(event):
event.Skip()
Obj = event.GetEventObject()
item,ind,minVal = Indx[Obj.GetId()]
try:
value = int(Obj.GetValue())
if value <= minVal:
raise ValueError
except ValueError:
value = item[ind]
Obj.SetValue(str(value))
item[ind] = value
def SizeSizer():
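        '''Assemble the sizer with the size-distribution controls: number of
        bins, diameter range, log binning, particle shape and the MaxEnt/IPG
        fitting options.
        '''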
def OnShape(event):
data['Size']['Shape'][0] = partsh.GetValue()
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnMethod(event):
data['Size']['Method'] = method.GetValue()
wx.CallAfter(UpdateModelsGrid,G2frame,data)
sizeSizer = wx.BoxSizer(wx.VERTICAL)
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Size distribution parameters: '),0,WACV)
binSizer = wx.FlexGridSizer(0,7,5,5)
binSizer.Add(wx.StaticText(G2frame.dataWindow,label=' No. size bins: '),0,WACV)
bins = ['50','100','150','200']
nbins = wx.ComboBox(G2frame.dataWindow,value=str(data['Size']['Nbins']),choices=bins,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[nbins.GetId()] = [data['Size'],'Nbins',0]
nbins.Bind(wx.EVT_COMBOBOX,OnIntVal)
binSizer.Add(nbins,0,WACV)
binSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Min diam.: '),0,WACV)
minDias = ['10','25','50','100','150','200']
mindiam = wx.ComboBox(G2frame.dataWindow,value=str(data['Size']['MinDiam']),choices=minDias,
style=wx.CB_DROPDOWN)
mindiam.Bind(wx.EVT_LEAVE_WINDOW,OnIntVal)
mindiam.Bind(wx.EVT_TEXT_ENTER,OnIntVal)
mindiam.Bind(wx.EVT_KILL_FOCUS,OnIntVal)
Indx[mindiam.GetId()] = [data['Size'],'MinDiam',0]
binSizer.Add(mindiam,0,WACV)
binSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Max diam.: '),0,WACV)
maxDias = [str(1000*(i+1)) for i in range(10)]
maxdiam = wx.ComboBox(G2frame.dataWindow,value=str(data['Size']['MaxDiam']),choices=maxDias,
style=wx.CB_DROPDOWN)
maxdiam.Bind(wx.EVT_LEAVE_WINDOW,OnIntVal)
maxdiam.Bind(wx.EVT_TEXT_ENTER,OnIntVal)
maxdiam.Bind(wx.EVT_KILL_FOCUS,OnIntVal)
Indx[maxdiam.GetId()] = [data['Size'],'MaxDiam',0]
binSizer.Add(maxdiam,0,WACV)
logbins = wx.CheckBox(G2frame.dataWindow,label='Log bins?')
Indx[logbins.GetId()] = [data['Size'],'logBins']
logbins.SetValue(data['Size']['logBins'])
logbins.Bind(wx.EVT_CHECKBOX, OnCheckBox)
binSizer.Add(logbins,0,WACV)
sizeSizer.Add(binSizer,0)
sizeSizer.Add((5,5),0)
partSizer = wx.BoxSizer(wx.HORIZONTAL)
partSizer.Add(wx.StaticText(G2frame.dataWindow,label='Particle description: '),0,WACV)
shapes = {'Spheroid':' Aspect ratio: ','Cylinder':' Diameter ','Cylinder AR':' Aspect ratio: ',
'Unified sphere':'','Unified rod':' Diameter: ','Unified rod AR':' Aspect ratio: ',
'Unified disk':' Thickness: ', 'Spherical shell': ' Shell thickness'}
partsh = wx.ComboBox(G2frame.dataWindow,value=str(data['Size']['Shape'][0]),choices=list(shapes.keys()),
style=wx.CB_READONLY|wx.CB_DROPDOWN)
partsh.Bind(wx.EVT_COMBOBOX,OnShape)
partSizer.Add(partsh,0,WACV)
if data['Size']['Shape'][0] not in ['Unified sphere',]:
partSizer.Add(wx.StaticText(G2frame.dataWindow,label=shapes[data['Size']['Shape'][0]]),0,WACV)
partprm = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Size']['Shape'],1,
nDig=(10,3),typeHint=float,min=0.)
partSizer.Add(partprm,0,WACV)
sizeSizer.Add(partSizer,0)
sizeSizer.Add((5,5),0)
fitSizer = wx.BoxSizer(wx.HORIZONTAL)
methods = ['MaxEnt','IPG',]
fitSizer.Add(wx.StaticText(G2frame.dataWindow,label='Fitting method: '),0,WACV)
method = wx.ComboBox(G2frame.dataWindow,value=data['Size']['Method'],choices=methods,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
method.Bind(wx.EVT_COMBOBOX,OnMethod)
fitSizer.Add(method,0,WACV)
iters = ['10','25','50','100','150','200']
fitSizer.Add(wx.StaticText(G2frame.dataWindow,label=' No. iterations: '),0,WACV)
Method = data['Size']['Method']
        niter = wx.ComboBox(G2frame.dataWindow,value=str(data['Size'][Method]['Niter']),choices=iters,
            style=wx.CB_READONLY|wx.CB_DROPDOWN)
        Indx[niter.GetId()] = [data['Size'][Method],'Niter',0]
        niter.Bind(wx.EVT_COMBOBOX,OnIntVal)
        fitSizer.Add(niter,0,WACV)
if 'MaxEnt' in data['Size']['Method']:
fitSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Log floor factor: '),0,WACV)
floors = [str(-i) for i in range(9)]
floor = wx.ComboBox(G2frame.dataWindow,value=str(data['Size']['MaxEnt']['Sky']),choices=floors,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[floor.GetId()] = [data['Size']['MaxEnt'],'Sky',-10]
floor.Bind(wx.EVT_COMBOBOX,OnIntVal)
fitSizer.Add(floor,0,WACV)
elif 'IPG' in data['Size']['Method']:
fitSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Q power weight (-1 for sigma): '),0,WACV)
choices = ['-1','0','1','2','3','4']
power = wx.ComboBox(G2frame.dataWindow,value=str(data['Size']['IPG']['Power']),choices=choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[power.GetId()] = [data['Size']['IPG'],'Power',-2]
power.Bind(wx.EVT_COMBOBOX,OnIntVal)
fitSizer.Add(power,0,WACV)
sizeSizer.Add(fitSizer,0)
return sizeSizer
def PairSizer():
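        '''Assemble the sizer with the P(R) pair-distance controls: number of
        R bins, maximum diameter (with an estimator), fitting method and error
        treatment.
        '''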
def OnMethod(event):
data['Pair']['Method'] = method.GetValue()
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnError(event):
data['Pair']['Errors'] = error.GetValue()
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnMaxRadEst(event):
Results = G2sasd.RgFit(Profile,ProfDict,Limits,Sample,data)
if not Results[0]:
G2frame.ErrorDialog('Failed refinement',
' Msg: '+Results[-1]+'\nYou need to rethink your selection of parameters\n'+ \
' Model restored to previous version')
RefreshPlots(True)
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnMooreTerms(event):
data['Pair']['Moore'] = int(round(Limits[1][1]*data['Pair']['MaxRadius']/np.pi))-1
wx.CallAfter(UpdateModelsGrid,G2frame,data)
def OnNewVal(invalid,value,tc):
if invalid: return
parmDict = {'Rg':data['Pair']['MaxRadius']/2.5,'G':data['Pair']['Dist G'],
'B':data['Pair'].get('Dist B',Profile[1][-1]*Profile[0][-1]**4),
'Back':data['Back'][0]}
Profile[2] = G2sasd.getSASDRg(Profile[0],parmDict)
RefreshPlots(True)
pairSizer = wx.BoxSizer(wx.VERTICAL)
pairSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Pair distribution parameters: '),0,WACV)
binSizer = wx.FlexGridSizer(0,6,5,5)
binSizer.Add(wx.StaticText(G2frame.dataWindow,label=' No. R bins: '),0,WACV)
bins = ['50','100','150','200']
nbins = wx.ComboBox(G2frame.dataWindow,value=str(data['Pair']['NBins']),choices=bins,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[nbins.GetId()] = [data['Pair'],'NBins',0]
nbins.Bind(wx.EVT_COMBOBOX,OnIntVal)
binSizer.Add(nbins,0,WACV)
binSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Max diam.: '),0,WACV)
maxdiam = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Pair'],'MaxRadius',min=10.,nDig=(10,1),OnLeave=OnNewVal)
binSizer.Add(maxdiam,0,WACV)
maxest = wx.Button(G2frame.dataWindow,label='Make estimate')
maxest.Bind(wx.EVT_BUTTON,OnMaxRadEst)
binSizer.Add(maxest,0,WACV)
pairSizer.Add(binSizer,0)
pairSizer.Add((5,5),0)
fitSizer = wx.BoxSizer(wx.HORIZONTAL)
methods = ['Moore',] #'Regularization',
fitSizer.Add(wx.StaticText(G2frame.dataWindow,label='Fitting method: '),0,WACV)
method = wx.ComboBox(G2frame.dataWindow,value=data['Pair']['Method'],choices=methods,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
method.Bind(wx.EVT_COMBOBOX,OnMethod)
fitSizer.Add(method,0,WACV)
if data['Pair']['Method'] == 'Moore':
fitSizer.Add(wx.StaticText(G2frame.dataWindow,label=" P.B. Moore, J. Appl. Cryst., 13, 168-175 (1980)"),0,WACV)
else:
fitSizer.Add(wx.StaticText(G2frame.dataWindow,label=" D.I. Svergun, J. Appl. Cryst., 24, 485-492 (1991)"),0,WACV)
pairSizer.Add(fitSizer,0,WACV)
if 'Moore' in data['Pair']['Method']:
mooreSizer = wx.BoxSizer(wx.HORIZONTAL)
mooreSizer.Add(wx.StaticText(G2frame.dataWindow,label='Number of functions: '),0,WACV)
moore = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Pair'],'Moore',min=2,max=20)
mooreSizer.Add(moore,0,WACV)
mooreterms = wx.Button(G2frame.dataWindow,label = 'Auto determine?')
mooreterms.Bind(wx.EVT_BUTTON,OnMooreTerms)
mooreSizer.Add(mooreterms,0,WACV)
pairSizer.Add(mooreSizer,0,WACV)
errorSizer = wx.BoxSizer(wx.HORIZONTAL)
errorSizer.Add(wx.StaticText(G2frame.dataWindow,label='Error method: '),0,WACV)
errors = ['User','Sqrt','Percent']
error = wx.ComboBox(G2frame.dataWindow,value=data['Pair']['Errors'],choices=errors,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
error.Bind(wx.EVT_COMBOBOX,OnError)
if 'Percent' in data['Pair']['Errors']:
percent = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Pair'],'Percent error',min=0.5,nDig=(10,1))
errorSizer.Add(percent,0,WACV)
errorSizer.Add(error,0,WACV)
pairSizer.Add(errorSizer,0,WACV)
return pairSizer
def ShapesSizer():
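        '''Assemble the sizer with the SHAPES bead-model parameters and, when
        a previous run exists, a results grid from which bead/shape models can
        be selected for plotting.
        '''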
# def OnPDBout(event):
# data['Shapes']['pdbOut'] = not data['Shapes']['pdbOut']
def OnShapeSelect(event):
r,c = event.GetRow(),event.GetCol()
for i in [1,2]:
for j in range(len(Patterns)):
shapeTable.SetValue(j,i,False)
shapeTable.SetValue(r,c,True)
ShapesResult.ForceRefresh()
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))[1]
ProfDict,Profile = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[:2]
iBeg = np.searchsorted(Profile[0],Limits[0])
iFin = np.searchsorted(Profile[0],Limits[1])
pattern = Patterns[r]
Profile[3][iBeg:iFin+1] = np.array(pattern[2])
selAtoms = Atoms[2*r+(c-1)]
prCalc = PRcalc[r][2]
prDelt= np.diff(PRcalc[r][0])[0]
prsum = np.sum(prCalc)
prCalc /= prsum*prDelt
data['Pair']['Pair Calc'] = np.array([PRcalc[r][0],prCalc]).T
print('%s %d'%('num. beads',len(selAtoms[1])))
print('%s %.3f'%('selected r value',pattern[-1]))
print('%s %.3f'%('selected Delta P(r)',PRcalc[r][-1]))
PDBtext = 'P(R) dif: %.3f r-value: %.3f Nbeads: %d'%(PRcalc[r][-1],pattern[-1],len(selAtoms[1]))
# RefreshPlots(True)
G2plt.PlotPatterns(G2frame,plotType='SASD',newPlot=True)
G2plt.PlotSASDPairDist(G2frame)
G2plt.PlotBeadModel(G2frame,selAtoms,plotDefaults,PDBtext)
shapeSizer = wx.BoxSizer(wx.VERTICAL)
shapeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Shape parameters:'),0,WACV)
parmSizer = wx.FlexGridSizer(0,4,5,5)
#1st row
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' No. amino acids: '),0,WACV)
numAA = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'NumAA',min=10)
parmSizer.Add(numAA,0,WACV)
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Nballs=no. amino acids*'),0,WACV)
scaleAA = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'AAscale',min=0.01,max=10.,nDig=(10,2))
parmSizer.Add(scaleAA,0,WACV)
#2nd row
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Inflate by (1.-1.4): '),0,WACV)
inflate = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'inflateV',min=1.,max=1.4,nDig=(10,2))
parmSizer.Add(inflate,0,WACV)
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Axial symmetry (1-12): '),0,WACV)
symm = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'Symm',min=1,max=12)
parmSizer.Add(symm,0,WACV)
#3rd row
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' z-axis bias (-2 to 2): '),0,WACV)
zaxis = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'bias-z',min=-2.,max=2.,nDig=(10,2))
parmSizer.Add(zaxis,0,WACV)
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' elongation (0-20): '),0,WACV)
glue = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'AAglue',min=0.,max=20.,nDig=(10,2))
parmSizer.Add(glue,0,WACV)
#4th row
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' No. iterations (1-10): '),0,WACV)
niter = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'Niter',min=1,max=10)
parmSizer.Add(niter,0,WACV)
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Output name: '),0,WACV)
name = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'outName')
parmSizer.Add(name,0,WACV)
#last row
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Bead separation (3.5-5): '),0,WACV)
beadsep = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Shapes'],'boxStep',min=3.5,max=5,nDig=(10,1))
parmSizer.Add(beadsep,0,WACV)
# pdb = wx.CheckBox(G2frame.dataWindow,label=' Save as pdb files?: ')
# pdb.SetValue(data['Shapes']['pdbOut'])
# pdb.Bind(wx.EVT_CHECKBOX, OnPDBout)
# parmSizer.Add(pdb,0,WACV)
shapeSizer.Add(parmSizer)
if len(data['Pair'].get('Result',[])):
shapeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' SHAPES run results:'),0,WACV)
Atoms,Patterns,PRcalc = data['Pair']['Result']
colLabels = ['name','show beads','show shape','Rvalue','P(r) dif','Nbeads','Nshape']
Types = [wg.GRID_VALUE_STRING,]+2*[wg.GRID_VALUE_BOOL,]+2*[wg.GRID_VALUE_FLOAT+':10,3',]+2*[wg.GRID_VALUE_LONG,]
rowLabels = [str(i) for i in range(len(Patterns))]
tableVals = []
for i in range(len(Patterns)):
tableVals.append([Atoms[2*i][0],False,False,Patterns[i][-1],PRcalc[i][-1],len(Atoms[2*i][1]),len(Atoms[2*i+1][1])])
shapeTable = G2G.Table(tableVals,rowLabels=rowLabels,colLabels=colLabels,types=Types)
ShapesResult = G2G.GSGrid(G2frame.dataWindow)
ShapesResult.SetTable(shapeTable,True)
ShapesResult.AutoSizeColumns(False)
ShapesResult.Bind(wg.EVT_GRID_CELL_LEFT_CLICK, OnShapeSelect)
for r in range(len(Patterns)):
for c in range(7):
if c in [1,2]:
ShapesResult.SetReadOnly(r,c,isReadOnly=False)
else:
ShapesResult.SetReadOnly(r,c,isReadOnly=True)
shapeSizer.Add(ShapesResult,0,WACV)
return shapeSizer
def PartSizer():
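        '''Assemble the sizer for the particle-fit model: matrix material and
        volume fraction plus one block of distribution, form factor and
        structure factor controls per model component.
        '''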
FormFactors = {'Sphere':{},'Spheroid':{'Aspect ratio':[1.0,False]},
'Cylinder':{'Length':[100.,False]},'Cylinder diam':{'Diameter':[100.,False]},
'Cylinder AR':{'Aspect ratio':[1.0,False]},'Unified sphere':{},
'Unified rod':{'Length':[100.,False]},'Unified rod AR':{'Aspect ratio':[1.0,False]},
'Unified disk':{'Thickness':[100.,False]},
'Unified tube':{'Length':[100.,False],'Thickness':[10.,False]},
'Spherical shell':{'Shell thickness':[1.5,False] }, }
StructureFactors = {'Dilute':{},'Hard sphere':{'VolFr':[0.1,False],'Dist':[100.,False]},
'Sticky hard sphere':{'VolFr':[0.1,False],'Dist':[100.,False],'epis':[0.05,False],'Sticky':[0.2,False]},
'Square well':{'VolFr':[0.1,False],'Dist':[100.,False],'Depth':[0.1,False],'Width':[1.,False]},
'InterPrecipitate':{'VolFr':[0.1,False],'Dist':[100.,False]},}
ffDistChoices = ['Sphere','Spheroid','Cylinder','Cylinder diam',
'Cylinder AR','Unified sphere','Unified rod','Unified rod AR',
'Unified disk','Unified tube','Spherical shell',]
ffMonoChoices = ['Sphere','Spheroid','Cylinder','Cylinder AR',]
sfChoices = ['Dilute','Hard sphere','Sticky hard sphere','Square well','InterPrecipitate',]
slMult = 1000.
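        # wx sliders take integer positions, so parameter values (or their
        # log10 for wide-range parameters) are scaled by slMult when moving
        # between a text control and its companion slider.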
def OnValue(event):
event.Skip()
Obj = event.GetEventObject()
item,key,sldrObj = Indx[Obj.GetId()]
try:
value = float(Obj.GetValue())
if value <= 0.:
raise ValueError
except ValueError:
value = item[key][0]
item[key][0] = value
Obj.SetValue('%.3g'%(value))
if key in ['P','epis','Sticky','Depth','Width','VolFr','Dist']:
sldrObj.SetValue(slMult*value)
else:
logv = np.log10(value)
valMinMax = [logv-1,logv+1]
sldrObj.SetRange(slMult*valMinMax[0],slMult*valMinMax[1])
sldrObj.SetValue(slMult*logv)
G2sasd.ModelFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
def OnSelect(event):
Obj = event.GetEventObject()
item,key = Indx[Obj.GetId()]
if key in ['NumPoints',]:
item[key] = int(Obj.GetValue())
else:
item[key] = Obj.GetValue()
if 'Refine' not in Obj.GetLabel():
if 'FormFact' in key :
item['FFargs'] = FormFactors[Obj.GetValue()]
elif 'StrFact' in key:
item['SFargs'] = StructureFactors[Obj.GetValue()]
wx.CallAfter(UpdateModelsGrid,G2frame,data)
G2sasd.ModelFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
def OnDelLevel(event):
Obj = event.GetEventObject()
item = Indx[Obj.GetId()]
del data['Particle']['Levels'][item]
wx.CallAfter(UpdateModelsGrid,G2frame,data)
G2sasd.ModelFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
def OnParmSlider(event):
Obj = event.GetEventObject()
item,key,pvObj = Indx[Obj.GetId()]
slide = Obj.GetValue()
if key in ['P','epis','Sticky','Depth','Width','VolFr','Dist']:
value = float(slide/slMult)
else:
value = 10.**float(slide/slMult)
item[key][0] = value
pvObj.SetValue('%.3g'%(item[key][0]))
G2sasd.ModelFxn(Profile,ProfDict,Limits,Sample,data)
RefreshPlots(True)
def SizeSizer():
sizeSizer = wx.FlexGridSizer(0,4,5,5)
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Distribution: '),0,WACV)
Distchoice = ['LogNormal','Gaussian','LSW','Schulz-Zimm','Bragg','Unified','Porod','Monodisperse',]
distChoice = wx.ComboBox(G2frame.dataWindow,value=level['Controls']['DistType'],choices=Distchoice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[distChoice.GetId()] = [level['Controls'],'DistType']
distChoice.Bind(wx.EVT_COMBOBOX,OnSelect)
sizeSizer.Add(distChoice,0,WACV) #put structure factor choices here
if level['Controls']['DistType'] not in ['Bragg','Unified','Porod',]:
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Form Factor: '),0,WACV)
if 'Mono' in level['Controls']['DistType']:
ffChoice = wx.ComboBox(G2frame.dataWindow,value=level['Controls']['FormFact'],choices=ffMonoChoices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
else:
ffChoice = wx.ComboBox(G2frame.dataWindow,value=level['Controls']['FormFact'],choices=ffDistChoices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[ffChoice.GetId()] = [level['Controls'],'FormFact']
ffChoice.Bind(wx.EVT_COMBOBOX,OnSelect)
sizeSizer.Add(ffChoice,0,WACV)
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Material: '),0,WACV)
matSel = wx.ComboBox(G2frame.dataWindow,value=level['Controls']['Material'],
choices=list(Substances['Substances'].keys()),style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[matSel.GetId()] = [level['Controls'],'Material']
matSel.Bind(wx.EVT_COMBOBOX,OnSelect)
sizeSizer.Add(matSel,0,WACV) #do neutron test here?
rho = Substances['Substances'][level['Controls']['Material']].get('XAnom density',0.0)
level['Controls']['Contrast'] = contrast = (rho-rhoMat)**2
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Resonant X-ray contrast: '),0,WACV)
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' %.2f 10%scm%s'%(contrast,Pwr20,Pwrm4)),0,WACV)
if 'Mono' not in level['Controls']['DistType']:
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Num. radii: '),0,WACV)
radii = ['25','50','75','100','200']
nRadii = wx.ComboBox(G2frame.dataWindow,value=str(level['Controls']['NumPoints']),choices=radii,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[nRadii.GetId()] = [level['Controls'],'NumPoints']
nRadii.Bind(wx.EVT_COMBOBOX,OnSelect)
sizeSizer.Add(nRadii,0,WACV)
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' R dist. cutoff: '),0,WACV)
rCutoff = G2G.ValidatedTxtCtrl(G2frame.dataWindow,level['Controls'],'Cutoff',
min=0.001,max=0.1,typeHint=float)
sizeSizer.Add(rCutoff,0,WACV)
elif level['Controls']['DistType'] in ['Unified',]:
Parms = level['Unified']
Best = G2sasd.Bestimate(Parms['G'][0],Parms['Rg'][0],Parms['P'][0])
sizeSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Estimated Dist B: %12.4g'%(Best)),0,WACV)
return sizeSizer
def ParmSizer():
parmSizer = wx.FlexGridSizer(0,3,5,5)
parmSizer.AddGrowableCol(2,1)
parmSizer.SetFlexibleDirection(wx.HORIZONTAL)
Parms = level[level['Controls']['DistType']]
FFargs = level['Controls']['FFargs']
SFargs = level['Controls'].get('SFargs',{})
parmOrder = ['Volume','Radius','Mean','StdDev','MinSize','G','Rg','B','P','Cutoff',
'PkInt','PkPos','PkSig','PkGam',]
for parm in parmOrder:
if parm in Parms:
if parm == 'MinSize':
parmSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Dist '+parm),0,wx.ALIGN_CENTER)
else:
parmVar = wx.CheckBox(G2frame.dataWindow,label='Refine? Dist '+parm)
parmVar.SetValue(Parms[parm][1])
parmVar.Bind(wx.EVT_CHECKBOX, OnSelect)
parmSizer.Add(parmVar,0,WACV)
Indx[parmVar.GetId()] = [Parms[parm],1]
parmValue = wx.TextCtrl(G2frame.dataWindow,value='%.3g'%(Parms[parm][0]),
style=wx.TE_PROCESS_ENTER)
parmValue.Bind(wx.EVT_TEXT_ENTER,OnValue)
parmValue.Bind(wx.EVT_KILL_FOCUS,OnValue)
parmSizer.Add(parmValue,0,WACV)
if parm == 'P':
value = Parms[parm][0]
valMinMax = [0.1,4.2]
else:
value = np.log10(Parms[parm][0])
valMinMax = [value-1,value+1]
parmSldr = wx.Slider(G2frame.dataWindow,minValue=slMult*valMinMax[0],
maxValue=slMult*valMinMax[1],value=slMult*value)
Indx[parmValue.GetId()] = [Parms,parm,parmSldr]
Indx[parmSldr.GetId()] = [Parms,parm,parmValue]
parmSldr.Bind(wx.EVT_SLIDER,OnParmSlider)
parmSizer.Add(parmSldr,1,wx.EXPAND)
if level['Controls']['DistType'] not in ['Bragg']:
parmOrder = ['Aspect ratio','Length','Diameter','Thickness','VolFr','Dist','epis','Sticky','Depth','Width','Shell thickness',]
fTypes = ['FF ','SF ']
for iarg,Args in enumerate([FFargs,SFargs]):
for parm in parmOrder:
if parm in Args:
parmVar = wx.CheckBox(G2frame.dataWindow,label='Refine? '+fTypes[iarg]+parm)
parmVar.SetValue(Args[parm][1])
Indx[parmVar.GetId()] = [Args[parm],1]
parmVar.Bind(wx.EVT_CHECKBOX, OnSelect)
parmSizer.Add(parmVar,0,WACV)
parmValue = wx.TextCtrl(G2frame.dataWindow,value='%.3g'%(Args[parm][0]),
style=wx.TE_PROCESS_ENTER)
parmValue.Bind(wx.EVT_TEXT_ENTER,OnValue)
parmValue.Bind(wx.EVT_KILL_FOCUS,OnValue)
parmSizer.Add(parmValue,0,WACV)
value = Args[parm][0]
if parm == 'epis':
valMinMax = [0,.1]
elif parm in ['Sticky','Width',]:
valMinMax = [0,1.]
elif parm == 'Depth':
valMinMax = [-2.,2.]
elif parm == 'Dist':
valMinMax = [100.,1000.]
elif parm == 'VolFr':
valMinMax = [1.e-4,1.]
else:
value = np.log10(Args[parm][0])
valMinMax = [value-1,value+1]
parmSldr = wx.Slider(G2frame.dataWindow,minValue=slMult*valMinMax[0],
maxValue=slMult*valMinMax[1],value=slMult*value)
Indx[parmVar.GetId()] = [Args[parm],1]
Indx[parmValue.GetId()] = [Args,parm,parmSldr]
Indx[parmSldr.GetId()] = [Args,parm,parmValue]
parmSldr.Bind(wx.EVT_SLIDER,OnParmSlider)
parmSizer.Add(parmSldr,1,wx.EXPAND)
return parmSizer
Indx = {}
partSizer = wx.BoxSizer(wx.VERTICAL)
topSizer = wx.BoxSizer(wx.HORIZONTAL)
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Particle fit parameters: '),0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Matrix: '),0,WACV)
matsel = wx.ComboBox(G2frame.dataWindow,value=data['Particle']['Matrix']['Name'],
choices=list(Substances['Substances'].keys()),style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[matsel.GetId()] = [data['Particle']['Matrix'],'Name']
matsel.Bind(wx.EVT_COMBOBOX,OnSelect) #Do neutron test here?
rhoMat = Substances['Substances'][data['Particle']['Matrix']['Name']].get('XAnom density',0.0)
topSizer.Add(matsel,0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Volume fraction: '),0,WACV)
volfrac = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Particle']['Matrix']['VolFrac'],0,
typeHint=float)
topSizer.Add(volfrac,0,WACV)
volVar = wx.CheckBox(G2frame.dataWindow,label=' Refine?')
volVar.SetValue(data['Particle']['Matrix']['VolFrac'][1])
Indx[volVar.GetId()] = [data['Particle']['Matrix']['VolFrac'],1]
volVar.Bind(wx.EVT_CHECKBOX, OnSelect)
topSizer.Add(volVar,0,WACV)
partSizer.Add(topSizer,0,)
for ilev,level in enumerate(data['Particle']['Levels']):
G2G.HorizontalLine(partSizer,G2frame.dataWindow)
topLevel = wx.BoxSizer(wx.HORIZONTAL)
topLevel.Add(wx.StaticText(G2frame.dataWindow,label=' Model component %d: '%(ilev)),0,WACV)
delBtn = wx.Button(G2frame.dataWindow,label=' Delete?')
Indx[delBtn.GetId()] = ilev
delBtn.Bind(wx.EVT_BUTTON,OnDelLevel)
topLevel.Add(delBtn,0,WACV)
partSizer.Add(topLevel,0)
partSizer.Add(SizeSizer())
if level['Controls']['DistType'] not in ['Bragg','Unified','Porod',]:
topLevel.Add(wx.StaticText(G2frame.dataWindow,label=' Structure factor: '),0,WACV)
strfctr = wx.ComboBox(G2frame.dataWindow,value=level['Controls']['StrFact'],
choices=sfChoices,style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[strfctr.GetId()] = [level['Controls'],'StrFact']
strfctr.Bind(wx.EVT_COMBOBOX,OnSelect)
topLevel.Add(strfctr,0,WACV)
partSizer.Add(ParmSizer(),0,wx.EXPAND)
return partSizer
def OnEsdScale(event):
event.Skip()
try:
value = float(esdScale.GetValue())
if value <= 0.:
raise ValueError
except ValueError:
value = 1./np.sqrt(ProfDict['wtFactor'])
ProfDict['wtFactor'] = 1./value**2
esdScale.SetValue('%.3f'%(value))
RefreshPlots(True)
def OnBackChange(invalid,value,tc):
Profile[4][:] = value
RefreshPlots()
def OnBackFile(event): #multiple backgrounds?
data['BackFile'] = backFile.GetValue()
if data['BackFile']:
BackId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data['BackFile'])
BackSample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,BackId, 'Sample Parameters'))
Profile[5] = BackSample['Scale'][0]*G2frame.GPXtree.GetItemPyData(BackId)[1][1]
else:
Profile[5] = np.zeros(len(Profile[5]))
RefreshPlots(True)
Sample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Sample Parameters'))
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))
Substances = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Substances'))
ProfDict,Profile = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[:2]
if data['BackFile']:
BackId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data['BackFile'])
BackSample = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,BackId, 'Sample Parameters'))
Profile[5] = BackSample['Scale'][0]*G2frame.GPXtree.GetItemPyData(BackId)[1][1]
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.ModelMenu)
G2frame.dataWindow.ClearData()
G2frame.Bind(wx.EVT_MENU, OnCopyModel, id=G2G.wxID_MODELCOPY)
G2frame.Bind(wx.EVT_MENU, OnCopyFlags, id=G2G.wxID_MODELCOPYFLAGS)
G2frame.Bind(wx.EVT_MENU, OnFitModel, id=G2G.wxID_MODELFIT)
G2frame.Bind(wx.EVT_MENU, OnFitModelAll, id=G2G.wxID_MODELFITALL)
G2frame.Bind(wx.EVT_MENU, OnUnDo, id=G2G.wxID_MODELUNDO)
G2frame.Bind(wx.EVT_MENU, OnAddModel, id=G2G.wxID_MODELADD)
Indx = {}
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
topSizer = wx.BoxSizer(wx.HORIZONTAL)
models = ['Size dist.','Particle fit','Pair distance',]
if len(data['Pair']['Distribution']):
models += ['Shapes',]
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Modeling by: '),0,WACV)
fitSel = wx.ComboBox(G2frame.dataWindow,value=data['Current'],choices=models,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
fitSel.Bind(wx.EVT_COMBOBOX,OnSelectFit)
topSizer.Add(fitSel,0,WACV)
topSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Error multiplier: '),0,WACV)
esdScale = wx.TextCtrl(G2frame.dataWindow,value='%.3f'%(1./np.sqrt(ProfDict['wtFactor'])),style=wx.TE_PROCESS_ENTER)
esdScale.Bind(wx.EVT_TEXT_ENTER,OnEsdScale)
esdScale.Bind(wx.EVT_KILL_FOCUS,OnEsdScale)
topSizer.Add(esdScale,0,WACV)
mainSizer.Add(topSizer)
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
if 'Size' in data['Current']:
G2frame.dataWindow.SasSeqFit.Enable(False)
if 'MaxEnt' in data['Size']['Method']:
G2frame.GetStatusBar().SetStatusText('Size distribution by Maximum entropy',1)
elif 'IPG' in data['Size']['Method']:
G2frame.GetStatusBar().SetStatusText('Size distribution by Interior-Point Gradient',1)
mainSizer.Add(SizeSizer())
elif 'Particle' in data['Current']:
G2frame.dataWindow.SasSeqFit.Enable(True)
mainSizer.Add(PartSizer(),1,wx.ALIGN_LEFT|wx.EXPAND)
elif 'Pair' in data['Current']:
G2frame.dataWindow.SasSeqFit.Enable(False)
mainSizer.Add(PairSizer(),1,wx.ALIGN_LEFT|wx.EXPAND)
elif 'Shape' in data['Current']:
G2frame.dataWindow.SasSeqFit.Enable(False)
mainSizer.Add(ShapesSizer(),1,wx.ALIGN_LEFT|wx.EXPAND)
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
backSizer = wx.BoxSizer(wx.HORIZONTAL)
backSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Background:'),0,WACV)
backVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Back'],0,
nDig=(10,3,'g'),OnLeave=OnBackChange)
backSizer.Add(backVal,0,WACV)
if 'Shape' not in data['Current']:
backVar = wx.CheckBox(G2frame.dataWindow,label='Refine?')
Indx[backVar.GetId()] = [data['Back'],1]
backVar.SetValue(data['Back'][1])
backVar.Bind(wx.EVT_CHECKBOX, OnCheckBox)
backSizer.Add(backVar,0,WACV)
#multiple background files?
backSizer.Add(wx.StaticText(G2frame.dataWindow,-1,' Background file: '),0,WACV)
Choices = ['',]+G2gd.GetGPXtreeDataNames(G2frame,['SASD',])
backFile = wx.ComboBox(parent=G2frame.dataWindow,value=data['BackFile'],choices=Choices,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
backFile.Bind(wx.EVT_COMBOBOX,OnBackFile)
backSizer.Add(backFile)
mainSizer.Add(backSizer)
G2frame.dataWindow.SetDataSize()
################################################################################
##### REFD Models
################################################################################
def UpdateREFDModelsGrid(G2frame,data):
'''respond to selection of REFD Models data tree item.
'''
def OnCopyModel(event):
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
copyList = []
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy reflectivity models from\n'+str(hst[5:])+' to...',
'Copy parameters', histList)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
copyList.append(histList[i])
finally:
dlg.Destroy()
for item in copyList:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
G2frame.GPXtree.SetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,Id,'Models'),copy.deepcopy(data))
def OnFitModel(event):
SaveState()
G2pwd.REFDRefine(Profile,ProfDict,Inst,Limits,Substances,data)
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
G2plt.PlotPatterns(G2frame,plotType='REFD')
wx.CallAfter(UpdateREFDModelsGrid,G2frame,data)
def OnModelPlot(event):
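        '''Plot the SLD profiles of one or more selected REFD histograms on
        common axes, optionally zeroed at the substrate and with layer
        transition lines.
        '''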
hst = G2frame.GPXtree.GetItemText(G2frame.PatternId)
histList = GetFileList(G2frame,'REFD')
# histList = [hst,]
# histList += GetHistsLikeSelected(G2frame)
if not histList:
G2frame.ErrorDialog('No match','No histograms match '+hst,G2frame)
return
plotList = []
od = {'label_1':'Zero at substrate','value_1':False,'label_2':'Show layer transitions','value_2':True}
dlg = G2G.G2MultiChoiceDialog(G2frame,'Plot reflectivity models for:',
'Plot SLD models', histList,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
for i in dlg.GetSelections():
plotList.append(histList[i])
else:
dlg.Destroy()
return
finally:
dlg.Destroy()
XY = []
LinePos = []
for item in plotList:
mId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
model = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,mId,'Models'))
Substances = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,mId,'Substances'))['Substances']
x,xr,y = G2pwd.makeSLDprofile(model,Substances)
if od['value_1']:
XY.append([xr,y])
disLabel = r'$Distance\ from\ substrate,\ \AA$'
else:
XY.append([x,y])
disLabel = r'$Distance\ from\ top\ surface,\ \AA$'
if od['value_2']:
laySeq = model['Layer Seq'].split()
nLines = len(laySeq)+1
linePos = np.zeros(nLines)
                for ilay,lay in enumerate(np.fromstring(model['Layer Seq'],dtype=int,sep=' ')):
linePos[ilay+1:] += model['Layers'][lay].get('Thick',[0.,False])[0]
if od['value_1']:
linePos = linePos[-1]-linePos
LinePos.append(linePos)
G2plt.PlotXY(G2frame,XY,labelX=disLabel,labelY=r'$SLD,\ 10^{10}cm^{-2}$',newPlot=True,
Title='Scattering length density',lines=True,names=[],vertLines=LinePos)
def OnFitModelAll(event):
choices = G2gd.GetGPXtreeDataNames(G2frame,['REFD',])
od = {'label_1':'Copy to next','value_1':False,'label_2':'Reverse order','value_2':False}
dlg = G2G.G2MultiChoiceDialog(G2frame, 'Sequential REFD refinement',
'Select dataset to include',choices,extraOpts=od)
names = []
if dlg.ShowModal() == wx.ID_OK:
for sel in dlg.GetSelections():
names.append(choices[sel])
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Sequential REFD results')
if Id:
SeqResult = G2frame.GPXtree.GetItemPyData(Id)
else:
                Id = G2frame.GPXtree.AppendItem(parent=G2frame.root,text='Sequential REFD results')
                SeqResult = {'SeqPseudoVars':{},'SeqParFitEqList':[]}
SeqResult['histNames'] = names
else:
dlg.Destroy()
return
dlg.Destroy()
dlg = wx.ProgressDialog('REFD Sequential fit','Data set name = '+names[0],len(names),
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_REMAINING_TIME|wx.PD_CAN_ABORT)
wx.BeginBusyCursor()
if od['value_2']:
names.reverse()
JModel = None
try:
for i,name in enumerate(names):
print (' Sequential fit for '+name)
GoOn = dlg.Update(i,newmsg='Data set name = '+name)[0]
if not GoOn:
break
sId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name)
if i and od['value_1']:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Models'),JModel)
IProfDict,IProfile = G2frame.GPXtree.GetItemPyData(sId)[:2]
IModel = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Models'))
ISubstances = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Substances'))['Substances']
ILimits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Limits'))
IfOK,result,varyList,sig,Rvals,covMatrix,parmDict,Msg = G2pwd.REFDRefine(IProfile,IProfDict,Inst,ILimits,ISubstances,IModel)
JModel = copy.deepcopy(IModel)
if not IfOK:
                    G2frame.ErrorDialog('Failed sequential refinement for data '+name,
                        ' Msg: '+Msg+'\nYou need to rethink your selection of parameters\n'+ \
                        ' Model restored to previous version for '+name)
SeqResult['histNames'] = names[:i]
dlg.Destroy()
break
else:
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,sId, 'Models'),copy.deepcopy(IModel))
SeqResult[name] = {'variables':result[0],'varyList':varyList,'sig':sig,'Rvals':Rvals,
'covMatrix':covMatrix,'title':name,'parmDict':parmDict}
else:
dlg.Destroy()
            print (' ***** REFD sequential refinement successful *****')
finally:
wx.EndBusyCursor()
G2frame.GPXtree.SetItemPyData(Id,SeqResult)
G2frame.GPXtree.SelectItem(Id)
def ModelPlot(data,x,xr,y):
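        '''Plot the scattering length density profile for the model; vertical
        lines mark the layer interfaces and the x origin is the top surface or
        the substrate, per data['Zero'].
        '''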
laySeq = data['Layer Seq'].split()
nLines = len(laySeq)+1
linePos = np.zeros(nLines)
for ilay,lay in enumerate(np.fromstring(data['Layer Seq'],dtype=int,sep=' ')):
linePos[ilay+1:] += data['Layers'][lay].get('Thick',[0.,False])[0]
if data['Zero'] == 'Top':
XY = [[x,y],]
disLabel = r'$Distance\ from\ top\ surface,\ \AA$'
else:
XY = [[xr,y],]
linePos = linePos[-1]-linePos
disLabel = r'$Distance\ from\ substrate,\ \AA$'
G2plt.PlotXY(G2frame,XY,labelX=disLabel,labelY=r'$SLD,\ 10^{10}cm^{-2}$',newPlot=True,
Title='Scattering length density',lines=True,names=[],vertLines=[linePos,])
def OnUnDo(event):
DoUnDo()
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,
G2frame.PatternId,'Models'))
G2frame.dataWindow.REFDUndo.Enable(False)
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
G2plt.PlotPatterns(G2frame,plotType='REFD')
wx.CallLater(100,UpdateREFDModelsGrid,G2frame,data)
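    # DoUnDo/SaveState implement a one-deep undo: SaveState pickles the current
    # 'Models' tree entry to GSASIIrefd.save and DoUnDo restores it from that file.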
def DoUnDo():
print ('Undo last refinement')
        PatternId = G2frame.PatternId
        with open(G2frame.undorefd,'rb') as fp:   # context manager; avoids shadowing the 'file' builtin
            G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'Models'),cPickle.load(fp))
        print (' Model recovered')
def SaveState():
G2frame.undorefd = os.path.join(G2frame.dirname,'GSASIIrefd.save')
        PatternId = G2frame.PatternId
        with open(G2frame.undorefd,'wb') as fp:
            cPickle.dump(G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId,'Models')),fp,1)
G2frame.dataWindow.REFDUndo.Enable(True)
def ControlSizer():
def OnRefPos(event):
data['Zero'] = refpos.GetValue()
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
def OnMinSel(event):
data['Minimizer'] = minSel.GetValue()
def OnWeight(event):
data['2% weight'] = weight.GetValue()
def OnSLDplot(event):
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
# def OnQ4fftplot(event):
# q4fft.SetValue(False)
# R,F = G2pwd.makeRefdFFT(Limits,Profile)
# XY = [[R[:2500],F[:2500]],]
# G2plt.PlotXY(G2frame,XY,labelX='thickness',labelY='F(R)',newPlot=True,
# Title='Fourier transform',lines=True)
def OndQSel(event):
data['dQ type'] = dQSel.GetStringSelection()
Recalculate()
def NewRes(invalid,value,tc):
Recalculate()
def Recalculate():
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
G2plt.PlotPatterns(G2frame,plotType='REFD')
controlSizer = wx.BoxSizer(wx.VERTICAL)
resol = wx.BoxSizer(wx.HORIZONTAL)
choice = ['None','const '+GkDelta+'Q/Q',]
if ProfDict['ifDQ']:
choice += [GkDelta+'Q/Q in data']
dQSel = wx.RadioBox(G2frame.dataWindow,wx.ID_ANY,'Instrument resolution type:',choices=choice,
majorDimension=0,style=wx.RA_SPECIFY_COLS)
dQSel.SetStringSelection(data['dQ type'])
dQSel.Bind(wx.EVT_RADIOBOX,OndQSel)
resol.Add(dQSel,0,WACV)
resol.Add(wx.StaticText(G2frame.dataWindow,label=' (FWHM %): '),0,WACV)
resol.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Resolution'],0,nDig=(10,3),min=0.,max=5.,OnLeave=NewRes),0,WACV)
controlSizer.Add(resol,0,WACV)
minimiz = wx.BoxSizer(wx.HORIZONTAL)
minimiz.Add(wx.StaticText(G2frame.dataWindow,label=' Minimizer: '),0,WACV)
minlist = ['LMLS','Basin Hopping','MC/SA Anneal','L-BFGS-B',]
minSel = wx.ComboBox(G2frame.dataWindow,value=data['Minimizer'],choices=minlist,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
minSel.Bind(wx.EVT_COMBOBOX, OnMinSel)
minimiz.Add(minSel,0,WACV)
minimiz.Add(wx.StaticText(G2frame.dataWindow,label=' Bounds factor: '),0,WACV)
minimiz.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Toler',nDig=(10,2),max=0.99,min=0.1),0,WACV)
weight = wx.CheckBox(G2frame.dataWindow,label='Use 2% sig. weights')
weight.SetValue(data.get('2% weight',False))
weight.Bind(wx.EVT_CHECKBOX, OnWeight)
minimiz.Add(weight,0,WACV)
controlSizer.Add(minimiz,0,WACV)
plotSizer = wx.BoxSizer(wx.HORIZONTAL)
plotSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Plot controls: '),0,WACV)
sld = wx.Button(G2frame.dataWindow,label='Plot SLD?')
sld.Bind(wx.EVT_BUTTON, OnSLDplot)
plotSizer.Add(sld,0,WACV)
plotSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Zero position location: '),0,WACV)
poslist = ['Top','Bottom']
refpos = wx.ComboBox(G2frame.dataWindow,value=data['Zero'],choices=poslist,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
refpos.Bind(wx.EVT_COMBOBOX, OnRefPos)
plotSizer.Add(refpos,0,WACV)
# q4fft = wx.CheckBox(G2frame.dataWindow,label='Plot fft?')
# q4fft.Bind(wx.EVT_CHECKBOX, OnQ4fftplot)
# plotSizer.Add(q4fft,0,WACV)
controlSizer.Add(plotSizer,0,WACV)
return controlSizer
def OverallSizer():
#'DualFitFile':'', 'DualFltBack':[0.0,False],'DualScale':[1.0,False] future for neutrons - more than one?
def OnScaleRef(event):
data['Scale'][1] = scaleref.GetValue()
def OnBackRef(event):
data['FltBack'][1] = backref.GetValue()
def Recalculate(invalid,value,tc):
if invalid:
return
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
G2plt.PlotPatterns(G2frame,plotType='REFD')
overall = wx.BoxSizer(wx.HORIZONTAL)
overall.Add(wx.StaticText(G2frame.dataWindow,label=' Scale: '),0,WACV)
overall.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Scale'],0,
nDig=(10,2),typeHint=float,OnLeave=Recalculate),0,WACV)
scaleref = wx.CheckBox(G2frame.dataWindow,label=' Refine? ')
scaleref.SetValue(data['Scale'][1])
scaleref.Bind(wx.EVT_CHECKBOX, OnScaleRef)
overall.Add(scaleref,0,WACV)
overall.Add(wx.StaticText(G2frame.dataWindow,label=' Flat bkg.: '),0,WACV)
overall.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['FltBack'],0,
nDig=(10,2,'g'),typeHint=float,OnLeave=Recalculate),0,WACV)
backref = wx.CheckBox(G2frame.dataWindow,label=' Refine? ')
backref.SetValue(data['FltBack'][1])
backref.Bind(wx.EVT_CHECKBOX, OnBackRef)
overall.Add(backref,0,WACV)
return overall
def LayerSizer():
#'Penetration':[0.,False]?
def OnSelect(event):
Obj = event.GetEventObject()
item = Indx[Obj.GetId()]
Name = Obj.GetValue()
data['Layers'][item]['Name'] = Name
if 'Rough' not in data['Layers'][item]:
data['Layers'][item]['Rough'] = [0.,False]
if 'Thick' not in data['Layers'][item]:
data['Layers'][item]['Thick'] = [10.,False]
if 'N' in Inst['Type'][0]:
data['Layers'][item]['Mag SLD'] = [0.,False]
if Name == 'unit scatter':
data['Layers'][item]['iDenMul'] = [0.,False]
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
G2plt.PlotPatterns(G2frame,plotType='REFD')
wx.CallAfter(UpdateREFDModelsGrid,G2frame,data)
def OnCheckBox(event):
Obj = event.GetEventObject()
item,parm = Indx[Obj.GetId()]
data['Layers'][item][parm][1] = Obj.GetValue()
def OnInsertLayer(event):
Obj = event.GetEventObject()
ind = Indx[Obj.GetId()]
data['Layers'].insert(ind+1,{'Name':'vacuum','DenMul':[1.0,False],})
data['Layer Seq'] = ' '.join([str(i+1) for i in range(len(data['Layers'])-2)])
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
G2plt.PlotPatterns(G2frame,plotType='REFD')
wx.CallAfter(UpdateREFDModelsGrid,G2frame,data)
def OnDeleteLayer(event):
Obj = event.GetEventObject()
ind = Indx[Obj.GetId()]
del data['Layers'][ind]
data['Layer Seq'] = ' '.join([str(i+1) for i in range(len(data['Layers'])-2)])
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
G2plt.PlotPatterns(G2frame,plotType='REFD')
wx.CallAfter(UpdateREFDModelsGrid,G2frame,data)
def Recalculate(invalid,value,tc):
if invalid:
return
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
G2plt.PlotPatterns(G2frame,plotType='REFD')
# wx.CallLater(100,UpdateREFDModelsGrid,G2frame,data)
Indx = {}
layerSizer = wx.BoxSizer(wx.VERTICAL)
for ilay,layer in enumerate(data['Layers']):
if not ilay:
layerSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Top layer (superphase):'),0,WACV)
elif ilay < len(data['Layers'])-1:
layerSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Layer no. %d'%(ilay)),0,WACV)
else:
layerSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Bottom layer (substrate):'),0,WACV)
midlayer = wx.BoxSizer(wx.HORIZONTAL)
midlayer.Add(wx.StaticText(G2frame.dataWindow,label=' Substance: '),0,WACV)
midName = data['Layers'][ilay]['Name']
midSel = wx.ComboBox(G2frame.dataWindow,value=midName,
choices=list(Substances.keys()),style=wx.CB_READONLY|wx.CB_DROPDOWN)
Indx[midSel.GetId()] = ilay
midSel.Bind(wx.EVT_COMBOBOX,OnSelect)
midlayer.Add(midSel,0,WACV)
if midName != 'vacuum':
if midName != 'unit scatter':
midlayer.Add(wx.StaticText(G2frame.dataWindow,label=' Den. Mult.: '),0,WACV)
midlayer.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Layers'][ilay]['DenMul'],0,
nDig=(10,4),OnLeave=Recalculate),0,WACV)
varBox = wx.CheckBox(G2frame.dataWindow,label='Refine?')
Indx[varBox.GetId()] = [ilay,'DenMul']
varBox.SetValue(data['Layers'][ilay]['DenMul'][1])
varBox.Bind(wx.EVT_CHECKBOX, OnCheckBox)
midlayer.Add(varBox,0,WACV)
realScatt = data['Layers'][ilay]['DenMul'][0]*Substances[midName]['Scatt density']
midlayer.Add(wx.StaticText(G2frame.dataWindow,
label=' Real scat. den.: %.4g'%(realScatt)),0,WACV)
imagScatt = data['Layers'][ilay]['DenMul'][0]*Substances[midName]['XImag density']
midlayer.Add(wx.StaticText(G2frame.dataWindow,
label=' Imag scat. den.: %.4g'%(imagScatt)),0,WACV)
else:
realScatt = data['Layers'][ilay]['DenMul'][0]
midlayer.Add(wx.StaticText(G2frame.dataWindow,
label=' Real scat. den.: %.4g'%(realScatt)),0,WACV)
imagScatt = data['Layers'][ilay]['iDenMul'][0]
midlayer.Add(wx.StaticText(G2frame.dataWindow,
label=' Imag scat. den.: %.4g'%(imagScatt)),0,WACV)
else:
midlayer.Add(wx.StaticText(G2frame.dataWindow,label=', air or gas'),0,WACV)
layerSizer.Add(midlayer)
if midName == 'unit scatter':
nxtlayer = wx.BoxSizer(wx.HORIZONTAL)
nxtlayer.Add(wx.StaticText(G2frame.dataWindow,label=' Real Den. : '),0,WACV)
nxtlayer.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Layers'][ilay]['DenMul'],0,
nDig=(10,4),OnLeave=Recalculate),0,WACV)
varBox = wx.CheckBox(G2frame.dataWindow,label='Refine?')
Indx[varBox.GetId()] = [ilay,'DenMul']
varBox.SetValue(data['Layers'][ilay]['DenMul'][1])
varBox.Bind(wx.EVT_CHECKBOX, OnCheckBox)
nxtlayer.Add(varBox,0,WACV)
nxtlayer.Add(wx.StaticText(G2frame.dataWindow,label=' Imag Den. : '),0,WACV)
nxtlayer.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Layers'][ilay]['iDenMul'],0,
nDig=(10,4),OnLeave=Recalculate),0,WACV)
varBox = wx.CheckBox(G2frame.dataWindow,label='Refine?')
Indx[varBox.GetId()] = [ilay,'iDenMul']
varBox.SetValue(data['Layers'][ilay]['iDenMul'][1])
varBox.Bind(wx.EVT_CHECKBOX, OnCheckBox)
nxtlayer.Add(varBox,0,WACV)
layerSizer.Add(nxtlayer)
if midName != 'vacuum':
if 'N' in Inst['Type'][0] and midName not in ['vacuum','unit scatter']:
magLayer = wx.BoxSizer(wx.HORIZONTAL)
magLayer.Add(wx.StaticText(G2frame.dataWindow,label=' Magnetic SLD: '),0,WACV)
magLayer.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Layers'][ilay]['Mag SLD'],0,
nDig=(10,4),OnLeave=Recalculate),0,WACV)
varBox = wx.CheckBox(G2frame.dataWindow,label='Refine?')
Indx[varBox.GetId()] = [ilay,'Mag SLD']
varBox.SetValue(data['Layers'][ilay]['Mag SLD'][1])
varBox.Bind(wx.EVT_CHECKBOX, OnCheckBox)
magLayer.Add(varBox,0,WACV)
magLayer.Add(wx.StaticText(G2frame.dataWindow,
label=' Real+mag scat. den.: %.4g'%(realScatt+data['Layers'][ilay]['Mag SLD'][0])),0,WACV)
layerSizer.Add(magLayer)
if ilay:
names = {'Rough':'Upper surface Roughness, '+Angstr,'Thick':'Layer Thickness, '+Angstr}
parmsline = wx.BoxSizer(wx.HORIZONTAL)
parms= ['Rough','Thick']
if ilay == len(data['Layers'])-1:
parms = ['Rough',]
for parm in parms:
parmsline.Add(wx.StaticText(G2frame.dataWindow,label=' %s: '%(names[parm])),0,WACV)
parmsline.Add(G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['Layers'][ilay][parm],0,
nDig=(10,2),OnLeave=Recalculate),0,WACV)
varBox = wx.CheckBox(G2frame.dataWindow,label='Refine?')
Indx[varBox.GetId()] = [ilay,parm]
varBox.SetValue(data['Layers'][ilay][parm][1])
varBox.Bind(wx.EVT_CHECKBOX, OnCheckBox)
parmsline.Add(varBox,0,WACV)
layerSizer.Add(parmsline)
if ilay < len(data['Layers'])-1:
newlayer = wx.BoxSizer(wx.HORIZONTAL)
insert = wx.Button(G2frame.dataWindow,label='Insert')
Indx[insert.GetId()] = ilay
insert.Bind(wx.EVT_BUTTON,OnInsertLayer)
newlayer.Add(insert)
delet = wx.Button(G2frame.dataWindow,label='Delete')
Indx[delet.GetId()] = ilay
delet.Bind(wx.EVT_BUTTON,OnDeleteLayer)
newlayer.Add(delet)
layerSizer.Add(newlayer)
G2G.HorizontalLine(layerSizer,G2frame.dataWindow)
return layerSizer
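    # OnRepSeq: validates/normalizes the layer-sequence string. An 'N*( ... )' group
    # expands to N copies of the parenthesized sequence before validation, e.g. the
    # hint '6*(1 2)' becomes '1 2 1 2 1 2 1 2 1 2 1 2'; entries must be interior
    # layer numbers and adjacent repeats of the same layer are rejected.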
def OnRepSeq(event):
event.Skip()
stack = repseq.GetValue()
nstar = stack.count('*')
if nstar:
try:
newstack = ''
Istar = 0
for star in range(nstar):
Istar = stack.index('*',Istar+1)
iB = stack[:Istar].rfind(' ')
if iB == -1:
mult = int(stack[:Istar])
else:
mult = int(stack[iB:Istar])
pattern = stack[Istar+2:stack.index(')',Istar)]+' '
newstack += mult*pattern
stack = newstack
except ValueError:
stack += ' Error in string'
wx.MessageBox(stack,'Error',style=wx.ICON_EXCLAMATION)
repseq.SetValue(data['Layer Seq'])
return
try:
Slist = np.array(stack.split(),dtype=int)
except ValueError:
stack += ' Error in string'
repseq.SetValue(data['Layer Seq'])
wx.MessageBox(stack,'Error',style=wx.ICON_EXCLAMATION)
return
if len(Slist) < 1:
stack += ' Error in sequence - too short!'
Stest = np.arange(1,Nlayers-1)
if not np.all(np.array([item in Stest for item in Slist])):
stack += ' Error: invalid layer selection'
elif not np.all(np.ediff1d(Slist)):
stack += ' Error: Improbable sequence or bad string'
if 'Error' in stack:
repseq.SetValue(data['Layer Seq'])
wx.MessageBox(stack,'Error',style=wx.ICON_EXCLAMATION)
return
else:
data['Layer Seq'] = stack
repseq.SetValue(stack)
G2pwd.REFDModelFxn(Profile,Inst,Limits,Substances,data)
x,xr,y = G2pwd.makeSLDprofile(data,Substances)
ModelPlot(data,x,xr,y)
G2plt.PlotPatterns(G2frame,plotType='REFD')
Substances = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Substances'))['Substances']
ProfDict,Profile,Name = G2frame.GPXtree.GetItemPyData(G2frame.PatternId)[:3]
if 'ifDQ' not in ProfDict:
ProfDict['ifDQ'] = np.any(Profile[5])
data['dQ type'] = 'None'
Limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Limits'))
Inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.PatternId, 'Instrument Parameters'))[0]
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.REFDModelMenu)
G2frame.dataWindow.ClearData()
G2frame.Bind(wx.EVT_MENU, OnCopyModel, id=G2G.wxID_MODELCOPY)
G2frame.Bind(wx.EVT_MENU, OnModelPlot, id=G2G.wxID_MODELPLOT)
G2frame.Bind(wx.EVT_MENU, OnFitModel, id=G2G.wxID_MODELFIT)
G2frame.Bind(wx.EVT_MENU, OnFitModelAll, id=G2G.wxID_MODELFITALL)
G2frame.Bind(wx.EVT_MENU, OnUnDo, id=G2G.wxID_MODELUNDO)
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Reflectometry fitting for: '+Name),0,WACV)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Controls:'),0,WACV)
mainSizer.Add(ControlSizer())
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Global parameters:'),0,WACV)
mainSizer.Add(OverallSizer())
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
Nlayers = len(data['Layers'])
if Nlayers > 2:
if 'Layer Seq' not in data:
data['Layer Seq'] = ' '.join([str(i+1) for i in range(Nlayers-2)])
lineSizer = wx.BoxSizer(wx.HORIZONTAL)
lineSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Layer sequence: '),0,WACV)
repseq = wx.TextCtrl(G2frame.dataWindow,value = data['Layer Seq'],style=wx.TE_PROCESS_ENTER,size=(500,25))
repseq.Bind(wx.EVT_TEXT_ENTER,OnRepSeq)
repseq.Bind(wx.EVT_KILL_FOCUS,OnRepSeq)
lineSizer.Add(repseq,0,WACV)
mainSizer.Add(lineSizer)
Str = ' Use sequence nos. from:'
for ilay,layer in enumerate(data['Layers'][1:-1]):
Str += ' %d: %s'%(ilay+1,layer['Name'])
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=Str),0,WACV)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' NB: Repeat sequence by e.g. 6*(1 2) '),0,WACV)
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Layers: scatt. densities are 10%scm%s = 10%s%s%s'%(Pwr10,Pwrm2,Pwrm6,Angstr,Pwrm2)),0,WACV)
mainSizer.Add(LayerSizer())
G2frame.dataWindow.SetDataSize()
################################################################################
##### PDF controls
################################################################################
def computePDF(G2frame,data):
    '''Calls :func:`GSASIIpwd.CalcPDF` to compute the PDF and put it into the data tree array.
    Called from OnComputePDF and OnComputeAllPDF here and from OnComputeAllPDF in
    GSASIIimgGUI.py.
    '''
xydata = {}
problem = False
for key in ['Sample','Sample Bkg.','Container','Container Bkg.']:
name = data[key]['Name']
if name.strip():
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name)
if not pId:
print(key,'Entry',name,'Not found.')
problem = True
continue
xydata[key] = G2frame.GPXtree.GetItemPyData(pId)
if problem:
print('PDF computation aborted')
return
powId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data['Sample']['Name'])
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,powId,'Limits'))[1]
inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,powId,'Instrument Parameters'))[0]
auxPlot = G2pwd.CalcPDF(data,inst,limits,xydata)
data['I(Q)'] = xydata['IofQ']
data['S(Q)'] = xydata['SofQ']
data['F(Q)'] = xydata['FofQ']
data['G(R)'] = xydata['GofR']
return auxPlot
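# Hedged usage sketch for computePDF (illustrative, mirrors OnComputePDF below):
#   auxPlot = computePDF(G2frame, data)      # data is a 'PDF Controls' dict
#   if auxPlot is not None:                  # None means a named pattern was missing
#       G2plt.PlotISFG(G2frame, data, newPlot=True, plotType='G(R)')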
def OptimizePDF(G2frame,data,showFit=True,maxCycles=5):
    '''Optimize the PDF to minimize the difference between G(r) and the expected value at
    low r (-4 pi r * number density).
    '''
xydata = {}
for key in ['Sample','Sample Bkg.','Container','Container Bkg.']:
name = data[key]['Name']
if name:
xydata[key] = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name))
powName = data['Sample']['Name']
powId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,powName)
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,powId,'Limits'))[1]
inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,powId,'Instrument Parameters'))[0]
res = G2pwd.OptimizePDF(data,xydata,limits,inst,showFit,maxCycles)
return res['success']
def UpdatePDFGrid(G2frame,data):
'''respond to selection of PWDR PDF data tree item.
'''
def PDFFileSizer():
def FillFileSizer(fileSizer,key):
#fileSizer is a FlexGridSizer(3,4)
def OnSelectFile(event):
Obj = event.GetEventObject()
fileKey,itemKey,fmt = itemDict[Obj.GetId()]
if itemKey == 'Name':
value = Obj.GetValue()
Obj.SetValue(fmt%(value))
data[fileKey][itemKey] = value
data[fileKey]['Mult'] = GetExposure(value)
mult.SetValue(data[fileKey]['Mult'])
ResetFlatBkg()
wx.CallAfter(OnComputePDF,None)
def OnMoveMult(event):
data[key]['Mult'] += multSpin.GetValue()*0.01
mult.SetValue(data[key]['Mult'])
multSpin.SetValue(0)
wx.CallAfter(OnComputePDF,None)
def OnMult(invalid,value,tc):
if invalid: return
ResetFlatBkg()
wx.CallAfter(OnComputePDF,None)
def OnRefMult(event):
item['Refine'] = refMult.GetValue()
if item['Refine']:
G2frame.GetStatusBar().SetStatusText('Be sure Mult is close to anticipated value. '+ \
'Suggest setting Flat Bkg. to 0 before Optimize Mult',1)
def GetExposure(backFile):
dataId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'PWDR'+dataFile[4:])
dataComments = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,dataId,'Comments'))
if not backFile:
return -1.
backId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,backFile)
backComments = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,backId,'Comments'))
expTime = 1.
sumExp = 1.
for item in dataComments:
if 'exposureTime' in item:
expTime = float(item.split('=')[1])
if 'summedExposures' in item:
sumExp = float(item.split('=')[1])
dataExp = expTime*sumExp
expTime = 1.
sumExp = 1.
for item in backComments:
if 'exposureTime' in item:
expTime = float(item.split('=')[1])
if 'summedExposures' in item:
sumExp = float(item.split('=')[1])
backExp = expTime*sumExp
return -dataExp/backExp
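            # GetExposure (above) returns -dataExp/backExp. Worked example with
            # illustrative numbers: sample exposureTime=1.0, summedExposures=10
            # (dataExp=10) and background exposureTime=0.5, summedExposures=10
            # (backExp=5) give a suggested multiplier of -10/5 = -2.0.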
item = data[key]
fileList = [''] + GetFileList(G2frame,'PWDR')
fileSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' '+key+' file:'),0,WACV)
fileName = wx.ComboBox(G2frame.dataWindow,value=item['Name'],choices=fileList,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
itemDict[fileName.GetId()] = [key,'Name','%s']
fileName.Bind(wx.EVT_COMBOBOX,OnSelectFile)
fileSizer.Add(fileName,0,)
fileSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label='Multiplier:'),0,WACV)
mulBox = wx.BoxSizer(wx.HORIZONTAL)
mult = G2G.ValidatedTxtCtrl(G2frame.dataWindow,item,'Mult',nDig=(10,3),
typeHint=float,OnLeave=OnMult)
mulBox.Add(mult,0,)
multSpin = wx.SpinButton(G2frame.dataWindow,style=wx.SP_VERTICAL,size=wx.Size(20,25))
multSpin.SetRange(-1,1)
multSpin.SetValue(0)
multSpin.Bind(wx.EVT_SPIN, OnMoveMult)
mulBox.Add(multSpin,0,WACV)
fileSizer.Add(mulBox,0,WACV)
if 'Refine' in item and item['Name'] and 'Sample' in key:
refMult = wx.CheckBox(parent=G2frame.dataWindow,label='Refine?')
refMult.SetValue(item['Refine'])
refMult.Bind(wx.EVT_CHECKBOX, OnRefMult)
fileSizer.Add(refMult,0,WACV)
else:
fileSizer.Add((5,5),0)
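        # ResetFlatBkg (below) estimates the flat background from the minimum of the
        # net intensity: min(sample) + Mult_sb*min(sample bkg)
        # + Mult_c*(min(container) + Mult_cb*min(container bkg)), clamped at zero.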
def ResetFlatBkg():
Smin = np.min(G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'PWDR'+dataFile[4:]))[1][1])
Bmin = 0; Cmin = 0.; Cmul = 0.; CBmin = 0.
if data['Sample Bkg.']['Name']:
Bmin = np.min(G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data['Sample Bkg.']['Name']))[1][1])
Smin += Bmin*data['Sample Bkg.']['Mult']
if data['Container']['Name']:
Cmin = np.min(G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data['Container']['Name']))[1][1])
Cmul = data['Container']['Mult']
if data['Container Bkg.']['Name']:
CBmin = np.min(G2frame.GPXtree.GetItemPyData(
G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data['Container Bkg.']['Name']))[1][1])
Cmin += CBmin*data['Container Bkg.']['Mult']
Smin += Cmul*Cmin
data['Flat Bkg'] = max(0,Smin)
G2frame.flatBkg.SetValue(data['Flat Bkg'])
PDFfileSizer = wx.BoxSizer(wx.VERTICAL)
PDFfileSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=' PDF data files: '),0,WACV)
PDFfileSizer.Add((5,5),0)
if 'C' in inst['Type'][0]:
            lbl = ' Sample file: PWDR%s Wavelength, A: %.5f Energy, keV: %.3f Polariz.: %.2f '%(dataFile[4:],wave,keV,polariz)
            PDFfileSizer.Add(wx.StaticText(parent=G2frame.dataWindow,label=lbl),0,WACV)
PDFfileSizer.Add((5,5),0)
fileSizer = wx.FlexGridSizer(0,5,5,1)
select = ['Sample Bkg.','Container']
if data['Container']['Name']:
select.append('Container Bkg.')
for key in select:
FillFileSizer(fileSizer,key)
PDFfileSizer.Add(fileSizer,0)
return PDFfileSizer
def SampleSizer():
def FillElemSizer(elemSizer,ElData):
def AfterChange(invalid,value,tc):
if invalid: return
data['Form Vol'] = max(10.0,SumElementVolumes())
wx.CallAfter(UpdatePDFGrid,G2frame,data)
wx.CallAfter(OnComputePDF,tc.event)
elemSizer.Add(wx.StaticText(parent=G2frame.dataWindow,
label=' Element: '+'%2s'%(ElData['Symbol'])+' * '),0,WACV)
num = G2G.ValidatedTxtCtrl(G2frame.dataWindow,ElData,'FormulaNo',nDig=(10,3),min=0.0,
typeHint=float,OnLeave=AfterChange)
elemSizer.Add(num,0,WACV)
elemSizer.Add(wx.StaticText(parent=G2frame.dataWindow,
label="f': %.3f"%(ElData['fp'])+' f": %.3f'%(ElData['fpp'])+' mu: %.2f barns'%(ElData['mu']) ),
0,WACV)
def AfterChange(invalid,value,tc):
if invalid: return
wx.CallAfter(UpdatePDFGrid,G2frame,data)
wx.CallAfter(OnComputePDF,tc.event)
def OnGeometry(event):
data['Geometry'] = geometry.GetValue()
wx.CallAfter(UpdatePDFGrid,G2frame,data)
#UpdatePDFGrid(G2frame,data)
wx.CallAfter(OnComputePDF,event)
sampleSizer = wx.BoxSizer(wx.VERTICAL)
if not ElList:
sampleSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Sample information: fill in this 1st'),0,WACV)
else:
sampleSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Sample information: '),0,WACV)
sampleSizer.Add((5,5),0)
Abs = G2lat.CellAbsorption(ElList,data['Form Vol'])
Trans = G2pwd.Transmission(data['Geometry'],Abs*data['Pack'],data['Diam'])
elemSizer = wx.FlexGridSizer(0,3,5,1)
for El in ElList:
FillElemSizer(elemSizer,ElList[El])
sampleSizer.Add(elemSizer,0)
sampleSizer.Add((5,5),0)
midSizer = wx.BoxSizer(wx.HORIZONTAL)
midSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Formula volume: '),0,WACV)
formVol = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Form Vol',nDig=(10,3),min=10.0,
typeHint=float,OnLeave=AfterChange)
midSizer.Add(formVol,0)
midSizer.Add(wx.StaticText(G2frame.dataWindow,
label=' Theoretical absorption: %.4f cm-1 Sample absorption: %.4f cm-1'%(Abs,Abs*data['Pack'])),
0,WACV)
sampleSizer.Add(midSizer,0)
sampleSizer.Add((5,5),0)
geoBox = wx.BoxSizer(wx.HORIZONTAL)
geoBox.Add(wx.StaticText(G2frame.dataWindow,label=' Sample geometry: '),0,WACV)
choice = ['Cylinder','Bragg-Brentano','Tilting flat plate in transmission','Fixed flat plate']
geometry = wx.ComboBox(G2frame.dataWindow,value=data['Geometry'],choices=choice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
geometry.Bind(wx.EVT_COMBOBOX, OnGeometry)
geoBox.Add(geometry,0)
geoBox.Add(wx.StaticText(G2frame.dataWindow,label=' Sample diameter/thickness, mm: '),0,WACV)
diam = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Diam',nDig=(10,3),min=0.01,
typeHint=float,OnLeave=AfterChange)
geoBox.Add(diam,0)
sampleSizer.Add(geoBox,0)
sampleSizer.Add((5,5),0)
geoBox = wx.BoxSizer(wx.HORIZONTAL)
geoBox.Add(wx.StaticText(G2frame.dataWindow,label=' Packing: '),0,WACV)
pack = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Pack',nDig=(10,2),min=0.01,
typeHint=float,OnLeave=AfterChange)
geoBox.Add(pack,0)
geoBox.Add(wx.StaticText(G2frame.dataWindow,label=' Sample transmission: %.3f %%'%(Trans)),0,WACV)
sampleSizer.Add(geoBox,0)
return sampleSizer
def SFGctrlSizer():
def OnOptimizePDF(event):
'''Optimize Flat Bkg, BackRatio & Ruland corrections to remove spurious
"intensity" from portion of G(r) with r<Rmin.
Invoked by Optimize PDF button and from menu command.
'''
if not data['ElList']:
G2frame.ErrorDialog('PDF error','Chemical formula not defined')
return
G2frame.GetStatusBar().SetStatusText('',1)
wx.BeginBusyCursor()
try:
OptimizePDF(G2frame,data)
finally:
wx.EndBusyCursor()
OnComputePDF(event)
wx.CallAfter(UpdatePDFGrid,G2frame,data)
def AfterChangeNoRefresh(invalid,value,tc):
if invalid: return
wx.CallAfter(OnComputePDF,None)
def OnDetType(event):
data['DetType'] = detType.GetValue()
wx.CallAfter(UpdatePDFGrid,G2frame,data)
wx.CallAfter(OnComputePDF,None)
def OnFlatSpin(event):
data['Flat Bkg'] += flatSpin.GetValue()*0.01*data['IofQmin']
G2frame.flatBkg.SetValue(data['Flat Bkg'])
flatSpin.SetValue(0)
wx.CallAfter(OnComputePDF,None)
def OnBackSlider(event):
value = int(backSldr.GetValue())/100.
data['BackRatio'] = value
backVal.SetValue(data['BackRatio'])
wx.CallAfter(OnComputePDF,None)
def OnRulSlider(event):
value = int(rulandSldr.GetValue())/1000.
data['Ruland'] = max(0.001,value)
rulandWdt.SetValue(data['Ruland'])
wx.CallAfter(OnComputePDF,None)
def NewQmax(invalid,value,tc):
if invalid: return
data['QScaleLim'][0] = 0.9*value
SQmin.SetValue(data['QScaleLim'][0])
wx.CallAfter(OnComputePDF,None)
def OnResetQ(event):
data['QScaleLim'][1] = qLimits[1]
SQmax.SetValue(data['QScaleLim'][1])
data['QScaleLim'][0] = 0.9*qLimits[1]
SQmin.SetValue(data['QScaleLim'][0])
wx.CallAfter(OnComputePDF,None)
def OnLorch(event):
data['Lorch'] = lorch.GetValue()
wx.CallAfter(OnComputePDF,None)
def OnNoRing(event):
data['noRing'] = not data['noRing']
wx.CallAfter(OnComputePDF,None)
sfgSizer = wx.BoxSizer(wx.VERTICAL)
sqBox = wx.BoxSizer(wx.HORIZONTAL)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' S(Q)->F(Q)->G(r) controls: '),0,WACV)
sqBox.Add((1,1),1,wx.EXPAND,1)
optB = wx.Button(G2frame.dataWindow,label='Optimize PDF',style=wx.BU_EXACTFIT)
optB.Bind(wx.EVT_BUTTON, OnOptimizePDF)
sqBox.Add(optB,0,WACV|wx.ALIGN_RIGHT)
sfgSizer.Add(sqBox,0,wx.EXPAND)
sfgSizer.Add((5,5),0)
sqBox = wx.BoxSizer(wx.HORIZONTAL)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' Detector type: '),0,WACV)
choice = ['Area detector','Point detector']
detType = wx.ComboBox(G2frame.dataWindow,value=data['DetType'],choices=choice,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
detType.Bind(wx.EVT_COMBOBOX, OnDetType)
sqBox.Add(detType,0)
if data['DetType'] == 'Area detector':
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' IP transmission coeff.: '),0,WACV)
obliqCoeff = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'ObliqCoeff',nDig=(10,3),min=0.0,max=1.0,
typeHint=float,OnLeave=AfterChangeNoRefresh)
sqBox.Add(obliqCoeff,0)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' Flat Bkg.: '),0,WACV)
G2frame.flatBkg = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Flat Bkg',nDig=(10,0),min=0,
typeHint=float,OnLeave=AfterChangeNoRefresh)
sqBox.Add(G2frame.flatBkg,0)
flatSpin = wx.SpinButton(G2frame.dataWindow,style=wx.SP_VERTICAL,size=wx.Size(20,25))
flatSpin.SetRange(-1,1)
flatSpin.SetValue(0)
flatSpin.Bind(wx.EVT_SPIN, OnFlatSpin)
sqBox.Add(flatSpin,0,WACV)
sqBox.Add((1,1),1,wx.EXPAND,1)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label='Rmin: '),0,WACV|wx.ALIGN_RIGHT)
rmin = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Rmin',nDig=(5,1),
typeHint=float,size=wx.Size(50,20))
sqBox.Add(rmin,0,WACV|wx.ALIGN_RIGHT)
sfgSizer.Add(sqBox,0,wx.EXPAND)
bkBox = wx.BoxSizer(wx.HORIZONTAL)
bkBox.Add(wx.StaticText(G2frame.dataWindow,label=' Background ratio: '),0,WACV)
backSldr = wx.Slider(parent=G2frame.dataWindow,style=wx.SL_HORIZONTAL,
value=int(100*data['BackRatio']))
bkBox.Add(backSldr,1,wx.EXPAND)
backSldr.Bind(wx.EVT_SLIDER, OnBackSlider)
backVal = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'BackRatio',nDig=(10,3),min=0.0,max=1.0,
typeHint=float,OnLeave=AfterChangeNoRefresh)
bkBox.Add(backVal,0,WACV)
sfgSizer.Add(bkBox,0,wx.ALIGN_LEFT|wx.EXPAND)
sqBox = wx.BoxSizer(wx.HORIZONTAL)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' Ruland width: '),0,WACV)
rulandSldr = wx.Slider(parent=G2frame.dataWindow,style=wx.SL_HORIZONTAL,
value=int(1000*data['Ruland']))
sqBox.Add(rulandSldr,1,wx.EXPAND)
rulandSldr.Bind(wx.EVT_SLIDER, OnRulSlider)
rulandWdt = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Ruland',nDig=(10,3),min=0.001,max=1.0,
typeHint=float,OnLeave=AfterChangeNoRefresh)
sqBox.Add(rulandWdt,0,WACV)
sfgSizer.Add(sqBox,0,wx.ALIGN_LEFT|wx.EXPAND)
sqBox = wx.BoxSizer(wx.HORIZONTAL)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' Scaling Q-range: '),0,WACV)
SQmin = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['QScaleLim'],0,nDig=(10,3),
min=qLimits[0],max=.95*data['QScaleLim'][1],
typeHint=float,OnLeave=AfterChangeNoRefresh)
sqBox.Add(SQmin,0,WACV)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' to Qmax '),0,WACV)
SQmax = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data['QScaleLim'],1,nDig=(10,3),
min=qLimits[0],max=qLimits[1],typeHint=float,OnLeave=NewQmax)
sqBox.Add(SQmax,0,WACV)
resetQ = wx.Button(G2frame.dataWindow,label='Reset?',style=wx.BU_EXACTFIT)
sqBox.Add(resetQ,0,WACV)
resetQ.Bind(wx.EVT_BUTTON, OnResetQ)
sqBox.Add(wx.StaticText(G2frame.dataWindow,label=' Plot Rmax: '),0,WACV)
rmax = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'Rmax',nDig=(10,1),min=10.,max=200.,
typeHint=float,OnLeave=AfterChangeNoRefresh,size=wx.Size(50,20))
sqBox.Add(rmax,0,WACV)
lorch = wx.CheckBox(parent=G2frame.dataWindow,label='Lorch damping?')
lorch.SetValue(data['Lorch'])
lorch.Bind(wx.EVT_CHECKBOX, OnLorch)
sqBox.Add(lorch,0,WACV)
noRing = wx.CheckBox(parent=G2frame.dataWindow,label='Suppress G(0) ringing?')
noRing.SetValue(data['noRing'])
noRing.Bind(wx.EVT_CHECKBOX, OnNoRing)
sqBox.Add(noRing,0,WACV)
sfgSizer.Add(sqBox,0)
return sfgSizer
def DiffSizer():
def OnSelectGR(event):
newName = grName.GetValue()
if newName:
data['delt-G(R)'] = copy.deepcopy(data['G(R)'])
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,newName)
pId = G2gd.GetGPXtreeItemId(G2frame,Id,'PDF Controls')
subData = G2frame.GPXtree.GetItemPyData(pId)['G(R)']
if subData[1][0][-1] != data['G(R)'][1][0][-1]:
G2frame.ErrorDialog('delt-G(R) Error',' G(R) for '+newName+' not same R range')
grName.SetValue(data['diffGRname'])
return
data['diffGRname'] = newName
data['delt-G(R)'][1] = np.array([subData[1][0],data['G(R)'][1][1]-subData[1][1]])
data['delt-G(R)'][2] += ('-\n'+subData[2])
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='delt-G(R)')
wx.CallAfter(UpdatePDFGrid,G2frame,data)
def OnMult(invalid,value,tc):
if invalid: return
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,data['diffGRname'])
if Id == 0: return
pId = G2gd.GetGPXtreeItemId(G2frame,Id,'PDF Controls')
if pId == 0: return
subData = G2frame.GPXtree.GetItemPyData(pId)['G(R)']
data['delt-G(R)'][1] = np.array([subData[1][0],data['G(R)'][1][1]-data['diffMult']*subData[1][1]])
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='delt-G(R)')
diffSizer = wx.BoxSizer(wx.HORIZONTAL)
fileList = [''] + GetFileList(G2frame,'PDF')
diffSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Subtract G(R) for: '),0,WACV)
grName = wx.ComboBox(G2frame.dataWindow,value=data['diffGRname'],choices=fileList,
style=wx.CB_READONLY|wx.CB_DROPDOWN)
grName.Bind(wx.EVT_COMBOBOX,OnSelectGR)
diffSizer.Add(grName,0,WACV)
if data['diffGRname']:
diffSizer.Add(wx.StaticText(G2frame.dataWindow,label=' Mult: '),0,WACV)
mult = G2G.ValidatedTxtCtrl(G2frame.dataWindow,data,'diffMult',nDig=(10,3),
typeHint=float,OnLeave=OnMult)
diffSizer.Add(mult,0,WACV)
OnMult(False,None,None)
return diffSizer
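    # SumElementVolumes (below) estimates the formula volume as a sum of atomic
    # sphere volumes: V = sum_El (4*pi/3)*Drad**3 * FormulaNo.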
def SumElementVolumes():
sumVol = 0.
ElList = data['ElList']
for El in ElList:
Avol = (4.*math.pi/3.)*ElList[El]['Drad']**3
sumVol += Avol*ElList[El]['FormulaNo']
return sumVol
def OnCopyPDFControls(event):
TextList = GetFileList(G2frame,'PDF')
Source = G2frame.GPXtree.GetItemText(G2frame.PatternId)
if len(TextList) == 1:
G2frame.ErrorDialog('Nothing to copy controls to','There must be more than one "PDF" pattern')
return
od = {'label_1':'Only refine flag','value_1':False,'label_2':'Only Lorch flag','value_2':False}
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy PDF controls','Copy controls from '+Source+' to:',TextList,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
PDFlist = [TextList[i] for i in dlg.GetSelections()]
for item in PDFlist:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
olddata = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'PDF Controls'))
if od['value_1']:
olddata['Sample Bkg.']['Refine'] = data['Sample Bkg.']['Refine'] #only one flag
elif od['value_2']:
olddata['Lorch'] = data['Lorch'] #only one flag
else:
sample = olddata['Sample']
olddata.update(copy.deepcopy(data))
olddata['Sample'] = sample
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'PDF Controls'),olddata)
G2frame.GetStatusBar().SetStatusText('PDF controls copied',1)
finally:
dlg.Destroy()
def OnSavePDFControls(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PDF controls file', pth, '',
'PDF controls files (*.pdfprm)|*.pdfprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
# make sure extension is .pdfprm
filename = os.path.splitext(filename)[0]+'.pdfprm'
File = open(filename,'w')
File.write("#GSAS-II PDF controls file; do not add/delete items!\n")
for item in data:
                    if item not in ['Sample','I(Q)','S(Q)','F(Q)','G(R)']:
                        File.write(item+':'+str(data[item])+'\n')
File.close()
print ('PDF controls saved to: '+filename)
finally:
dlg.Destroy()
def OnLoadPDFControls(event):
pth = G2G.GetExportPath(G2frame)
dlg = wx.FileDialog(G2frame, 'Choose GSAS-II PDF controls file', pth, '',
'PDF controls files (*.pdfprm)|*.pdfprm',wx.FD_OPEN)
try:
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
File = open(filename,'r')
newdata = {}
S = File.readline()
while S:
if '#' in S:
S = File.readline()
continue
key,val = S.split(':',1)
try:
newdata[key] = eval(val)
                    except Exception:   # eval fails for plain strings (e.g. SyntaxError); keep the raw text
newdata[key] = val.strip()
S = File.readline()
File.close()
data.update(newdata)
finally:
dlg.Destroy()
OnComputePDF(event)
wx.CallAfter(UpdatePDFGrid,G2frame,data)
def OnAddElement(event):
ElList = data['ElList']
choice = list(ElList.keys())
PE = G2elemGUI.PickElements(G2frame,choice)
if PE.ShowModal() == wx.ID_OK:
for El in PE.Elem:
if El not in ElList:
try:
data['ElList'][El] = G2elem.GetElInfo(El,inst)
data['ElList'][El]['FormulaNo'] = 1.0
except IndexError: # happens with element Q
pass
data['Form Vol'] = max(10.0,SumElementVolumes())
PE.Destroy()
wx.CallAfter(UpdatePDFGrid,G2frame,data)
def OnDeleteElement(event):
ElList = data['ElList']
choice = list(ElList.keys())
dlg = G2elemGUI.DeleteElement(G2frame,choice=choice)
if dlg.ShowModal() == wx.ID_OK:
del ElList[dlg.GetDeleteElement()]
dlg.Destroy()
wx.CallAfter(UpdatePDFGrid,G2frame,data)
def OnComputePDF(event):
'''Compute and plot PDF, in response to a menu command or a change to a
computation parameter.
'''
if not data['ElList']:
G2frame.ErrorDialog('PDF error','Chemical formula not defined')
OnAddElement(event)
auxPlot = computePDF(G2frame,data)
if auxPlot is None: return
G2frame.GetStatusBar().SetStatusText('PDF computed',1)
for plot in auxPlot:
XY = np.array(plot[:2])
G2plt.PlotXY(G2frame,[XY,],Title=plot[2])
if event is not None:
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='I(Q)')
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='S(Q)')
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='F(Q)')
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='G(R)')
else:
G2plt.PlotISFG(G2frame,data,newPlot=True)
def OnComputeAllPDF(event):
print('Calculating PDFs...')
choices = []
if G2frame.GPXtree.GetCount():
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
while Id:
Name = G2frame.GPXtree.GetItemText(Id)
if Name.startswith('PDF '):
Data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id,'PDF Controls'))
if not Data['ElList']:
print(' No chemical formula for {}'.format(Name))
else:
choices.append(Name)
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
if not choices:
print(' No PDFs to compute\n')
return
od = {'label_1':'Optimize PDFs','value_1':True}
dlg = G2G.G2MultiChoiceDialog(G2frame, 'Select PDFs to compute','Select PDFs',
choices,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
results = dlg.GetSelections()
else:
return
finally:
dlg.Destroy()
if not results:
print(' No PDFs to compute\n')
return
Names = [choices[i] for i in results]
pgbar = wx.ProgressDialog('Compute PDF','PDFs done: 0',len(Names)+1,
style = wx.PD_ELAPSED_TIME|wx.PD_AUTO_HIDE|wx.PD_CAN_ABORT)
notConverged = 0
Id, cookie = G2frame.GPXtree.GetFirstChild(G2frame.root)
N = 0
try:
while Id:
Name = G2frame.GPXtree.GetItemText(Id)
if Name in Names:
N += 1
msg = 'PDFs done: {} of {}'.format(N-1,len(Names))
if not pgbar.Update(N,msg)[0]:
pgbar.Destroy()
break
pId = G2gd.GetGPXtreeItemId(G2frame,Id,'PDF Controls')
Data = G2frame.GPXtree.GetItemPyData(pId)
print(' Computing {}'.format(Name))
computePDF(G2frame,Data)
if od['value_1']:
notConverged += not OptimizePDF(G2frame,Data,maxCycles=10)
computePDF(G2frame,Data)
G2frame.GPXtree.SetItemPyData(pId,Data)
Id, cookie = G2frame.GPXtree.GetNextChild(G2frame.root, cookie)
finally:
pgbar.Destroy()
if od['value_1']:
msg = '{}/{} PDFs computed; {} unconverged'.format(N,len(Names),notConverged)
else:
msg = '{}/{} PDFs computed'.format(N,len(Names))
G2frame.GetStatusBar().SetStatusText(msg,1)
print(msg)
# what item is being plotted? -- might be better to select from tree
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='I(Q)')
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='S(Q)')
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='F(Q)')
G2plt.PlotISFG(G2frame,data,newPlot=True,plotType='G(R)')
# Routine UpdatePDFGrid starts here
global inst
tth2q = lambda t,w:4.0*math.pi*sind(t/2.0)/w
tof2q = lambda t,C:2.0*math.pi*C/t
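    # tth2q/tof2q convert the data limits to momentum transfer:
    # Q = 4*pi*sin(theta)/lambda for constant-wavelength data and
    # Q = 2*pi*C/TOF (C = difC) for time-of-flight data.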
dataFile = G2frame.GPXtree.GetItemText(G2frame.PatternId)
powName = 'PWDR'+dataFile[4:]
powId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root, powName)
if powId: # skip if no matching PWDR entry
fullLimits,limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,powId, 'Limits'))[:2]
inst = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,powId, 'Instrument Parameters'))[0]
if 'C' in inst['Type'][0]:
wave = G2mth.getWave(inst)
keV = 12.397639/wave
qLimits = [tth2q(fullLimits[0],wave),tth2q(fullLimits[1],wave)]
polariz = inst['Polariz.'][1]
else: #'T'of
qLimits = [tof2q(fullLimits[1],inst['difC'][1]),tof2q(fullLimits[0],inst['difC'][1])]
polariz = 1.0
data['QScaleLim'][1] = min(qLimits[1],data['QScaleLim'][1])
if data['QScaleLim'][0]:
data['QScaleLim'][0] = max(qLimits[0],data['QScaleLim'][0])
else: #initial setting at 90% of max Q
data['QScaleLim'][0] = 0.90*data['QScaleLim'][1]
itemDict = {}
#patch
if 'BackRatio' not in data:
data['BackRatio'] = 0.
if 'noRing' not in data:
data['noRing'] = False
if 'Rmax' not in data:
data['Rmax'] = 100.
if 'Flat Bkg' not in data:
data['Flat Bkg'] = 0.
if 'IofQmin' not in data:
data['IofQmin'] = 1.0
if 'Rmin' not in data:
data['Rmin'] = 1.5
if data['DetType'] == 'Image plate':
data['DetType'] = 'Area detector'
if 'Refine' not in data['Sample Bkg.']:
data['Sample Bkg.']['Refine'] = False
if 'diffGRname' not in data:
data['diffGRname'] = ''
if 'diffMult' not in data:
data['diffMult'] = 1.0
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.PDFMenu)
if powId:
G2frame.dataWindow.PDFMenu.EnableTop(0,enable=True)
else:
G2frame.dataWindow.PDFMenu.EnableTop(0,enable=False)
G2frame.Bind(wx.EVT_MENU, OnCopyPDFControls, id=G2G.wxID_PDFCOPYCONTROLS)
G2frame.Bind(wx.EVT_MENU, OnSavePDFControls, id=G2G.wxID_PDFSAVECONTROLS)
G2frame.Bind(wx.EVT_MENU, OnLoadPDFControls, id=G2G.wxID_PDFLOADCONTROLS)
G2frame.Bind(wx.EVT_MENU, OnAddElement, id=G2G.wxID_PDFADDELEMENT)
G2frame.Bind(wx.EVT_MENU, OnDeleteElement, id=G2G.wxID_PDFDELELEMENT)
G2frame.Bind(wx.EVT_MENU, OnComputePDF, id=G2G.wxID_PDFCOMPUTE)
G2frame.Bind(wx.EVT_MENU, OnComputeAllPDF, id=G2G.wxID_PDFCOMPUTEALL)
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
if powId:
ElList = data['ElList']
mainSizer.Add(PDFFileSizer(),0,WACV)
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
mainSizer.Add(SampleSizer(),0,WACV)
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
mainSizer.Add(SFGctrlSizer(),0,WACV)
G2G.HorizontalLine(mainSizer,G2frame.dataWindow)
mainSizer.Add(DiffSizer(),0,WACV)
else:
mainSizer.Add(wx.StaticText(G2frame.dataWindow,wx.ID_ANY,
powName+' not in Tree'))
G2frame.dataWindow.SetDataSize()
###############################################################################################################
#UpdatePDFPeaks: peaks in G(r)
###############################################################################################################
def UpdatePDFPeaks(G2frame,peaks,data):
def limitSizer():
def NewLim(invalid,value,tc):
if invalid:
return
G2plt.PlotISFG(G2frame,data,newPlot=False,plotType='G(R)',peaks=peaks)
limitBox = wx.BoxSizer(wx.HORIZONTAL)
limitBox.Add(wx.StaticText(G2frame.dataWindow,label=' PDF Limits: '),0,WACV)
lowLim = G2G.ValidatedTxtCtrl(G2frame.dataWindow,peaks['Limits'],0,nDig=(10,3),
min=0.,max=10.,typeHint=float,OnLeave=NewLim)
limitBox.Add(lowLim,0,WACV)
highLim = G2G.ValidatedTxtCtrl(G2frame.dataWindow,peaks['Limits'],1,nDig=(10,3),
min=peaks['Limits'][0],max=10.,typeHint=float,OnLeave=NewLim)
limitBox.Add(highLim,0,WACV)
return limitBox
def backSizer():
def NewBack(invalid,value,tc):
if invalid:
return
G2plt.PlotISFG(G2frame,data,newPlot=False,plotType='G(R)',peaks=peaks)
def OnRefBack(event):
peaks['Background'][2] = refbk.GetValue()
backBox = wx.BoxSizer(wx.HORIZONTAL)
backBox.Add(wx.StaticText(G2frame.dataWindow,label=' Background slope: '),0,WACV)
slope = G2G.ValidatedTxtCtrl(G2frame.dataWindow,peaks['Background'][1],1,nDig=(10,3),
min=-4.*np.pi,max=0.,typeHint=float,OnLeave=NewBack)
backBox.Add(slope,0,WACV)
refbk = wx.CheckBox(parent=G2frame.dataWindow,label=' Refine?')
refbk.SetValue(peaks['Background'][2])
refbk.Bind(wx.EVT_CHECKBOX, OnRefBack)
backBox.Add(refbk,0,WACV)
return backBox
def peakSizer():
def PeaksRefine(event):
c = event.GetCol()
if PDFPeaks.GetColLabelValue(c) == 'refine':
                choice = ['P - position','M - magnitude','S - standard deviation']
dlg = wx.MultiChoiceDialog(G2frame,'Select','Refinement controls',choice)
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetSelections()
parms = ''
for x in sel:
parms += choice[x][0]
for peak in peaks['Peaks']:
peak[3] = parms
dlg.Destroy()
wx.CallAfter(UpdatePDFPeaks,G2frame,peaks,data)
def ElTypeSelect(event):
r,c = event.GetRow(),event.GetCol()
if 'Atom' in PDFPeaks.GetColLabelValue(c):
PE = G2elemGUI.PickElement(G2frame)
if PE.ShowModal() == wx.ID_OK:
el = PE.Elem.strip()
peaks['Peaks'][r][c] = el
PDFPeaks.SetCellValue(r,c,el)
PE.Destroy()
colLabels = ['position','magnitude','sig','refine','Atom A','Atom B','Bond No.']
Types = 3*[wg.GRID_VALUE_FLOAT+':10,3',]+[wg.GRID_VALUE_CHOICE+': ,P,M,S,PM,PS,MS,PMS',]+ \
2*[wg.GRID_VALUE_STRING,]+[wg.GRID_VALUE_FLOAT+':10,3',]
rowLabels = [str(i) for i in range(len(peaks['Peaks']))]
peakTable = G2G.Table(peaks['Peaks'],rowLabels=rowLabels,colLabels=colLabels,types=Types)
PDFPeaks = G2G.GSGrid(G2frame.dataWindow)
PDFPeaks.SetTable(peakTable,True)
PDFPeaks.AutoSizeColumns(False)
PDFPeaks.Bind(wg.EVT_GRID_LABEL_LEFT_DCLICK, PeaksRefine)
PDFPeaks.Bind(wg.EVT_GRID_CELL_LEFT_DCLICK, ElTypeSelect)
peakBox = wx.BoxSizer(wx.VERTICAL)
peakBox.Add(wx.StaticText(G2frame.dataWindow,label=' PDF Peaks:'),0,WACV)
peakBox.Add(PDFPeaks,0,WACV)
return peakBox
def OnCopyPDFPeaks(event):
TextList = GetFileList(G2frame,'PDF')
Source = G2frame.GPXtree.GetItemText(G2frame.PatternId)
if len(TextList) == 1:
G2frame.ErrorDialog('Nothing to copy PDF peaks to','There must be more than one "PDF" pattern')
return
od = {'label_1':'Only refine flags','value_1':False}
dlg = G2G.G2MultiChoiceDialog(G2frame,'Copy PDF peaks','Copy peaks from '+Source+' to:',TextList,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
PDFlist = [TextList[i] for i in dlg.GetSelections()]
for item in PDFlist:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,item)
olddata = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'PDF Peaks'))
if od['value_1']:
olddata['Background'][2] = peaks['Background'][2]
for ip,peak in enumerate(olddata['Peaks']):
peak[3] = peaks['Peaks'][ip][3]
else:
olddata.update(copy.deepcopy(peaks))
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,Id, 'PDF Peaks'),olddata)
G2frame.GetStatusBar().SetStatusText('PDF peaks copied',1)
finally:
dlg.Destroy()
def OnFitPDFpeaks(event):
PatternId = G2frame.PatternId
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'PDF Controls'))
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'PDF Peaks'))
if not peaks:
G2frame.ErrorDialog('No peaks!','Nothing to fit!')
return
newpeaks = G2pwd.PDFPeakFit(peaks,data['G(R)'])[0]
print ('PDF peak fit finished')
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,PatternId, 'PDF Peaks'),newpeaks)
G2plt.PlotISFG(G2frame,data,peaks=newpeaks,newPlot=False)
wx.CallAfter(UpdatePDFPeaks,G2frame,newpeaks,data)
def OnFitAllPDFpeaks(event):
Names = G2gd.GetGPXtreeDataNames(G2frame,['PDF ',])
od = {'label_1':'Copy to next','value_1':False,'label_2':'Reverse order','value_2':False}
dlg = G2G.G2MultiChoiceDialog(G2frame,'PDF peak fitting','Select PDFs to fit:',Names,extraOpts=od)
try:
if dlg.ShowModal() == wx.ID_OK:
Id = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Sequential PDF peak fit results')
if Id:
SeqResult = G2frame.GPXtree.GetItemPyData(Id)
else:
SeqResult = {}
Id = G2frame.GPXtree.AppendItem(parent=G2frame.root,text='Sequential PDF peak fit results')
SeqResult = {'SeqPseudoVars':{},'SeqParFitEqList':[]}
items = dlg.GetSelections()
if od['value_2']:
items.reverse()
newpeaks = None
G2frame.EnablePlot = False
for item in items:
name = Names[item]
pId = G2gd.GetGPXtreeItemId(G2frame,G2frame.root,name)
data = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,pId, 'PDF Controls'))
if od['value_1'] and newpeaks is not None:
peaks = copy.deepcopy(newpeaks)
else:
peaks = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(G2frame,pId,'PDF Peaks'))
newpeaks,vals,varyList,sigList,parmDict,Rvals = G2pwd.PDFPeakFit(peaks,data['G(R)'])
if vals is None:
print ('Nothing varied!')
dlg.Destroy()
return
SeqResult[name] = {'variables':vals,'varyList':varyList,'sig':sigList,'Rvals':Rvals,
'covMatrix':np.eye(len(varyList)),'title':name,'parmDict':parmDict}
G2frame.GPXtree.SetItemPyData(G2gd.GetGPXtreeItemId(G2frame,pId, 'PDF Peaks'),newpeaks)
SeqResult['histNames'] = Names
G2frame.G2plotNB.Delete('Sequential refinement') #clear away probably invalid plot
G2plt.PlotISFG(G2frame,data,peaks=newpeaks,newPlot=False)
G2frame.GPXtree.SetItemPyData(Id,SeqResult)
G2frame.GPXtree.SelectItem(Id)
print ('All PDFs peak fitted - results in Sequential PDF peak fit results')
else:
print ('Sequential fit cancelled')
finally:
dlg.Destroy()
def OnClearPDFpeaks(event):
peaks['Peaks'] = []
G2plt.PlotISFG(G2frame,data,peaks=peaks,newPlot=False)
wx.CallAfter(UpdatePDFPeaks,G2frame,peaks,data)
G2gd.SetDataMenuBar(G2frame,G2frame.dataWindow.PDFPksMenu)
G2frame.Bind(wx.EVT_MENU, OnCopyPDFPeaks, id=G2G.wxID_PDFCOPYPEAKS)
G2frame.Bind(wx.EVT_MENU, OnFitPDFpeaks, id=G2G.wxID_PDFPKSFIT)
G2frame.Bind(wx.EVT_MENU, OnFitAllPDFpeaks, id=G2G.wxID_PDFPKSFITALL)
G2frame.Bind(wx.EVT_MENU, OnClearPDFpeaks, id=G2G.wxID_CLEARPDFPEAKS)
G2frame.dataWindow.ClearData()
mainSizer = G2frame.dataWindow.GetSizer()
mainSizer.Add((5,5),0)
mainSizer.Add(wx.StaticText(G2frame.dataWindow,label=' PDF peak fit controls:'),0,WACV)
mainSizer.Add((5,5),0)
mainSizer.Add(limitSizer(),0,WACV)
mainSizer.Add((5,5),0)
mainSizer.Add(backSizer())
if len(peaks['Peaks']):
mainSizer.Add((5,5),0)
mainSizer.Add(peakSizer())
G2frame.dataWindow.SetDataSize() | PypiClean |
/Interface_KB-1.1.0.tar.gz/Interface_KB-1.1.0/Interface_KB/PerformanceFunctions.py | from .InterfaceObjects import Parameter,PerformanceModel,StopCondition,Constraint,Objective,DecisionVariable,AnalysisModel,ResultMetric,Contact,BoundaryCondition,Term,Expression
#import helper functions
from .HelperFunctions import *
#-----------------------------------------------------------------------------------------------------------
#
# Performance extension
#
#-----------------------------------------------------------------------------------------------------------
def getPerformance(OptimizationProblemName,model):
"""
Function to fetch the complete performance model using the name as identifier.
The function returns an interface block of the performance model
:param string OptimizationProblemName: Name identifier of the optimization problem
:param object model: Metamodel instance model
:return object PerformanceModel: Interface object of the performance model (-1 = error)
"""
#define performance model interface object
pModel = PerformanceModel()
pModel.clean() # Flushing the pModel
found = 0
if len(model.includesOptimizationProblem.items) != 0:
# match the assemblySystem by Name
for optimizationProblem in model.includesOptimizationProblem.items:
if OptimizationProblemName == optimizationProblem.hasName:
found = 1
pModel.Name=optimizationProblem.hasName
pModel.Description = optimizationProblem.hasDescription
# fetch the analysysDescription
pModel.OptimizationMethod=[optimizationProblem.hasOptimizationAnalysisdDescription.hasOptimizationMethod.hasName,optimizationProblem.hasOptimizationAnalysisdDescription.hasOptimizationMethod.hasDescription,optimizationProblem.hasOptimizationAnalysisdDescription.hasOptimizationMethod.hasAlgorithmClass.name]
StopList = []
for STP in optimizationProblem.hasOptimizationAnalysisdDescription.hasStopCriterion.items:
s = StopCondition(Name=STP.hasName,Description=STP.hasDescription,Value=STP.hasValue,Stopcriteria=STP.hasStopCriterion.name)
StopList.append(s)
pModel.StopConditionList=StopList
# fetch the Optimization targets
for designTarget in optimizationProblem.hasOptimizationTargetDescription.hasDesignTarget.items:
# check the typing
x = str(type(designTarget))
if 'Constraint' in x:
cstr = Constraint(Name=designTarget.hasName,Description=designTarget.hasDescription)
#expression
expression_new = Expression()
#operator
expression_new.ExpressionOperator = designTarget.hasExpression.hasExpressionOperator.hasValue
#terms
for term in designTarget.hasExpression.hasTerm.items:
if "LEFT" in term.hasInfixPosition.name:
position = "LEFT"
if "RIGHT" in term.hasInfixPosition.name:
position = "RIGHT"
x = str(type(term))
if 'Textual' in x: # textual term
term_new = Term(None, Value=term.hasValue)
if 'Numerical' in x: # numerical term
term_new = Term(None, Value=term.hasValue)
if 'Variable' in x: # variable term
term_new = Term(None, Value=[term.hasDecisionVariable.hasName,term.hasDecisionVariable.hasDescription,term.hasDecisionVariable.hasOptimum])
#cstr.Expression.append(TERM)
if position=="LEFT":
expression_new.LeftTerm = term_new
elif position=="RIGHT":
expression_new.RightTerm = term_new
cstr.Expression = expression_new
pModel.ConstraintList.append(cstr)
if 'Objective' in x:
OBJ_O = Objective(Name=designTarget.hasName, Description=designTarget.hasDescription)
x = str(type(designTarget.hasTerm))
if 'Textual' in x: # textual term
term_new = Term(None, Value=designTarget.hasTerm.hasValue)
OBJ_O.ObjectiveTerm = term_new
if 'Numerical' in x: # numerical term
term_new = Term(None, Value=designTarget.hasTerm.hasValue)
OBJ_O.ObjectiveTerm = term_new
if 'Variable' in x: # variable term
term_new = Term(None, Value=[designTarget.hasTerm.hasDecisionVariable.hasName,designTarget.hasTerm.hasDecisionVariable.hasDescription,designTarget.hasTerm.hasDecisionVariable.hasOptimum])
OBJ_O.ObjectiveTerm = term_new
#add options
OBJ_O.ObjectiveOption = designTarget.hasOption.name
pModel.ObjectiveList.append(OBJ_O)
# fetch the design variables
for variable in optimizationProblem.hasOptimizationTargetDescription.hasDecisionVariable.items:
dv = DecisionVariable(Name=variable.hasName,Description=variable.hasDescription,InitialValue=variable.hasInitialValue,MaxValue=variable.hasMaxValue,MinValue=variable.hasMinValue,Optimum=variable.hasOptimum,Resolution=variable.hasResolution)
try:
par = Parameter(Name=variable.hasParameter.hasName,Description=variable.hasParameter.hasDescription,Key=variable.hasParameter.hasKey, GUID=variable.hasParameter.hasGUID,Value=variable.hasParameter.hasValue)
dv.parameterList.append(par)
except:
par = []
pModel.parameterList.append(dv)
if found:
return pModel
else:
return -1
else:
return -1 # no Optimization Problem!
def updatePerformance(interfaceObject,model,KBPath):
"""
    Function to update the complete performance model matching the name as identifier.
    The performance interface object is used to interface with the function.
    The function returns whether or not the update was performed correctly.
    :param object PerformanceModel: Interface object of the performance model
    :param object model: Metamodel instance model
    :param string KBPath: Absolute path to the metamodel instance model
    :return int Error: -1 = error, 1 = function performed correctly
"""
#find the optimization model
found = 0
if len(model.includesOptimizationProblem.items) != 0:
# match the assemblySystem by Name
for optimizationProblem in model.includesOptimizationProblem.items:
if interfaceObject.Name == optimizationProblem.hasName:
found = 1
optimizationProblem.hasName=interfaceObject.Name
optimizationProblem.hasDescription=interfaceObject.Description
#analysisDescription
optimizationProblem.hasOptimizationAnalysisdDescription.hasOptimizationMethod.hasName = interfaceObject.OptimizationMethod[0]
optimizationProblem.hasOptimizationAnalysisdDescription.hasOptimizationMethod.hasDescription = interfaceObject.OptimizationMethod[1]
optimizationProblem.hasOptimizationAnalysisdDescription.hasOptimizationMethod.hasAlgorithmClass.name = interfaceObject.OptimizationMethod[2]
StopList = []
for STP in optimizationProblem.hasOptimizationAnalysisdDescription.hasStopCriterion.items:
for STP_NEW in interfaceObject.StopConditionList:
if STP_NEW.Name == STP.hasName:
STP.hasName=STP_NEW.Name
STP.hasDescription=STP_NEW.Description
STP.hasValue=STP_NEW.Value
STP.hasStopCriterion.name=STP_NEW.StopCriteria
#Objectives and constraints
for designTarget in optimizationProblem.hasOptimizationTargetDescription.hasDesignTarget.items:
# check the typing
x = str(type(designTarget))
if 'Constraint' in x:
for constraint_new in interfaceObject.ConstraintList:
if constraint_new.Name ==designTarget.hasName:
designTarget.hasName=constraint_new.Name
designTarget.hasDescription=constraint_new.Description
for term in designTarget.hasExpression.hasTerm.items:
                                    pass    #TODO: with the current KB (version 6), updating expressions is not supported; revisit if needed
if 'Objective' in x:
                        for obj_new in interfaceObject.ObjectiveList:
                            if obj_new.Name == designTarget.hasName:    # match by name, as for constraints above
                                designTarget.hasDescription = obj_new.Description
                                x = str(type(designTarget.hasTerm))
                                if 'Textual' in x: # textual term
                                    designTarget.hasTerm.hasValue = obj_new.ObjectiveTerm.Value
                                if 'Numerical' in x: # numerical term
                                    designTarget.hasTerm.hasValue = obj_new.ObjectiveTerm.Value
                                if 'Variable' in x: # variable term
                                    designTarget.hasTerm.hasDecisionVariable.hasName = obj_new.ObjectiveTerm.Value[0]
                                    designTarget.hasTerm.hasDecisionVariable.hasDescription = obj_new.ObjectiveTerm.Value[1]
                                    designTarget.hasTerm.hasDecisionVariable.hasOptimum = obj_new.ObjectiveTerm.Value[2]
#Decision Variables
for variable in optimizationProblem.hasOptimizationTargetDescription.hasDecisionVariable.items:
for variable_new in interfaceObject.parameterList:
if variable.hasName == variable_new.Name:
variable.hasName=variable_new.Name
variable.hasInitialValue=variable_new.InitialValue
variable.hasMaxValue=variable_new.MaxValue
variable.hasMinValue=variable_new.MinValue
variable.hasOptimum=variable_new.Optimum
variable.hasResolution=variable_new.Resolution
for par in variable_new.parameterList:
if variable.hasParameter.hasName==par.Name:
variable.hasParameter.hasName = par.Name
variable.hasParameter.hasDescription = par.Description
variable.hasParameter.hasKey = par.Key
variable.hasParameter.hasGUID = par.GUID
variable.hasParameter.hasValue = str(par.Value)
if found:
updateKBv6(KBPath, model)
return 1
else:
return -1
else:
return -1 # no Optimization Problem!
def setPerformance(interfaceObject,model,KBPath,API):
"""
Function to add a complete performance model to a blank KB (no existing performance part).
The performance interface object is used to interface with the function.
The function returns whether or not the function performed correctly.
:param object interfaceObject: Interface object of the performance model
:param object model: Metamodel instance model
:param string KBPath: Absolute path to the metamodel instance model
:param object API: Metamodel API used to instantiate new model elements
:return int Error: -1 = error, 1 = function performed correctly
"""
# generate a new ASG config and configure
optimizationProblem = API.create_connected("OptimizationProblem")
optimizationProblem.hasName=interfaceObject.Name
optimizationProblem.hasDescription=interfaceObject.Description
#analysisDescription
OptimizationAnalysisdDescription = API.create_noPlatformRoot("OptimizationAnalysisDescription")
#add the method
OptimizationMethod = API.create_noPlatformRoot("OptimizationMethod")
OptimizationMethod.hasName = interfaceObject.OptimizationMethod[0]
OptimizationMethod.hasDescription = interfaceObject.OptimizationMethod[1]
OptimizationMethod.hasAlgorithmClass.name = interfaceObject.OptimizationMethod[2]
OptimizationAnalysisdDescription.hasOptimizationMethod = OptimizationMethod
#add the stop criteria
for STP_NEW in interfaceObject.StopConditionList:
StopCriterion = API.create_noPlatformRoot("StopCriterion")
StopCriterion.hasName = STP_NEW.Name
StopCriterion.hasDescription = STP_NEW.Description
StopCriterion.hasValue = STP_NEW.Value
StopCriterion.hasStopCriterion.name = STP_NEW.StopCriteria
OptimizationAnalysisdDescription.hasStopCriterion.append(StopCriterion)
optimizationProblem.hasOptimizationAnalysisdDescription = OptimizationAnalysisdDescription
OptimizationTargetDescription = API.create_noPlatformRoot("OptimizationTargetDescription")
# Decision Variables
for variable_new in interfaceObject.parameterList:
variable = API.create_noPlatformRoot("Variable")
variable.hasName = variable_new.Name
variable.hasInitialValue = variable_new.InitialValue
variable.hasMaxValue = variable_new.MaxValue
variable.hasMinValue = variable_new.MinValue
variable.hasOptimum = variable_new.Optimum
variable.hasResolution = variable_new.Resolution
for par in variable_new.parameterList:
subvariable = API.create_noPlatformRoot("Variable")
subvariable.hasName = par.Name
subvariable.hasDescription = par.Description
subvariable.hasKey = par.Key
subvariable.hasGUID = par.GUID
subvariable.hasValue = str(par.Value)
variable.hasParameter = subvariable # assumed attach point: updatePerformance reads variable.hasParameter.* (attach was missing here)
OptimizationTargetDescription.hasDecisionVariable.append(variable)
#Objectives and constraints
for constraint_new in interfaceObject.ConstraintList:
constraint = API.create_noPlatformRoot("Constraint")
constraint.hasName=constraint_new.Name
constraint.hasDescription=constraint_new.Description
# ----------------------Added 18/10/2022 meeting Pavel--------------------------
expression = API.create_noPlatformRoot("Expression")
#operator
expressionOperator = API.create_noPlatformRoot("ExpressionOperator")
expressionOperator.hasValue = constraint_new.Expression.ExpressionOperator
expression.hasExpressionOperator=expressionOperator
#Left term
if isinstance(constraint_new.Expression.LeftTerm.Value, str):
term = API.create_noPlatformRoot("TextualTerm")
term.hasInfixPosition = "LEFT"
term.hasValue = constraint_new.Expression.LeftTerm.Value
elif isinstance(constraint_new.Expression.LeftTerm.Value, list):
term = API.create_noPlatformRoot("VariableTerm")
term.hasInfixPosition = "LEFT"
for var in OptimizationTargetDescription.hasDecisionVariable.items:
if var.hasName == constraint_new.Expression.LeftTerm.Value[0]:
term.hasDecisionVariable = var
else:
term = API.create_noPlatformRoot("NumericalTerm")
term.hasInfixPosition = "LEFT"
term.hasValue = constraint_new.Expression.LeftTerm.Value
expression.hasTerm.append(term)
# Right term
if isinstance(constraint_new.Expression.RightTerm.Value, str):
term = API.create_noPlatformRoot("TextualTerm")
term.hasInfixPosition = "RIGHT"
term.hasValue = constraint_new.Expression.RightTerm.Value
elif isinstance(constraint_new.Expression.RightTerm.Value, list):
term = API.create_noPlatformRoot("VariableTerm")
term.hasInfixPosition = "RIGHT"
for var in OptimizationTargetDescription.hasDecisionVariable.items:
if var.hasName == constraint_new.Expression.RightTerm.Value[0]:
term.hasDecisionVariable = var
else:
term = API.create_noPlatformRoot("NumericalTerm")
term.hasInfixPosition = "RIGHT"
term.hasValue = constraint_new.Expression.RightTerm.Value
expression.hasTerm.append(term)
constraint.hasExpression = expression
# ------------------------------------------------------------------------------
OptimizationTargetDescription.hasDesignTarget.append(constraint)
for obj_new in interfaceObject.ObjectiveList:
objective = API.create_noPlatformRoot("Objective")
objective.hasName = obj_new.Name
objective.hasDescription=obj_new.Description
# ----------------------Added 18/10/2022 meeting Pavel--------------------------
if isinstance(obj_new.ObjectiveTerm.Value, str):
term = API.create_noPlatformRoot("TextualTerm")
term.hasValue = obj_new.ObjectiveTerm.Value
elif isinstance(obj_new.ObjectiveTerm.Value, list):
term = API.create_noPlatformRoot("VariableTerm")
for var in OptimizationTargetDescription.hasDecisionVariable.items:
if var.hasName== obj_new.ObjectiveTerm.Value[0]:
term.hasDecisionVariable=var
else:
term = API.create_noPlatformRoot("NumericalTerm")
term.hasValue = obj_new.ObjectiveTerm.Value
objective.hasTerm = term
objective.hasOption=obj_new.ObjectiveOption
# ------------------------------------------------------------------------------
OptimizationTargetDescription.hasDesignTarget.append(objective)
optimizationProblem.hasOptimizationTargetDescription = OptimizationTargetDescription
updateKBv6(KBPath, model)
return 1
def getAnalysisModel(analysisModelName,model):
"""
Function to fetch the complete analysis model using the name as identifier.
The function returns an interface object of the analysis model.
:param string analysisModelName: Name identifier of the analysis model
:param object model: Metamodel instance model
:return object AnalysisModel: Interface object of the analysis model (-1 = error)
"""
# define performance model interface object
aModel = AnalysisModel()
aModel.clean() # Flushing the pModel
found = 0
if len(model.includesProduct[0].hasAnalysisModel.items) != 0:
# match the assemblySystem by Name
for analysisModel in model.includesProduct[0].hasAnalysisModel.items:
if analysisModel.hasName == analysisModelName:
found = 1 # only flag success when a name match is found
aModel.Name = analysisModel.hasName
aModel.Description = analysisModel.hasDescription
aModel.Version = analysisModel.hasVersion
aModel.SubsetNumber = analysisModel.hasSubsetNumber
aModel.ModelFile = analysisModel.hasModelFile
#fetch the modelDescription
aModel.ModelDescription = analysisModel.hasModeldescription
#fetch the meshes
meshes=[]
for m in analysisModel.hasMesh.items:
meshes.append(m.hasMeshType)
aModel.Mesh = meshes
# -- fetch the analysis elements --
#fetch result metrics
results = []
aModel.AnalysisType = analysisModel.hasAnalysis.hasAnalysisType
for result in analysisModel.hasAnalysis.hasAnalysisResult.items:
metric = ResultMetric(Name=result.hasName,Description=result.hasDescription,Optimum=result.hasOptimum,Unit=result.hasUnit,Value=result.hasValue)
results.append(metric)
aModel.AnalysisResults = results
# fetch contacts
contacts = []
for result in analysisModel.hasAnalysis.hasContact.items:
contact = Contact(Name=result.hasName, Type=result.hasType,SearchDistance=result.hasSearchDistance)
areas = []
for area in result.hasApplicationArea:
areas.append([area.hasName,area.hasProductPart]) #TODO: check if productpart is present
contact.ApplicationAreas = areas
contacts.append(contact)
aModel.Contacts = contacts
# fetch boundaryConditions
boundaries = []
for result in analysisModel.hasAnalysis.hasBoundaryCondition.items:
bound = BoundaryCondition(Name=result.hasName,Type=result.hasType,Value=result.hasValue)
#direction
bound.Direction = [result.hasDirection.hasX,result.hasDirection.hasY,result.hasDirection.hasZ,result.hasDirection.hasRx,result.hasDirection.hasRy,result.hasDirection.hasRz]
#DOF
bound.DOF = [result.hasDOF.hasDOF1,result.hasDOF.hasDOF2,result.hasDOF.hasDOF3,result.hasDOF.hasDOF4,result.hasDOF.hasDOF5,result.hasDOF.hasDOF6]
#ApplicationAreas
areas = []
for area in result.hasApplicationarea:
areas.append([area.hasName, area.hasProductPart]) # TODO: check if productpart is present
bound.ApplicationAreas = areas
boundaries.append(bound)
aModel.BoundaryConditions = boundaries
# fetch ToolDescription
aModel.ToolDescription = [analysisModel.hasAnalysis.hasTooldescription.hasName,analysisModel.hasAnalysis.hasTooldescription.hasDescription,analysisModel.hasAnalysis.hasTooldescription.hasVersion]
# fetch response points
points = []
for p in analysisModel.hasAnalysis.hasResponsepoint:
points.append([p.hasResponseType,[p.position.hasX,p.position.hasY,p.position.hasZ]])
aModel.Responsepoints=points
# fetch decision variables
variables = []
for var in analysisModel.hasAnalysis.hasDecisionvariable:
variable = DecisionVariable(Name=var.hasName,Description=var.hasDescription,InitialValue=var.hasInitialValue,MinValue=var.hasMinValue,MaxValue=var.hasMaxValue,Optimum=var.hasOptimum,Resolution=var.hasResolution,Unit=var.hasUnit)
parList = []
try:
for par in var.hasParameter:
p = Parameter(Name=par.hasName,Description=par.hasDescription,Key=par.hasKey, GUID=par.hasGUID,Value=par.hasValue)
parList.append(p)
variable.parameterList = parList
except Exception:
pass # decision variable has no parameters
variables.append(variable)
aModel.DecisionVariables = variables
if found:
return aModel
else:
return -1
else:
return -1 # no analysis model!
def updateAnalysisModel(interfaceObject,model,KBPath):
"""
Function to update the complete analysis model matching the name as identifier.
The analysis interface object is used to interface with the function.
The function returns whether or not the function performed correctly.
:param object interfaceObject: Interface object of the analysis model
:param object model: Metamodel instance model
:param string KBPath: Absolute path to the metamodel instance model
:return int Error: -1 = error, 1 = function performed correctly
"""
found = 0
if len(model.includesProduct[0].hasAnalysisModel.items) != 0:
# match the assemblySystem by Name
for analysisModel in model.includesProduct[0].hasAnalysisModel.items:
if analysisModel.hasName == interfaceObject.Name:
found = 1 # only flag success when a name match is found
analysisModel.hasName = interfaceObject.Name
analysisModel.hasDescription = interfaceObject.Description
analysisModel.hasVersion = interfaceObject.Version
analysisModel.hasSubsetNumber = interfaceObject.SubsetNumber
analysisModel.hasModelFile = interfaceObject.ModelFile
# update the modelDescription
analysisModel.hasModeldescription.hasFormalism = interfaceObject.ModelDescription[0]
analysisModel.hasModeldescription.hasVersion = interfaceObject.ModelDescription[1]
analysisModel.hasModeldescription.hasCreatedBy = interfaceObject.ModelDescription[2]
#TODO: [DISCUSS] meshes can only be ADDED, as no identification is provided. Is this sufficient?
# -- fetch the analysis elements --
# update result metrics
for results_update in interfaceObject.AnalysisResults:
for result in analysisModel.hasAnalysis.hasAnalysisResult.items:
if result.hasName == results_update.Name:
result.hasName = results_update.Name
result.hasDescription = results_update.Description
result.hasOptimum = results_update.Optimum
result.hasUnit = results_update.Unit
result.hasValue = results_update.Value
#analysisModel.hasAnalysis.hasAnalysisType = results_update.Name
# update contacts
for results_update in interfaceObject.Contacts:
for result in analysisModel.hasAnalysis.hasContact.items:
result.hasName = results_update.Name
result.hasType = results_update.Type
result.hasSearchDistance = results_update.SearchDistance
for area_update in results_update.ApplicationAreas:
for area in result.hasApplicationArea:
if area_update[0]==area.hasName:
area.hasName = area_update[0]
area.hasProductPart = area_update[1]
# update boundaryConditions
for results_update in interfaceObject.BoundaryConditions:
for result in analysisModel.hasAnalysis.hasBoundaryCondition.items:
if result.hasName == results_update.Name:
result.hasName = results_update.Name
result.hasType = results_update.Type
result.hasValue = results_update.Value
# direction
result.hasDirection.hasX = results_update.Direction[0]
result.hasDirection.hasY = results_update.Direction[1]
result.hasDirection.hasZ = results_update.Direction[2]
result.hasDirection.hasRx = results_update.Direction[3]
result.hasDirection.hasRy = results_update.Direction[4]
result.hasDirection.hasRz = results_update.Direction[5]
# DOF
result.hasDOF.hasDOF1 = results_update.DOF[0]
result.hasDOF.hasDOF2 = results_update.DOF[1]
result.hasDOF.hasDOF3 = results_update.DOF[2]
result.hasDOF.hasDOF4 = results_update.DOF[3]
result.hasDOF.hasDOF5 = results_update.DOF[4]
result.hasDOF.hasDOF6 = results_update.DOF[5]
# ApplicationAreas
for area_update in results_update.ApplicationAreas:
for area in result.hasApplicationarea:
if area_update[0] == area.hasName:
area.hasName = area_update[0]
area.hasProductPart = area_update[1]
# update ToolDescription
try:
analysisModel.hasAnalysis.hasTooldescription.hasName = interfaceObject.ToolDescription[0]
analysisModel.hasAnalysis.hasTooldescription.hasDescription = interfaceObject.ToolDescription[1]
analysisModel.hasAnalysis.hasTooldescription.hasVersion = interfaceObject.ToolDescription[2]
except Exception:
pass # no tool description present on the analysis
#TODO: [DISCUSS] Response points can only be ADDED, as no identification is provided. Is this sufficient?
# update decision variables
for results_update in interfaceObject.DecisionVariables:
for result in analysisModel.hasAnalysis.hasDecisionvariable.items:
if result.hasName == results_update.Name:
result.hasName = results_update.Name
result.hasDescription = results_update.Description
result.hasInitialValue = results_update.InitialValue
result.hasMinValue = results_update.MinValue
result.hasMaxValue = results_update.MaxValue
result.hasOptimum = results_update.Optimum
result.hasResolution = results_update.Resolution
result.hasUnit = results_update.Unit
for parameter_update in results_update.parameterList:
for parameter in result.hasParameter:
parameter.hasName = parameter_update.Name
parameter.hasDescription = parameter_update.Description
parameter.hasKey = parameter_update.Key
parameter.hasGUID = parameter_update.GUID
parameter.hasValue = parameter_update.Value
if found:
updateKBv6(KBPath, model)
return 1
else:
return -1
else:
return -1 # no Analysis!
def setAnalysisModel(interfaceObject,model,KBPath,API):
"""
Function to add a complete analysis model to a blank KB (no existing analysis part).
The analysis interface object is used to interface with the function.
The function returns whether or not the function performed correctly.
:param object interfaceObject: Interface object of the analysis model
:param object model: Metamodel instance model
:param string KBPath: Absolute path to the metamodel instance model
:param object API: Metamodel API used to instantiate new model elements
:return int Error: -1 = error, 1 = function performed correctly
"""
# generate a new ASG config and configure
analysisModel = API.create_noPlatformRoot("PerformanceModel")
analysisModel.hasName = interfaceObject.Name
analysisModel.hasDescription = interfaceObject.Description
analysisModel.hasVersion = interfaceObject.Version
analysisModel.hasSubsetNumber = interfaceObject.SubsetNumber
analysisModel.hasModelFile = interfaceObject.ModelFile
# add the modelDescription
Modeldescription = API.create_noPlatformRoot("ModelDescription")
Modeldescription.hasFormalism = interfaceObject.ModelDescription[0]
Modeldescription.hasVersion = interfaceObject.ModelDescription[1]
Modeldescription.hasCreatedBy = interfaceObject.ModelDescription[2]
analysisModel.hasModeldescription = Modeldescription
# add meshes
for mesh in interfaceObject.Mesh:
m = API.create_noPlatformRoot("Mesh")
m.hasMeshType = mesh
analysisModel.hasMesh.append(m)
# -- fetch the analysis elements --
Analysis = API.create_noPlatformRoot("Analysis")
# update result metrics
for results_update in interfaceObject.AnalysisResults:
result = API.create_noPlatformRoot("AnalysisResult")
result.hasName = results_update.Name
result.hasDescription = results_update.Description
result.hasOptimum = results_update.Optimum
result.hasUnit = results_update.Unit
result.hasValue = results_update.Value
Analysis.hasAnalysisResult.append(result)
# add contacts
for results_update in interfaceObject.Contacts:
result = API.create_noPlatformRoot("Contact")
result.hasName = results_update.Name
result.hasType = results_update.Type
result.hasSearchDistance = results_update.SearchDistance
for area_update in results_update.ApplicationAreas:
area = API.create_noPlatformRoot("ApplicationArea")
area.hasName = area_update[0]
area.hasProductPart = area_update[1]
result.hasApplicationArea.append(area)
Analysis.hasContact.append(result)
# update boundaryConditions
for results_update in interfaceObject.BoundaryConditions:
result = API.create_noPlatformRoot("BoundaryCondition")
result.hasName = results_update.Name
result.hasType = results_update.Type
result.hasValue = results_update.Value
# direction
direction = API.create_noPlatformRoot("Direction")
direction.hasX = results_update.Direction[0]
direction.hasY = results_update.Direction[1]
direction.hasZ = results_update.Direction[2]
direction.hasRx = results_update.Direction[3]
direction.hasRy = results_update.Direction[4]
direction.hasRz = results_update.Direction[5]
result.hasDirection = direction
# DOF
DOF = API.create_noPlatformRoot("DOF")
DOF.hasDOF1 = results_update.DOF[0]
DOF.hasDOF2 = results_update.DOF[1]
DOF.hasDOF3 = results_update.DOF[2]
DOF.hasDOF4 = results_update.DOF[3]
DOF.hasDOF5 = results_update.DOF[4]
DOF.hasDOF6 = results_update.DOF[5]
result.hasDOF = DOF
# ApplicationAreas
for area_update in results_update.ApplicationAreas:
area = API.create_noPlatformRoot("ApplicationArea")
area.hasName = area_update[0]
area.hasProductPart = area_update[1]
result.hasApplicationarea.append(area)
Analysis.hasBoundaryCondition.append(result)
# add ToolDescription
try:
Tooldescription = API.create_noPlatformRoot("Tooldescription")
Tooldescription.hasName = interfaceObject.ToolDescription[0]
Tooldescription.hasDescription = interfaceObject.ToolDescription[1]
Tooldescription.hasVersion = interfaceObject.ToolDescription[2]
Analysis.hasTooldescription = Tooldescription # attach to the analysis (read back via hasAnalysis.hasTooldescription in getAnalysisModel)
except Exception:
pass # no tool description provided
# add response points
for rp in interfaceObject.Responsepoints:
m = API.create_noPlatformRoot("ResponsePoint")
m.hasResponseType = rp[0] # rp is [ResponseType, [x, y, z]] (see getAnalysisModel); only the type is written back here
Analysis.hasResponsepoint.append(m)
# update decision variables
for results_update in interfaceObject.DecisionVariables:
result = API.create_noPlatformRoot("Decisionvariable")
result.hasName = results_update.Name
result.hasDescription = results_update.Description
result.hasInitialValue = results_update.InitialValue
result.hasMinValue = results_update.MinValue
result.hasMaxValue = results_update.MaxValue
result.hasOptimum = results_update.Optimum
result.hasResolution = results_update.Resolution
result.hasUnit = results_update.Unit
for parameter_update in results_update.parameterList:
parameter = API.create_noPlatformRoot("Parameter")
parameter.hasName = parameter_update.Name
parameter.hasDescription = parameter_update.Description
parameter.hasKey = parameter_update.Key
parameter.hasGUID = parameter_update.GUID
parameter.hasValue = parameter_update.Value
result.hasParameter.append(parameter)
Analysis.hasDecisionvariable.append(result) # attribute name matches reads elsewhere (hasDecisionvariable, singular)
analysisModel.hasAnalysis = Analysis
model.includesProduct.items[0].hasAnalysisModel.append(analysisModel)
updateKBv6(KBPath, model)
return 1
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/layout/GridContainer.js
if(!dojo._hasResource["dojox.layout.GridContainer"]){
dojo._hasResource["dojox.layout.GridContainer"]=true;
dojo.provide("dojox.layout.GridContainer");
dojo.experimental("dojox.layout.GridContainer");
dojo.require("dijit._base.focus");
dojo.require("dijit._Templated");
dojo.require("dijit._Container");
dojo.require("dijit._Contained");
dojo.require("dojo.dnd.move");
dojo.require("dojox.layout.dnd.PlottedDnd");
dojo.declare("dojox.layout.GridContainer",[dijit._Widget,dijit._Templated,dijit._Container,dijit._Contained],{templateString:dojo.cache("dojox.layout","resources/GridContainer.html","<div id=\"${id}\" class=\"gridContainer\" dojoAttachPoint=\"containerNode\" tabIndex=\"0\" dojoAttachEvent=\"onkeypress:_selectFocus\">\n\t<table class=\"gridContainerTable\" dojoAttachPoint=\"gridContainerTable\" cellspacing=\"0\" cellpadding=\"0\">\n\t\t<tbody class=\"gridContainerBody\">\n\t\t\t<tr class=\"gridContainerRow\" dojoAttachPoint=\"gridNode\"></tr>\n\t\t</tbody>\n\t</table>\n</div>\n"),isContainer:true,isAutoOrganized:true,isRightFixed:false,isLeftFixed:false,hasResizableColumns:true,nbZones:1,opacity:1,colWidths:[],minColWidth:20,minChildWidth:150,acceptTypes:[],mode:"right",allowAutoScroll:false,timeDisplayPopup:1500,isOffset:false,offsetDrag:{},withHandles:false,handleClasses:[],_draggedWidget:null,_isResized:false,_activeGrip:null,_a11yOn:false,_canDisplayPopup:true,constructor:function(_1,_2){
_1=_1||{};
this.acceptTypes=_1.acceptTypes||["dijit.layout.ContentPane"];
this.offsetDrag=_1.offsetDrag||_1.dragOffset||{x:0,y:0};
},postCreate:function(){
this.inherited(arguments);
if(this.nbZones===0){
this.nbZones=1;
}
if(dojo.isIE&&dojo.marginBox(this.gridNode).height){
var _3=document.createTextNode(" ");
this.gridNode.appendChild(_3);
}
for(var i=0;i<this.nbZones;i++){
var _4=dojo.create("td",{id:this.id+"_dz"+i,className:"gridContainerZone",style:{width:this._getColWidth(i)+"%"}},this.gridNode);
}
},startup:function(){
this.grid=this._createGrid();
this.connect(dojo.global,"onresize","onResized");
this.connect(this,"onDndDrop","_placeGrips");
this.dropHandler=dojo.subscribe("/dnd/drop",this,"_placeGrips");
this._oldwidth=this.domNode.offsetWidth;
if(this.hasResizableColumns){
this._initPlaceGrips();
this._placeGrips();
}
if(this.usepref!==true){
this[(this.isAutoOrganized?"_organizeServices":"_organizeServicesManually")]();
}
for(var j=0;j<this.grid.length;j++){
var dz=this.grid[j];
dojo.forEach(dz.node.childNodes,function(_5){
dz.setItem(_5.id,{data:_5,type:[_5.getAttribute("dndType")]});
});
}
this.inherited(arguments);
},destroy:function(){
for(var i=0;i<this.handleDndStart;i++){
dojo.disconnect(this.handleDndStart[i]);
}
dojo.unsubscribe(this.dropHandler);
this.inherited(arguments);
},resize:function(){
dojo.forEach(this.getChildren(),function(_6){
_6.resize&&_6.resize();
});
},getZones:function(){
return dojo.query(".gridContainerZone",this.containerNode);
},getNewChildren:function(){
return dojo.query("> [widgetId]",this.containerNode).map(dijit.byNode);
},getChildren:function(){
var _7=dojo.query(".gridContainerZone > [widgetId]",this.containerNode).map(dijit.byNode);
return _7;
},onResized:function(){
if(this.hasResizableColumns){
this._placeGrips();
}
},_organizeServices:function(){
var _8=this.nbZones,_9=this.getNewChildren(),_a=_9.length,_b=Math.floor(_a/_8),_c=_a%_8,i=0;
for(var z=0;z<_8;z++){
for(var r=0;r<_b;r++){
this._insertService(z,i,_9[i],true);
i++;
}
if(_c>0){
try{
this._insertService(z,i,_9[i],true);
i++;
}
catch(e){
console.error("Unable to insert service in grid container",e,_9);
}
_c--;
}else{
if(_b===0){
break;
}
}
}
},_organizeServicesManually:function(){
var _d=this.getNewChildren();
for(var i=0;i<_d.length;i++){
try{
this._insertService(_d[i].column-1,i,_d[i],true);
}
catch(e){
console.error("Unable to insert service in grid container",e,_d[i]);
}
}
},_insertService:function(z,p,_e,_f){
if(_e===undefined){
return;
}
var _10=this.getZones()[z];
var _11=_10.childNodes.length;
if(p===undefined||p>_11){
p=_11;
}
var _12=dojo.place(_e.domNode,_10,p);
_e.domNode.setAttribute("tabIndex",0);
if(!_e.dragRestriction){
dojo.addClass(_e.domNode,"dojoDndItem");
}
if(!_e.domNode.getAttribute("dndType")){
_e.domNode.setAttribute("dndType",_e.declaredClass);
}
dojox.layout.dnd._setGcDndHandle(_e,this.withHandles,this.handleClasses,_f);
if(this.hasResizableColumns){
if(_e.onLoad){
this.connect(_e,"onLoad","_placeGrips");
}
if(_e.onExecError){
this.connect(_e,"onExecError","_placeGrips");
}
if(_e.onUnLoad){
this.connect(_e,"onUnLoad","_placeGrips");
}
}
this._placeGrips();
return _e.id;
},addService:function(_13,z,p){
return this.addChild(_13,z,p);
},addChild:function(_14,z,p){
_14.domNode.id=_14.id;
if(z<=0){
z=0;
}
var _15=z||0;
if(p<=0){
p=0;
}
var row=p||0;
var _16=this._insertService(_15,row,_14);
if(this._started&&!_14._started){
this.grid[z].setItem(_14.id,{data:_14.domNode,type:[_14.domNode.getAttribute("dndType")]});
_14.startup();
}
return _16;
},_createGrid:function(){
var _17=[];
var i=0;
while(i<this.nbZones){
var _18=this._createZone(this.getZones()[i]);
if(this.hasResizableColumns&&i!=(this.nbZones-1)){
this._createGrip(_18);
}
_17.push(_18);
i++;
}
if(this.hasResizableColumns){
this.handleDndStart=[];
for(var j=0;j<_17.length;j++){
var dz=_17[j];
var _19=this;
this.handleDndStart.push(dojo.connect(dz,"onDndStart",dz,function(_1a){
if(_1a==this){
_19.handleDndInsertNodes=[];
for(i=0;i<_19.grid.length;i++){
_19.handleDndInsertNodes.push(dojo.connect(_19.grid[i],"insertNodes",_19,function(){
_19._disconnectDnd();
}));
}
_19.handleDndInsertNodes.push(dojo.connect(dz,"onDndCancel",_19,_19._disconnectDnd));
_19.onResized();
}
}));
}
}
return _17;
},_disconnectDnd:function(){
dojo.forEach(this.handleDndInsertNodes,dojo.disconnect);
setTimeout(dojo.hitch(this,"onResized"),0);
},_createZone:function(_1b){
var dz=new dojox.layout.dnd.PlottedDnd(_1b.id,{accept:this.acceptTypes,withHandles:this.withHandles,handleClasses:this.handleClasses,singular:true,hideSource:true,opacity:this.opacity,dom:this.domNode,allowAutoScroll:this.allowAutoScroll,isOffset:this.isOffset,offsetDrag:this.offsetDrag});
this.connect(dz,"insertDashedZone","_placeGrips");
this.connect(dz,"deleteDashedZone","_placeGrips");
return dz;
},_createGrip:function(dz){
var _1c=document.createElement("div");
_1c.className="gridContainerGrip";
_1c.setAttribute("tabIndex","0");
var _1d=this;
this.onMouseOver=this.connect(_1c,"onmouseover",function(e){
var _1e=false;
for(var i=0;i<_1d.grid.length-1;i++){
if(dojo.hasClass(_1d.grid[i].grip,"gridContainerGripShow")){
_1e=true;
break;
}
}
if(!_1e){
dojo.removeClass(e.target,"gridContainerGrip");
dojo.addClass(e.target,"gridContainerGripShow");
}
});
this.connect(_1c,"onmouseout",function(e){
if(!_1d._isResized){
dojo.removeClass(e.target,"gridContainerGripShow");
dojo.addClass(e.target,"gridContainerGrip");
}
});
this.connect(_1c,"onmousedown",function(e){
_1d._a11yOn=false;
_1d._activeGrip=e.target;
_1d.resizeColumnOn(e);
});
this.domNode.appendChild(_1c);
dz.grip=_1c;
},_initPlaceGrips:function(){
var dcs=dojo.getComputedStyle(this.domNode);
this._x=parseInt(dcs.paddingLeft);
var _1f=parseInt(dcs.paddingTop);
if(dojo.isIE||dojo.getComputedStyle(this.gridContainerTable).borderCollapse!="collapse"){
var ex=dojo._getBorderExtents(this.gridContainerTable);
this._x+=ex.l;
_1f+=ex.t;
}
_1f+="px";
for(var z=0;z<this.grid.length;z++){
var _20=this.grid[z];
if(_20.grip){
var _21=_20.grip;
if(!dojo.isIE){
_20.pad=dojo._getPadBorderExtents(_20.node).w;
}
_21.style.top=_1f;
}
}
},_placeGrips:function(){
var _22;
var _23=this._x;
dojo.forEach(this.grid,function(_24){
if(_24.grip){
if(_22===undefined){
if(this.allowAutoScroll){
_22=this.gridNode.scrollHeight;
}else{
_22=dojo.contentBox(this.gridNode).h;
}
}
var _25=_24.grip;
_23+=dojo[(dojo.isIE?"marginBox":"contentBox")](_24.node).w+(dojo.isIE?0:_24.pad);
dojo.style(_25,{left:_23+"px",height:_22+"px"});
}
},this);
},_getZoneByIndex:function(n){
return this.grid[(n>=0&&n<this.grid.length?n:0)];
},getIndexZone:function(_26){
for(var z=0;z<this.grid.length;z++){
if(this.grid[z].node.id==_26.id){
return z;
}
}
return -1;
},resizeColumnOn:function(e){
var k=dojo.keys;
var i;
if(!(this._a11yOn&&e.keyCode!=k.LEFT_ARROW&&e.keyCode!=k.RIGHT_ARROW)){
e.preventDefault();
dojo.body().style.cursor="ew-resize";
this._isResized=true;
this.initX=e.pageX;
var _27=[];
for(i=0;i<this.grid.length;i++){
_27[i]=dojo.contentBox(this.grid[i].node).w;
}
this.oldTabSize=_27;
for(i=0;i<this.grid.length;i++){
if(this._activeGrip==this.grid[i].grip){
this.currentColumn=this.grid[i].node;
this.currentColumnWidth=_27[i];
this.nextColumn=this.currentColumn.nextSibling;
this.nextColumnWidth=_27[i+1];
}
this.grid[i].node.style.width=_27[i]+"px";
}
var _28=function(_29,_2a){
var _2b=0;
var _2c=0;
dojo.forEach(_29,function(_2d){
if(_2d.nodeType==1){
var _2e=dojo.getComputedStyle(_2d);
var _2f=(dojo.isIE?_2a:parseInt(_2e.minWidth));
_2c=_2f+parseInt(_2e.marginLeft)+parseInt(_2e.marginRight);
if(_2b<_2c){
_2b=_2c;
}
}
});
return _2b;
};
var _30=_28(this.currentColumn.childNodes,this.minChildWidth);
var _31=_28(this.nextColumn.childNodes,this.minChildWidth);
var _32=Math.round((dojo.marginBox(this.gridContainerTable).w*this.minColWidth)/100);
this.currentMinCol=_30;
this.nextMinCol=_31;
if(_32>this.currentMinCol){
this.currentMinCol=_32;
}
if(_32>this.nextMinCol){
this.nextMinCol=_32;
}
if(this._a11yOn){
this.connectResizeColumnMove=this.connect(dojo.doc,"onkeypress","resizeColumnMove");
}else{
this.connectResizeColumnMove=this.connect(dojo.doc,"onmousemove","resizeColumnMove");
this.connectResizeColumnOff=this.connect(document,"onmouseup","resizeColumnOff");
}
}
},resizeColumnMove:function(e){
var d=0;
if(this._a11yOn){
var k=dojo.keys;
switch(e.keyCode){
case k.LEFT_ARROW:
d=-10;
break;
case k.RIGHT_ARROW:
d=10;
break;
}
}else{
e.preventDefault();
d=e.pageX-this.initX;
}
if(d==0){
return;
}
if(!(this.currentColumnWidth+d<this.currentMinCol||this.nextColumnWidth-d<this.nextMinCol)){
this.currentColumnWidth+=d;
this.nextColumnWidth-=d;
this.initX=e.pageX;
this.currentColumn.style["width"]=this.currentColumnWidth+"px";
this.nextColumn.style["width"]=this.nextColumnWidth+"px";
this._activeGrip.style.left=parseInt(this._activeGrip.style.left)+d+"px";
this._placeGrips();
}
if(this._a11yOn){
this.resizeColumnOff(e);
}
},resizeColumnOff:function(e){
dojo.body().style.cursor="default";
if(this._a11yOn){
this.disconnect(this.connectResizeColumnMove);
this._a11yOn=false;
}else{
this.disconnect(this.connectResizeColumnMove);
this.disconnect(this.connectResizeColumnOff);
}
var _33=[];
var _34=[];
var _35=this.gridContainerTable.clientWidth;
var i;
for(i=0;i<this.grid.length;i++){
var _36=dojo.contentBox(this.grid[i].node);
if(dojo.isIE){
_33[i]=dojo.marginBox(this.grid[i].node).w;
_34[i]=_36.w;
}else{
_33[i]=_36.w;
_34=_33;
}
}
var _37=false;
for(i=0;i<_34.length;i++){
if(_34[i]!=this.oldTabSize[i]){
_37=true;
break;
}
}
if(_37){
var mul=dojo.isIE?100:10000;
for(i=0;i<this.grid.length;i++){
this.grid[i].node.style.width=Math.round((100*mul*_33[i])/_35)/mul+"%";
}
this._placeGrips();
}
if(this._activeGrip){
dojo.removeClass(this._activeGrip,"gridContainerGripShow");
dojo.addClass(this._activeGrip,"gridContainerGrip");
}
this._isResized=false;
},setColumns:function(_38){
var _39;
if(_38>0){
var _3a=this.grid.length-_38;
if(_3a>0){
var _3b=[];
var _3c,end,z,_3d,j;
if(this.mode=="right"){
end=(this.isLeftFixed&&this.grid.length>0)?1:0;
_3c=this.grid.length-(this.isRightFixed?2:1);
for(z=_3c;z>=end;z--){
_3d=0;
_39=this.grid[z].node;
for(j=0;j<_39.childNodes.length;j++){
if(_39.childNodes[j].nodeType==1&&!(_39.childNodes[j].id=="")){
_3d++;
break;
}
}
if(_3d==0){
_3b[_3b.length]=z;
}
if(_3b.length>=_3a){
this._deleteColumn(_3b);
break;
}
}
if(_3b.length<_3a){
console.error("Move boxes in first columns, in all tabs before changing the organization of the page");
}
}else{
_3c=(this.isLeftFixed&&this.grid.length>0)?1:0;
end=this.grid.length;
if(this.isRightFixed){
end--;
}
for(z=_3c;z<end;z++){
_3d=0;
_39=this.grid[z].node;
for(j=0;j<_39.childNodes.length;j++){
if(_39.childNodes[j].nodeType==1&&!(_39.childNodes[j].id=="")){
_3d++;
break;
}
}
if(_3d==0){
_3b[_3b.length]=z;
}
if(_3b.length>=_3a){
this._deleteColumn(_3b);
break;
}
}
if(_3b.length<_3a){
console.warn("Move boxes in last columns, in all tabs before changing the organization of the page");
}
}
}else{
if(_3a<0){
this._addColumn(Math.abs(_3a));
}
}
this._initPlaceGrips();
this._placeGrips();
}
},_addColumn:function(_3e){
var _3f;
if(this.hasResizableColumns&&!this.isRightFixed&&this.mode=="right"){
_3f=this.grid[this.grid.length-1];
this._createGrip(_3f);
}
for(var i=0;i<_3e;i++){
_3f=dojo.doc.createElement("td");
dojo.addClass(_3f,"gridContainerZone");
_3f.id=this.id+"_dz"+this.nbZones;
var dz;
if(this.mode=="right"){
if(this.isRightFixed){
this.grid[this.grid.length-1].node.parentNode.insertBefore(_3f,this.grid[this.grid.length-1].node);
dz=this._createZone(_3f);
this.grid.splice(this.grid.length-1,0,dz);
}else{
var _40=this.gridNode.appendChild(_3f);
dz=this._createZone(_3f);
this.grid.push(dz);
}
}else{
if(this.isLeftFixed){
(this.grid.length==1)?this.grid[0].node.parentNode.appendChild(_3f,this.grid[0].node):this.grid[1].node.parentNode.insertBefore(_3f,this.grid[1].node);
dz=this._createZone(_3f);
this.grid.splice(1,0,dz);
}else{
this.grid[this.grid.length-this.nbZones].node.parentNode.insertBefore(_3f,this.grid[this.grid.length-this.nbZones].node);
dz=this._createZone(_3f);
this.grid.splice(this.grid.length-this.nbZones,0,dz);
}
}
if(this.hasResizableColumns){
var _41=this;
var _42=dojo.connect(dz,"onDndStart",dz,function(_43){
if(_43==this){
_41.handleDndInsertNodes=[];
for(var o=0;o<_41.grid.length;o++){
_41.handleDndInsertNodes.push(dojo.connect(_41.grid[o],"insertNodes",_41,function(){
_41._disconnectDnd();
}));
}
_41.handleDndInsertNodes.push(dojo.connect(dz,"onDndCancel",_41,_41._disconnectDnd));
_41.onResized();
}
});
if(this.mode=="right"){
if(this.isRightFixed){
this.handleDndStart.splice(this.handleDndStart.length-1,0,_42);
}else{
this.handleDndStart.push(_42);
}
}else{
if(this.isLeftFixed){
this.handleDndStart.splice(1,0,_42);
}else{
this.handleDndStart.splice(this.handleDndStart.length-this.nbZones,0,_42);
}
}
this._createGrip(dz);
}
this.nbZones++;
}
this._updateColumnsWidth();
},_deleteColumn:function(_44){
var _45,_46,_47;
_47=0;
for(var i=0;i<_44.length;i++){
var idx=_44[i];
if(this.mode=="right"){
_45=this.grid[idx];
}else{
_45=this.grid[idx-_47];
}
for(var j=0;j<_45.node.childNodes.length;j++){
if(_45.node.childNodes[j].nodeType!=1){
continue;
}
_46=dijit.byId(_45.node.childNodes[j].id);
for(var x=0;x<this.getChildren().length;x++){
if(this.getChildren()[x]===_46){
this.getChildren().splice(x,1);
break;
}
}
}
_45.node.parentNode.removeChild(_45.node);
if(this.mode=="right"){
if(this.hasResizableColumns){
dojo.disconnect(this.handleDndStart[idx]);
}
this.grid.splice(idx,1);
}else{
if(this.hasResizableColumns){
dojo.disconnect(this.handleDndStart[idx-_47]);
}
this.grid.splice(idx-_47,1);
}
this.nbZones--;
_47++;
if(_45.grip){
this.domNode.removeChild(_45.grip);
}
}
this._updateColumnsWidth();
},_getColWidth:function(idx){
if(idx<this.colWidths.length){
return this.colWidths[idx];
}
var _48=100;
dojo.forEach(this.colWidths,function(_49){
_48-=_49;
});
return _48/(this.nbZones-this.colWidths.length);
},_updateColumnsWidth:function(){
var _4a;
for(var z=0;z<this.grid.length;z++){
this.grid[z].node.style.width=this._getColWidth(z)+"%";
}
},_selectFocus:function(_4b){
var e=_4b.keyCode;
var _4c=null;
var _4d=dijit.getFocus();
var _4e=_4d.node;
var k=dojo.keys;
var i,_4f,_50,r,z,_51;
var _52=(e==k.UP_ARROW||e==k.LEFT_ARROW)?"lastChild":"firstChild";
var pos=(e==k.UP_ARROW||e==k.LEFT_ARROW)?"previousSibling":"nextSibling";
if(_4e==this.containerNode){
switch(e){
case k.DOWN_ARROW:
case k.RIGHT_ARROW:
for(i=0;i<this.gridNode.childNodes.length;i++){
_4c=this.gridNode.childNodes[i].firstChild;
_4f=false;
while(!_4f){
if(_4c!=null){
if(_4c.style.display!=="none"){
dijit.focus(_4c);
dojo.stopEvent(_4b);
_4f=true;
}else{
_4c=_4c[pos];
}
}else{
break;
}
}
if(_4f){
break;
}
}
break;
case k.UP_ARROW:
case k.LEFT_ARROW:
for(i=this.gridNode.childNodes.length-1;i>=0;i--){
_4c=this.gridNode.childNodes[i].lastChild;
_4f=false;
while(!_4f){
if(_4c!=null){
if(_4c.style.display!=="none"){
dijit.focus(_4c);
dojo.stopEvent(_4b);
_4f=true;
}else{
_4c=_4c[pos];
}
}else{
break;
}
}
if(_4f){
break;
}
}
break;
}
}else{
if(_4e.parentNode.parentNode==this.gridNode){
switch(e){
case k.UP_ARROW:
case k.DOWN_ARROW:
dojo.stopEvent(_4b);
var _53=0;
dojo.forEach(_4e.parentNode.childNodes,function(_54){
if(_54.style.display!=="none"){
_53++;
}
});
if(_53==1){
return;
}
_4f=false;
_4c=_4e[pos];
while(!_4f){
if(_4c==null){
_4c=_4e.parentNode[_52];
if(_4c.style.display!=="none"){
_4f=true;
}else{
_4c=_4c[pos];
}
}else{
if(_4c.style.display!=="none"){
_4f=true;
}else{
_4c=_4c[pos];
}
}
}
if(_4b.shiftKey){
if(dijit.byNode(_4e).dragRestriction){
return;
}
_51=_4e.getAttribute("dndtype");
_50=false;
for(i=0;i<this.acceptTypes.length;i++){
if(_51==this.acceptTypes[i]){
_50=true;
break;
}
}
if(_50){
var _55=_4e.parentNode;
var _56=_55.firstChild;
var _57=_55.lastChild;
while(_56.style.display=="none"||_57.style.display=="none"){
if(_56.style.display=="none"){
_56=_56.nextSibling;
}
if(_57.style.display=="none"){
_57=_57.previousSibling;
}
}
if(e==k.UP_ARROW){
r=_55.removeChild(_4e);
if(r==_56){
_55.appendChild(r);
}else{
_55.insertBefore(r,_4c);
}
r.setAttribute("tabIndex","0");
dijit.focus(r);
}else{
if(_4e==_57){
r=_55.removeChild(_4e);
_55.insertBefore(r,_4c);
r.setAttribute("tabIndex","0");
dijit.focus(r);
}else{
r=_55.removeChild(_4c);
_55.insertBefore(r,_4e);
_4e.setAttribute("tabIndex","0");
dijit.focus(_4e);
}
}
}else{
this._displayPopup();
}
}else{
dijit.focus(_4c);
}
break;
case k.RIGHT_ARROW:
case k.LEFT_ARROW:
dojo.stopEvent(_4b);
if(_4b.shiftKey){
if(dijit.byNode(_4e).dragRestriction){
return;
}
z=0;
if(_4e.parentNode[pos]==null){
if(e==k.LEFT_ARROW){
z=this.gridNode.childNodes.length-1;
}
}else{
if(_4e.parentNode[pos].nodeType==3){
z=this.gridNode.childNodes.length-2;
}else{
for(i=0;i<this.gridNode.childNodes.length;i++){
if(_4e.parentNode[pos]==this.gridNode.childNodes[i]){
break;
}
z++;
}
}
}
_51=_4e.getAttribute("dndtype");
_50=false;
for(i=0;i<this.acceptTypes.length;i++){
if(_51==this.acceptTypes[i]){
_50=true;
break;
}
}
if(_50){
var _58=_4e.parentNode;
var _59=dijit.byNode(_4e);
r=_58.removeChild(_4e);
var _5a=(e==k.RIGHT_ARROW?0:this.gridNode.childNodes[z].length);
this.addService(_59,z,_5a);
r.setAttribute("tabIndex","0");
dijit.focus(r);
this._placeGrips();
}else{
this._displayPopup();
}
}else{
var _5b=_4e.parentNode;
while(_4c===null){
if(_5b[pos]!==null&&_5b[pos].nodeType!==3){
_5b=_5b[pos];
}else{
if(pos==="previousSibling"){
_5b=_5b.parentNode.childNodes[_5b.parentNode.childNodes.length-1];
}else{
_5b=_5b.parentNode.childNodes[0];
}
}
_4f=false;
var _5c=_5b[_52];
while(!_4f){
if(_5c!=null){
if(_5c.style.display!=="none"){
_4c=_5c;
_4f=true;
}else{
_5c=_5c[pos];
}
}else{
break;
}
}
}
dijit.focus(_4c);
}
break;
}
}else{
if(dojo.hasClass(_4e,"gridContainerGrip")||dojo.hasClass(_4e,"gridContainerGripShow")){
this._activeGrip=_4b.target;
this._a11yOn=true;
this.resizeColumnOn(_4b);
}
}
}
},_displayPopup:function(){
if(this._canDisplayPopup){
var _5d=dojo.doc.createElement("div");
dojo.addClass(_5d,"gridContainerPopup");
_5d.innerHTML="This widget type is not accepted to be moved!";
var _5e=this.containerNode.appendChild(_5d);
this._canDisplayPopup=false;
setTimeout(dojo.hitch(this,function(){
this.containerNode.removeChild(_5e);
dojo.destroy(_5e);
this._canDisplayPopup=true;
}),this.timeDisplayPopup);
}
}});
dojo.extend(dijit._Widget,{dragRestriction:false,column:"1",group:""});
}
/Misago-0.36.1.tar.gz/Misago-0.36.1/misago/categories/admin/views/categories.py
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.translation import gettext_lazy as _
from ... import THREADS_ROOT_NAME
from ....acl.cache import clear_acl_cache
from ....admin.views import generic
from ....threads.threadtypes import trees_map
from ...models import Category, RoleCategoryACL
from ..forms import CategoryFormFactory, DeleteFormFactory
class CategoryAdmin(generic.AdminBaseMixin):
root_link = "misago:admin:categories:index"
model = Category
templates_dir = "misago/admin/categories"
message_404 = _("Requested category does not exist.")
def get_target(self, kwargs):
target = super().get_target(kwargs)
threads_tree_id = trees_map.get_tree_id_for_root(THREADS_ROOT_NAME)
target_is_special = bool(target.special_role)
target_not_in_categories_tree = target.tree_id != threads_tree_id
if target.pk and (target_is_special or target_not_in_categories_tree):
raise Category.DoesNotExist()
else:
return target
class CategoriesList(CategoryAdmin, generic.ListView):
def get_queryset(self):
return Category.objects.all_categories()
def process_context(self, request, context):
context["items"] = [f for f in context["items"]]
children_lists = {}
for item in context["items"]:
item.level_range = range(item.level - 1)
item.first = False
item.last = False
children_lists.setdefault(item.parent_id, []).append(item)
for level_items in children_lists.values():
level_items[0].first = True
level_items[-1].last = True
return context
class CategoryFormMixin:
def get_form_class(self, request, target):
return CategoryFormFactory(target)
def handle_form(self, form, request, target):
if form.instance.pk:
if form.instance.parent_id != form.cleaned_data["new_parent"].pk:
form.instance.move_to(
form.cleaned_data["new_parent"], position="last-child"
)
form.instance.save()
if form.instance.parent_id != form.cleaned_data["new_parent"].pk:
Category.objects.clear_cache()
else:
form.instance.insert_at(
form.cleaned_data["new_parent"], position="last-child", save=True
)
Category.objects.clear_cache()
if form.cleaned_data.get("copy_permissions"):
form.instance.category_role_set.all().delete()
copy_from = form.cleaned_data["copy_permissions"]
copied_acls = []
for acl in copy_from.category_role_set.all():
copied_acls.append(
RoleCategoryACL(
role_id=acl.role_id,
category=form.instance,
category_role_id=acl.category_role_id,
)
)
if copied_acls:
RoleCategoryACL.objects.bulk_create(copied_acls)
clear_acl_cache()
messages.success(request, self.message_submit % {"name": target.name})
class NewCategory(CategoryFormMixin, CategoryAdmin, generic.ModelFormView):
message_submit = _('New category "%(name)s" has been saved.')
class EditCategory(CategoryFormMixin, CategoryAdmin, generic.ModelFormView):
message_submit = _('Category "%(name)s" has been edited.')
class DeleteCategory(CategoryAdmin, generic.ModelFormView):
message_submit = _('Category "%(name)s" has been deleted.')
template_name = "delete.html"
def get_form_class(self, request, target):
return DeleteFormFactory(target)
def handle_form(self, form, request, target):
move_children_to = form.cleaned_data.get("move_children_to")
move_threads_to = form.cleaned_data.get("move_threads_to")
if move_children_to:
for child in target.get_children():
# refresh child and new parent
move_children_to = Category.objects.get(pk=move_children_to.pk)
child = Category.objects.get(pk=child.pk)
child.move_to(move_children_to, "last-child")
if move_threads_to and child.pk == move_threads_to.pk:
move_threads_to = child
else:
for child in target.get_descendants().order_by("-lft"):
child.delete_content()
child.delete()
if move_threads_to:
target.move_content(move_threads_to)
move_threads_to.synchronize()
move_threads_to.save()
else:
target.delete_content()
# refresh instance
instance = Category.objects.get(pk=form.instance.pk)
instance.delete()
messages.success(request, self.message_submit % {"name": target.name})
return redirect(self.root_link)
class MoveDownCategory(CategoryAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = target.get_next_sibling()
except Category.DoesNotExist:
other_target = None
if other_target:
Category.objects.move_node(target, other_target, "right")
Category.objects.clear_cache()
message = _('Category "%(name)s" has been moved below "%(other)s".')
targets_names = {"name": target.name, "other": other_target.name}
messages.success(request, message % targets_names)
class MoveUpCategory(CategoryAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = target.get_previous_sibling()
except Category.DoesNotExist:
other_target = None
if other_target:
Category.objects.move_node(target, other_target, "left")
Category.objects.clear_cache()
message = _('Category "%(name)s" has been moved above "%(other)s".')
targets_names = {"name": target.name, "other": other_target.name}
messages.success(request, self.message_submit % {"name": target.name})
/EmBCI-0.1.2.tar.gz/EmBCI-0.1.2/embci/utils/ili9341_api.py
# built-ins
import os
import time
import threading
# requirements.txt: necessary: pillow
# requirements.txt: data-processing: numpy
# requirements.txt: drivers: spidev, gpio4
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import spidev
from gpio4 import SysfsGPIO
__dir__ = os.path.dirname(os.path.abspath(__file__))
__all__ = ['rgb888to565', 'rgb888to565_pro', 'rgb565to888', 'rgb565to888_pro',
'rgb24to565', 'rgb565to24', 'ILI9341_API']
# ILI9341 registers
ILI9341_NOP = 0x00
ILI9341_SWRESET = 0x01
ILI9341_RDDID = 0x04
ILI9341_RDDST = 0x09
ILI9341_SLPIN = 0x10
ILI9341_SLPOUT = 0x11
ILI9341_PTLON = 0x12
ILI9341_NORON = 0x13
ILI9341_RDMODE = 0x0A
ILI9341_RDMADCTL = 0x0B
ILI9341_RDPIXFMT = 0x0C
ILI9341_RDIMGFMT = 0x0A
ILI9341_RDSELFDIAG = 0x0F
ILI9341_INVOFF = 0x20
ILI9341_INVON = 0x21
ILI9341_GAMMASET = 0x26
ILI9341_DISPOFF = 0x28
ILI9341_DISPON = 0x29
ILI9341_CASET = 0x2A
ILI9341_PASET = 0x2B
ILI9341_RAMWR = 0x2C
ILI9341_RAMRD = 0x2E
ILI9341_PTLAR = 0x30
ILI9341_MADCTL = 0x36
ILI9341_PIXFMT = 0x3A
ILI9341_FRMCTR1 = 0xB1
ILI9341_FRMCTR2 = 0xB2
ILI9341_FRMCTR3 = 0xB3
ILI9341_INVCTR = 0xB4
ILI9341_DFUNCTR = 0xB6
ILI9341_PWCTR1 = 0xC0
ILI9341_PWCTR2 = 0xC1
ILI9341_PWCTR3 = 0xC2
ILI9341_PWCTR4 = 0xC3
ILI9341_PWCTR5 = 0xC4
ILI9341_VMCTR1 = 0xC5
ILI9341_VMCTR2 = 0xC7
ILI9341_RDID1 = 0xDA
ILI9341_RDID2 = 0xDB
ILI9341_RDID3 = 0xDC
ILI9341_RDID4 = 0xDD
ILI9341_GMCTRP1 = 0xE0
ILI9341_GMCTRN1 = 0xE1
ILI9341_PWCTR6 = 0xFC
# colors R G B
ILI9341_BLACK = [0x00, 0x00] # 0 0 0
ILI9341_BLUE = [0x00, 0x1F] # 0 0 255
ILI9341_GREEN = [0x07, 0xE0] # 0 255 0
ILI9341_CYAN = [0x07, 0xFF] # 0 255 255
ILI9341_RED = [0xF8, 0x00] # 255 0 0
ILI9341_MAGENTA = [0xF8, 0x1F] # 255 0 255
ILI9341_YELLOW = [0xFF, 0xE0] # 255 255 0
ILI9341_WHITE = [0xFF, 0xFF] # 255 255 255
ILI9341_PURPLE = [0x41, 0x2B] # 128 0 128
ILI9341_ORANGE = [0xFD, 0xC0] # 255 160 10
ILI9341_GREY = [0x84, 0x10] # 128 128 128
# rotation definition
ILI9341_MADCTL_MY = 0x80
ILI9341_MADCTL_MX = 0x40
ILI9341_MADCTL_MV = 0x20
ILI9341_MADCTL_ML = 0x10
ILI9341_MADCTL_RGB = 0x00
ILI9341_MADCTL_BGR = 0x08
ILI9341_MADCTL_MH = 0x04
def rgb888to565(r, g, b):
'''input r, g, b and output [chigh, clow]'''
c = ((r & 0b11111000) << 8) | ((g & 0b11111100) << 3) | (b >> 3)
return [c >> 8, c & 0xff]
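# e.g. rgb888to565(255, 0, 0) == [0xF8, 0x00] (ILI9341_RED) and rgb888to565(0, 0, 255) == [0x00, 0x1F] (ILI9341_BLUE)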
def rgb888to565_pro(r, g, b):
'''takes about 1.5 time than normal rgb888to565, but more precise'''
c = ((r * 249 + 1014) & 0xf800 |
((g * 253 + 505) >> 5) & 0xffe0 |
(b * 249 + 1014) >> 11)
return [c >> 8, c & 0xff]
def rgb565to888(ch, cl):
'''input [chigh, clow] and output (r, g, b)'''
r = ch & 0b11111000 | ((ch >> 3) & 0b00111)
g = (ch & 0b111) << 5 | (cl & 0b11100000) >> 3 | (cl >> 5) & 0b011
b = (cl & 0b11111) << 3 | cl & 0b00111
return (r, g, b)
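# e.g. rgb565to888(0xF8, 0x00) == (255, 0, 0) and rgb565to888(0x07, 0xE0) == (0, 255, 0)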
def rgb565to888_pro(ch, cl):
'''takes about 1.4 times than normal rgb565to888, but more precise'''
r = ((ch >> 3) * 527 + 23) >> 6
g = (((ch & 0b00000111) << 3 | cl >> 5) * 259 + 33) >> 6
b = ((cl & 0b00011111) * 527 + 23) >> 6
return (r, g, b)
def rgb24to565(v):
'''input v between 0x000000 - 0xffffff and output [chigh, clow]'''
return rgb888to565(v >> 16, v >> 8 & 0xff, v & 0xff)
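# e.g. rgb24to565(0xFF0000) == [0xF8, 0x00]; rgb565to24 below is its inverse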
def rgb565to24(ch, cl):
'''input [chigh, clow] and output v between 0x000000 - 0xffffff'''
r, g, b = rgb565to888(ch, cl)
return r << 16 | g << 8 | b
class ILI9341_API(spidev.SpiDev):
_lock = threading.Lock()
def __init__(self, dc, rst=None, width=320, height=240, *a, **k):
'''
Create an interface of ILI9341 SPI Screen by establishing SPI
connection through `/dev/spidev*.*`. GPIO number of D/C pin must be
provided(more details about Data/Command pin in ILI9341 datasheet),
as well as the spidev number(bus and cs pin). Reset pin is optional.
Parameters
----------
dev : tuple
(bus, cs) indicating device `/dev/spidev${bus}.${cs}`
(passed to `open()`, not to this constructor)
dc : int
Data/Command select pin number
rst : int
Reset pin number
width, height : int
screen width and height in pixel, default 320 x 240
Notes
-----
Basic principle of this API:
1. maintain a framebuffer (self.fb)
2. draw on framebuffer (self.draw_*)
3. render framebuffer to screen (self.flush)
'''
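# Typical usage (hypothetical pin and bus numbers):
#   api = ILI9341_API(dc=2, rst=3)
#   api.open((0, 0))  # /dev/spidev0.0
#   api.start()
#   api.setfont('/usr/share/fonts/truetype/freefont/FreeSans.ttf', 16)
#   api.draw_text(10, 10, 'hello', ILI9341_WHITE)
#   api.close()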
self._dc = SysfsGPIO(dc)
if rst is None:
self._rst = None
else:
self._rst = SysfsGPIO(rst)
self._opened = False
self.width = width
self.height = height
self.fb = np.zeros((self.height, self.width, 2), np.uint8)
self.font = None
self.size = 16
def open(self, dev, max_speed_hz=25000000):
assert not self._opened, 'already used spidev{}.{}'.format(*self._dev)
super(ILI9341_API, self).open(dev[0], dev[1])
self.max_speed_hz = max_speed_hz
self.mode = 0
self._dev = dev
self._dc.export = True
self._dc.direction = 'out'
if self._rst is not None:
self._rst.export = True
self._rst.direction = 'out'
self._opened = True
def setfont(self, filename, size=None):
size = size or self.size
if self.font is None or self.font.path != filename:
try:
font = ImageFont.truetype(filename, size * 2)
self.font = font
except IOError:
pass # keep the previous font if the file cannot be loaded
self.size = size
def setsize(self, size):
if self.font is None:
print('[ILI9341 API] font not set yet!')
return
if self.size != size:
self.font = ImageFont.truetype(self.font.path, size * 2)
self.size = size
def _command(self, data):
'''Write an array of bytes to screen as command data'''
self._dc.value = 0
self.writebytes([data])
def _data(self, data, chunk=4096):
'''Write an array of bytes to screen as display data'''
if len(data):
self._dc.value = 1
for s in range(0, len(data), chunk):
self.xfer2(data[s:(s + chunk)])
def _set_window(self, x1, y1, x2, y2):
'''
Set the pixel address window for proceeding drawing commands.
x1 and x2 should define the minimum and maximum x pixel bounds.
y1 and y2 should define the minimum and maximum y pixel bounds.
If no parameters are specified the default will be to update the
entire display from (0, 0) to (239, 319)
'''
self._command(0x2A); self._data([x1 >> 8, x1, x2 >> 8, x2])
self._command(0x2B); self._data([y1 >> 8, y1, y2 >> 8, y2])
self._command(0x2C) # write to RAM
def flush(self, x1, y1, x2, y2):
'''write data in framebuffer to screen'''
with self._lock:
self._set_window(x1, y1, x2, y2)
self._data(self.fb[y1:y2+1, x1:x2+1].flatten().tolist())
def reset(self):
if self._rst is None:
return False
self._rst.value = 1
time.sleep(0.02)
self._rst.value = 0
time.sleep(0.02)
self._rst.value = 1
time.sleep(0.15)
return True
def start(self, *a, **k):
'''
Initialize the display. This should be called at least once before
using other draw_* methods.
'''
assert self._opened, 'you need to open a spi device first'
if self.reset():
time.sleep(0.5)
self._command(0xEF); self._data([0x03, 0x80, 0x02])
self._command(0xCF); self._data([0x00, 0xC1, 0x30])
self._command(0xED); self._data([0x64, 0x03, 0x12, 0x81])
self._command(0xE8); self._data([0x85, 0x00, 0x78])
self._command(0xCB); self._data([0x39, 0x2C, 0x00, 0x34, 0x02])
self._command(0xF7); self._data([0x20])
self._command(0xEA); self._data([0x00, 0x00])
self._command(0xC0); self._data([0x23])
self._command(0xC1); self._data([0x10])
self._command(0xC5); self._data([0x3e, 0x28])
self._command(0xC7); self._data([0x86])
self._command(0x36); self._data([0x58])
self._command(0x3A); self._data([0x55])
self._command(0xB1); self._data([0x00, 0x18])
self._command(0xB6); self._data([0x08, 0x82, 0x27])
self._command(0xF2); self._data([0x00])
self._command(0x26); self._data([0x01])
self._command(0xE0); self._data([0x0F, 0x31, 0x2B, 0x0C, 0x0E,
0x08, 0x4E, 0xF1, 0x37, 0x07,
0x10, 0x03, 0x0E, 0x09, 0x00])
self._command(0xE1); self._data([0x00, 0x0E, 0x14, 0x03, 0x11,
0x07, 0x31, 0xC1, 0x48, 0x08,
0x0F, 0x0C, 0x31, 0x36, 0x0F])
self._command(0x11) # Exit Sleep
time.sleep(0.12)
self._command(0x29) # Display on
time.sleep(0.2)
self.set_rotation(3) # Set screen direction
self.clear()
def close(self, *a, **k):
if not self._opened:
return
self.clear()
super(ILI9341_API, self).close()
self._dc.value = 0
self._dc.export = False
if self._rst is not None:
self._rst.value = 0
self._rst.export = False
self._opened = False
def draw_point(self, x, y, c, *a, **k):
self.fb[y, x] = c
self.flush(x, y, x, y)
def draw_line(self, x1, y1, x2, y2, c, *a, **k):
# draw vertical or horizontal line
if (x1 == x2) or (y1 == y2):
self.draw_rectf(x1, y1, x2, y2, c)
return
# draw a line cross point(x1, y1) and point(x2, y2)
# 1. get line function `y = k * x + b`
k, b = np.polyfit([x1, x2], [y1, y2], 1)
if abs(y2 - y1) > abs(x2 - x1):
# 2. use y as index to get smoother line
_y = np.arange(min(y1, y2), max(y1, y2)).astype(np.uint16)
_x = np.round((_y - b) / k).astype(np.uint16)
else:
# 2. use x as index to get smoother line
_x = np.arange(min(x1, x2), max(x1, x2)).astype(np.uint16)
_y = np.round(k * _x + b).astype(np.uint16)
# 3. plot _x, _y on framebuffer
self.fb[_y, _x] = c
self.flush(_x.min(), _y.min(), _x.max(), _y.max())
def draw_rect(self, x1, y1, x2, y2, c, *a, **k):
self.fb[y1, x1:x2] = self.fb[y2, (x1 + 1):(x2 + 1)] = c
self.fb[y1:y2, x2] = self.fb[(y1 + 1):(y2 + 1), x1] = c
if max((x2 - x1), (y2 - y1)) < 45: # 45*45*2 = 4050 bytes < 4096 chunk
self.flush(x1, y1, x2, y2) # draw whole rectangle
else:
self.flush(x1, y1, x2 - 1, y1) # draw top line
self.flush(x1 + 1, y2, x2, y2) # draw bottom line
self.flush(x1, y1 + 1, x1, y2) # draw left line
self.flush(x2, y1, x2, y2 - 1) # draw right line
def draw_rectf(self, x1, y1, x2, y2, c, *a, **k):
self.fb[y1:(y2 + 1), x1:(x2 + 1)] = c
self.flush(x1, y1, x2, y2)
def draw_circle(self, x, y, r, c, s=0, e=360, step=0.5, f=False, *a, **k):
'''
x, y: center of circle
r: radius
c: color
s, e: start and end degree between [0, 360], default s=0, e=360
step: this value smaller, the smooth level of arc higher
f: whether fill circle with color, only support s=0, 90, 180, 270
and e=90, 180, 270, 360
'''
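# e.g. draw_circle(100, 100, 30, ILI9341_RED, s=0, e=90) draws the upper-right quarter arc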
d = np.arange(s, e, step) * np.pi / 180 # degree to rad
_x, _y = x + r * np.cos(d), y - r * np.sin(d) # rad to pos (float)
_x, _y = np.round([_x, _y]).astype(np.uint32) # pos to index (int)
d = np.unique(_x << 16 | _y) # remove repeat index
_x, _y = d >> 16, d & 0xffff # recover index data
if f is True:
if s in [90, 180]: # fill from _x to x
for _x, _y in np.stack([_x, _y], -1):
self.fb[_y, _x:x] = c
elif s in [0, 270]: # fill from x to _x
for _x, _y in np.stack([_x, _y], -1):
self.fb[_y, x:_x] = c
else:
raise ValueError('only support s=0, 90, 180, 270')
else:
self.fb[_y, _x] = c
self.flush(_x.min(), _y.min(), _x.max(), _y.max())
def draw_circlef(self, x, y, r, c, *a, **k):
'''
draw a filled whole circle, faster than draw_circle(s=0, e=360, f=True)
'''
_y = np.arange(y - r, y + r + 1).astype(np.uint16)
_x = np.round(np.sqrt(r**2 - (_y - y)**2)).astype(np.uint16)
for m_x, m_y in np.stack([_x, _y], -1):
self.fb[m_y, (x - m_x):(x + m_x)] = c
self.flush(x - r, y - r, x + r, y + r)
def draw_round(self, x, y, r, c, m, *a, **k):
'''
x, y: center of round corner
r: radius
c: color, 2-bytes list of rgb565, such as `blue`: [0x00, 0x1F]
m: corner num, see below graph, m = 0, 1, 2, 3
+--------------------------------+
|(0, 0) |
| |
| m=1 m=0 |
| +---------+ |
| |I am rect| |
| +---------+ |
| m=2 m=3 |
| |
| (319, 239)|
+--------------------------------+
'''
assert m in [0, 1, 2, 3], 'Invalid corner number!'
self.draw_circle(x, y, r, c, m * 90, (m + 1) * 90)
def draw_roundf(self, x, y, r, c, m, step=0.5, *a, **k):
'''
See Also
--------
draw_round
draw_circle
'''
assert m in [0, 1, 2, 3], 'Invalid corner number!'
self.draw_circle(x, y, r, c, m * 90, (m + 1) * 90, f=True)
def draw_round_rect(self, x1, y1, x2, y2, r, c, *a, **k):
self.draw_round(x2 - r, y2 - r, r, c, 0) # right - bottom
self.draw_round(x1 + r, y2 - r, r, c, 1) # left - bottom
self.draw_round(x1 + r, y1 + r, r, c, 2) # left - top
self.draw_round(x2 - r, y1 + r, r, c, 3) # right - top
self.draw_rectf(x1 + r, y1, x2 - r, y1, c)
self.draw_rectf(x1 + r, y2, x2 - r, y2, c)
self.draw_rectf(x1, y1 + r, x1, y2 - r, c)
self.draw_rectf(x2, y1 + r, x2, y2 - r, c)
def draw_round_rectf(self, x1, y1, x2, y2, r, c, *a, **k):
self.draw_roundf(x2 - r, y2 - r, r, c, 0)
self.draw_roundf(x1 + r, y2 - r, r, c, 1)
self.draw_roundf(x1 + r, y1 + r, r, c, 2)
self.draw_roundf(x2 - r, y1 + r, r, c, 3)
self.draw_rectf(x1 + r, y1, x2 - r, y2, c)
self.draw_rectf(x1, y1 + r, x1 + r, y2 - r, c)
self.draw_rectf(x2 - r, y1 + r, x2, y2 - r, c)
def draw_img(self, x, y, img, *a, **k):
'''draw RGB[A] img with shape of (height, width, depth) at (x, y)'''
img = np.atleast_3d(img).astype(np.uint8)
x1, y1 = x, y
x2 = max(min(x1 + img.shape[1], self.width), x1)
y2 = max(min(y1 + img.shape[0], self.height), y1)
# img shape correction and extracting alpha channel
img = img[:(y2 - y1), :(x2 - x1)].astype(np.int16)
        if img.shape[2] == 4:
            img, alpha = np.split(img, [-1], axis=-1)
            alpha = alpha.astype(float) / 255  # the `np.float` alias was removed from NumPy
        else:
            if img.shape[2] != 3:
                # keep the channel axis so repeating yields an (h, w, 3) image
                img = np.repeat(img[:, :, :1], 3, axis=-1)
            alpha = np.ones((y2 - y1, x2 - x1, 1), float)
# calculate difference of image and current framebuffer
current = np.split(self.fb[y1:y2, x1:x2].astype(np.uint16), 2, -1)
current = np.int16(np.concatenate(rgb565to888_pro(*current), -1))
# weight it with alpha channel
dest = current + (img - current) * alpha
# convert to rgb565 and draw back on framebuffer
dest = np.split(dest.astype(np.uint16), 3, -1)
dest = np.concatenate(rgb888to565_pro(*dest), -1).astype(np.uint8)
self.fb[y1:y2, x1:x2] = dest
self.flush(x1, y1, x2 - 1, y2 - 1)
def draw_text(self, x, y, s, c, size=None, font=None, *a, **k):
assert self.font, '[ILI9341 API] font not set yet!'
if size is not None and self.size != size:
self.setsize(size)
if font is not None and os.path.exists(font):
self.setfont(font)
w, h = self.font.getsize(s)
img = Image.new(mode='RGBA', size=(w, h))
        ImageDraw.Draw(img).text((0, 0), s, rgb565to888(*c), self.font)
        # PIL needs integer sizes; LANCZOS replaces the removed ANTIALIAS alias
        img = img.resize((w // 2, h // 2), resample=Image.LANCZOS)
self.draw_img(x, y, np.array(img, dtype=np.uint8))
def set_rotation(self, m):
with self._lock:
self._command(0x36)
if (m % 4) == 0:
self._data([ILI9341_MADCTL_MX | ILI9341_MADCTL_BGR])
elif (m % 4) == 1:
self._data([ILI9341_MADCTL_MV | ILI9341_MADCTL_BGR])
elif (m % 4) == 2:
self._data([ILI9341_MADCTL_MY | ILI9341_MADCTL_BGR])
elif (m % 4) == 3:
self._data([ILI9341_MADCTL_MX | ILI9341_MADCTL_MY |
ILI9341_MADCTL_MV | ILI9341_MADCTL_BGR])
def clear(self, c=ILI9341_BLACK, *a, **k):
self.draw_rectf(0, 0, self.width - 1, self.height - 1, c)
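# A usage sketch (not executed; the constructor arguments of ILI9341_API are
# hardware-specific and omitted here):
#
#     screen = ILI9341_API(...)          # placeholder args, hardware-dependent
#     blue = [0x00, 0x1F]                # rgb565 color as a 2-byte list
#     screen.clear()                     # fill with ILI9341_BLACK
#     screen.draw_line(10, 10, 100, 60, blue)
#     screen.draw_rect(20, 20, 120, 90, blue)
#     screen.draw_circlef(160, 120, 30, blue)
#     screen.draw_round_rectf(40, 100, 200, 180, 12, blue)
#     screen.close()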
# THE END | PypiClean |
/Flask-AppBuilder-jack-3.3.4.tar.gz/Flask-AppBuilder-jack-3.3.4/flask_appbuilder/charts/views.py | import logging
from flask_babel import lazy_gettext
from .jsontools import dict_to_json
from .widgets import ChartWidget, DirectChartWidget
from ..baseviews import BaseModelView, expose
from ..models.group import DirectProcessData, GroupByProcessData
from ..security.decorators import has_access
from ..urltools import get_filter_args
from ..widgets import SearchWidget
log = logging.getLogger(__name__)
class BaseChartView(BaseModelView):
"""
This is the base class for all chart views.
Use DirectByChartView or GroupByChartView, override their properties
and their base classes
(BaseView, BaseModelView, BaseChartView) to customise your charts
"""
chart_template = "appbuilder/general/charts/chart.html"
""" The chart template, override to implement your own """
chart_widget = ChartWidget
""" Chart widget override to implement your own """
search_widget = SearchWidget
""" Search widget override to implement your own """
chart_title = "Chart"
""" A title to be displayed on the chart """
title = "Title"
group_by_label = lazy_gettext("Group by")
""" The label that is displayed for the chart selection """
default_view = "chart"
chart_type = "PieChart"
""" The chart type PieChart, ColumnChart, LineChart """
chart_3d = "true"
""" Will display in 3D? """
width = 400
""" The width """
height = "400px"
group_bys = {}
""" New for 0.6.4, on test, don't use yet """
def __init__(self, **kwargs):
self._init_titles()
super(BaseChartView, self).__init__(**kwargs)
def _init_titles(self):
self.title = self.chart_title
def _get_chart_widget(self, filters=None, widgets=None, **args):
raise NotImplementedError
def _get_view_widget(self, **kwargs):
"""
:return:
Returns a widget
"""
return self._get_chart_widget(**kwargs).get("chart")
class GroupByChartView(BaseChartView):
definitions = []
"""
These charts can display multiple series,
based on columns or methods defined on models.
You can display multiple charts on the same view.
    This data can be grouped and aggregated as you like.
:label: (optional) String label to display on chart selection.
:group: String with the column name or method from model.
:formatter: (optional) function that formats the output of 'group' key
:series: A list of tuples with the aggregation function and the column name
to apply the aggregation
::
[{
'label': 'String',
            'group': '<COLNAME>'|'<FUNCNAME>',
            'formatter': <FUNC>,
            'series': [(<AGGR FUNC>, <COLNAME>|'<FUNCNAME>'), ...]
}
]
example::
class CountryGroupByChartView(GroupByChartView):
datamodel = SQLAInterface(CountryStats)
chart_title = 'Statistics'
definitions = [
{
'label': 'Country Stat',
'group': 'country',
'series': [(aggregate_avg, 'unemployed_perc'),
(aggregate_avg, 'population'),
(aggregate_avg, 'college_perc')
]
}
]
"""
chart_type = "ColumnChart"
chart_template = "appbuilder/general/charts/jsonchart.html"
chart_widget = DirectChartWidget
ProcessClass = GroupByProcessData
def __init__(self, **kwargs):
super(GroupByChartView, self).__init__(**kwargs)
for definition in self.definitions:
col = definition.get("group")
# Setup labels
try:
self.label_columns[col] = (
definition.get("label") or self.label_columns[col]
)
except Exception:
self.label_columns[col] = self._prettify_column(col)
if not definition.get("label"):
definition["label"] = self.label_columns[col]
# Setup Series
for serie in definition["series"]:
if isinstance(serie, tuple):
if hasattr(serie[0], "_label"):
key = serie[0].__name__ + serie[1]
self.label_columns[key] = (
serie[0]._label + " " + self._prettify_column(serie[1])
)
else:
self.label_columns[serie] = self._prettify_column(serie)
def get_group_by_class(self, definition):
"""
        Instantiates the processing class (Direct or Grouped) and returns it.
"""
group_by = definition["group"]
series = definition["series"]
if "formatter" in definition:
formatter = {group_by: definition["formatter"]}
else:
formatter = {}
return self.ProcessClass([group_by], series, formatter)
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
direct=None,
height=None,
definition="",
**args
):
height = height or self.height
widgets = widgets or dict()
joined_filters = filters.get_joined_filters(self._base_filters)
# check if order_column may be database ordered
if not self.datamodel.get_order_columns_list([order_column]):
order_column = ""
order_direction = ""
count, lst = self.datamodel.query(
filters=joined_filters,
order_column=order_column,
order_direction=order_direction,
)
if not definition:
definition = self.definitions[0]
group = self.get_group_by_class(definition)
value_columns = group.to_json(
group.apply(lst, sort=order_column == ""), self.label_columns
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
def chart(self, group_by=0):
group_by = int(group_by)
form = self.search_form.refresh()
get_filter_args(self._filters)
widgets = self._get_chart_widget(
filters=self._filters,
definition=self.definitions[group_by],
order_column=self.definitions[group_by]["group"],
order_direction="asc",
)
widgets = self._get_search_widget(form=form, widgets=widgets)
self.update_redirect()
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
definitions=self.definitions,
group_by_label=self.group_by_label,
height=self.height,
widgets=widgets,
appbuilder=self.appbuilder,
)
class DirectByChartView(GroupByChartView):
"""
Use this class to display charts with multiple series,
based on columns or methods defined on models.
You can display multiple charts on the same view.
Default routing point is '/chart'
Setup definitions property to configure the chart
:label: (optional) String label to display on chart selection.
:group: String with the column name or method from model.
:formatter: (optional) function that formats the output of 'group' key
:series: A list of tuples with the aggregation function and the column name
to apply the aggregation
The **definitions** property respects the following grammar::
definitions = [
{
'label': 'label for chart definition',
'group': '<COLNAME>'|'<MODEL FUNCNAME>',
'formatter': <FUNC FORMATTER FOR GROUP COL>,
'series': ['<COLNAME>'|'<MODEL FUNCNAME>',...]
}, ...
]
example::
class CountryDirectChartView(DirectByChartView):
datamodel = SQLAInterface(CountryStats)
chart_title = 'Direct Data Example'
definitions = [
{
'label': 'Unemployment',
'group': 'stat_date',
'series': ['unemployed_perc',
'college_perc']
}
]
"""
ProcessClass = DirectProcessData
# -------------------------------------------------------
# DEPRECATED SECTION
# -------------------------------------------------------
class BaseSimpleGroupByChartView(BaseChartView): # pragma: no cover
group_by_columns = []
""" A list of columns to be possibly grouped by, this list must be filled """
def __init__(self, **kwargs):
if not self.group_by_columns:
raise Exception(
"Base Chart View property <group_by_columns> must not be empty"
)
else:
super(BaseSimpleGroupByChartView, self).__init__(**kwargs)
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
group_by=None,
height=None,
**args
):
height = height or self.height
widgets = widgets or dict()
group_by = group_by or self.group_by_columns[0]
joined_filters = filters.get_joined_filters(self._base_filters)
value_columns = self.datamodel.query_simple_group(
group_by, filters=joined_filters
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
class BaseSimpleDirectChartView(BaseChartView): # pragma: no cover
    direct_columns = {}
    """
    Build the chart using the columns in the dict::
        direct_columns = {'chart label 1': ('X column', 'Y1 Column', 'Y2 Column', ...),
                          'chart label 2': ('X Column', 'Y1 Column', ...), ...}
    """
def __init__(self, **kwargs):
if not self.direct_columns:
raise Exception(
"Base Chart View property <direct_columns> must not be empty"
)
else:
super(BaseSimpleDirectChartView, self).__init__(**kwargs)
def get_group_by_columns(self):
"""
returns the keys from direct_columns
Used in template, so that user can choose from options
"""
return list(self.direct_columns.keys())
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
direct=None,
height=None,
**args
):
height = height or self.height
widgets = widgets or dict()
joined_filters = filters.get_joined_filters(self._base_filters)
count, lst = self.datamodel.query(
filters=joined_filters,
order_column=order_column,
order_direction=order_direction,
)
value_columns = self.datamodel.get_values(lst, list(direct))
value_columns = dict_to_json(
direct[0], direct[1:], self.label_columns, value_columns
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
class ChartView(BaseSimpleGroupByChartView): # pragma: no cover
"""
**DEPRECATED**
Provides a simple (and hopefully nice) way to draw charts on your application.
This will show Google Charts based on group by of your tables.
"""
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
def chart(self, group_by=""):
form = self.search_form.refresh()
get_filter_args(self._filters)
group_by = group_by or self.group_by_columns[0]
widgets = self._get_chart_widget(filters=self._filters, group_by=group_by)
widgets = self._get_search_widget(form=form, widgets=widgets)
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
group_by_columns=self.group_by_columns,
group_by_label=self.group_by_label,
height=self.height,
widgets=widgets,
appbuilder=self.appbuilder,
)
class TimeChartView(BaseSimpleGroupByChartView): # pragma: no cover
"""
**DEPRECATED**
Provides a simple way to draw some time charts on your application.
This will show Google Charts based on count and group
by month and year for your tables.
"""
chart_template = "appbuilder/general/charts/chart_time.html"
chart_type = "ColumnChart"
def _get_chart_widget(
self,
filters=None,
order_column="",
order_direction="",
widgets=None,
group_by=None,
period=None,
height=None,
**args
):
height = height or self.height
widgets = widgets or dict()
group_by = group_by or self.group_by_columns[0]
joined_filters = filters.get_joined_filters(self._base_filters)
if period == "month" or not period:
value_columns = self.datamodel.query_month_group(
group_by, filters=joined_filters
)
elif period == "year":
value_columns = self.datamodel.query_year_group(
group_by, filters=joined_filters
)
widgets["chart"] = self.chart_widget(
route_base=self.route_base,
chart_title=self.chart_title,
chart_type=self.chart_type,
chart_3d=self.chart_3d,
height=height,
value_columns=value_columns,
modelview_name=self.__class__.__name__,
**args
)
return widgets
@expose("/chart/<group_by>/<period>")
@expose("/chart/")
@has_access
def chart(self, group_by="", period=""):
form = self.search_form.refresh()
get_filter_args(self._filters)
group_by = group_by or self.group_by_columns[0]
widgets = self._get_chart_widget(
filters=self._filters, group_by=group_by, period=period, height=self.height
)
widgets = self._get_search_widget(form=form, widgets=widgets)
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
group_by_columns=self.group_by_columns,
group_by_label=self.group_by_label,
widgets=widgets,
appbuilder=self.appbuilder,
)
class DirectChartView(BaseSimpleDirectChartView): # pragma: no cover
"""
**DEPRECATED**
This class is responsible for displaying a Google chart with
direct model values. Chart widget uses json.
No group by is processed, example::
class StatsChartView(DirectChartView):
datamodel = SQLAInterface(Stats)
chart_title = lazy_gettext('Statistics')
direct_columns = {'Some Stats': ('X_col_1', 'stat_col_1', 'stat_col_2'),
'Other Stats': ('X_col2', 'stat_col_3')}
"""
chart_type = "ColumnChart"
chart_widget = DirectChartWidget
@expose("/chart/<group_by>")
@expose("/chart/")
@has_access
def chart(self, group_by=""):
form = self.search_form.refresh()
get_filter_args(self._filters)
direct_key = group_by or list(self.direct_columns.keys())[0]
direct = self.direct_columns.get(direct_key)
if self.base_order:
order_column, order_direction = self.base_order
else:
order_column, order_direction = "", ""
widgets = self._get_chart_widget(
filters=self._filters,
order_column=order_column,
order_direction=order_direction,
direct=direct,
)
widgets = self._get_search_widget(form=form, widgets=widgets)
return self.render_template(
self.chart_template,
route_base=self.route_base,
title=self.chart_title,
label_columns=self.label_columns,
group_by_columns=self.get_group_by_columns(),
group_by_label=self.group_by_label,
height=self.height,
widgets=widgets,
appbuilder=self.appbuilder,
) | PypiClean |
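# A registration sketch (names are illustrative: `appbuilder` is the hosting
# application's AppBuilder instance and `CountryStats` one of its models, as in
# the docstring examples above):
#
#     from flask_appbuilder.models.sqla.interface import SQLAInterface
#
#     class CountryDirectChartView(DirectByChartView):
#         datamodel = SQLAInterface(CountryStats)
#         chart_title = 'Direct Data Example'
#         definitions = [
#             {
#                 'label': 'Unemployment',
#                 'group': 'stat_date',
#                 'series': ['unemployed_perc', 'college_perc'],
#             }
#         ]
#
#     appbuilder.add_view(CountryDirectChartView, 'Country Chart',
#                         category='Statistics')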
/GenIce-1.0.11.tar.gz/GenIce-1.0.11/genice/lattices/Struct14.py | pairs="""
15 329
259 27
265 325
258 67
87 25
224 68
331 143
39 97
256 310
288 89
239 65
0 93
108 116
109 184
59 289
163 155
306 328
120 341
212 277
243 139
192 315
273 279
62 300
166 229
294 337
138 119
120 83
286 282
30 168
203 264
197 82
196 82
261 224
161 220
297 36
249 83
252 168
288 301
21 18
213 157
28 72
189 52
177 254
255 318
57 18
173 335
182 174
234 329
179 115
225 97
298 187
130 336
163 84
294 219
48 40
16 253
129 275
163 330
261 62
166 153
253 159
233 286
296 238
305 22
193 77
186 315
271 147
170 234
105 257
113 280
30 226
280 311
216 26
122 293
83 327
300 37
126 325
125 70
334 102
332 150
38 247
15 137
22 37
210 95
207 194
46 148
251 139
217 187
162 224
45 245
90 332
74 250
240 307
265 54
149 189
59 326
56 209
194 134
332 324
109 105
213 108
86 187
3 147
200 249
7 160
210 153
162 66
280 76
271 186
228 237
61 10
183 117
125 213
171 75
99 329
216 316
7 104
99 230
120 269
98 46
152 51
144 231
206 221
106 45
86 21
251 33
242 268
210 266
135 35
64 145
318 261
135 84
242 93
291 161
292 249
14 251
103 248
176 145
271 73
165 203
303 51
90 182
177 26
6 290
199 262
145 286
135 131
58 127
231 194
150 178
128 177
313 31
90 67
61 134
223 192
49 315
152 312
313 252
13 172
195 40
3 127
142 266
96 84
63 58
4 268
123 235
285 77
48 36
269 78
75 37
283 8
49 182
270 83
41 76
252 244
38 54
53 117
222 256
158 133
306 114
11 98
44 187
270 119
0 50
331 181
217 281
243 338
241 98
64 293
227 88
105 75
146 160
144 156
60 289
12 48
31 247
51 46
33 277
340 66
104 276
223 133
162 274
85 68
174 151
331 298
301 129
88 158
176 136
180 219
0 140
7 125
63 147
198 38
204 302
149 93
53 1
99 258
126 326
225 179
260 143
103 102
204 35
320 171
40 245
337 153
191 140
309 246
138 220
73 231
336 180
150 161
2 77
243 250
111 168
287 333
122 176
128 56
63 69
230 108
317 96
182 178
244 66
166 205
200 311
174 192
11 94
87 159
6 215
253 23
69 52
22 33
70 297
233 201
28 57
269 130
309 200
121 181
53 39
47 195
200 41
29 279
173 299
258 212
123 60
45 322
209 154
52 298
58 218
14 322
272 120
79 340
16 241
267 116
281 314
303 333
100 27
282 136
283 336
55 65
38 299
107 164
44 268
123 232
158 190
16 317
307 5
284 330
85 328
44 183
286 54
198 335
284 175
190 248
267 14
144 132
87 148
202 291
14 114
79 328
20 36
76 293
15 34
243 322
207 236
231 28
17 26
128 8
225 77
167 259
131 330
41 327
294 229
112 168
257 320
217 242
85 106
29 97
263 152
196 264
86 284
321 79
141 303
112 307
154 316
115 285
202 24
279 104
28 321
169 296
211 48
99 54
287 5
287 4
55 197
234 226
207 137
184 139
173 218
36 24
33 34
9 92
176 62
323 10
9 315
165 278
59 339
170 230
126 151
320 72
57 132
211 45
170 227
338 245
169 11
170 255
13 327
309 183
88 256
115 248
50 188
1 29
160 297
59 285
110 218
25 251
124 202
226 274
63 205
100 339
184 22
103 62
296 281
138 327
260 203
223 228
114 227
113 177
340 72
12 160
308 185
324 172
239 159
214 141
197 132
280 20
179 300
142 5
336 56
214 81
60 302
130 13
334 213
9 207
214 287
110 91
43 37
111 10
80 190
294 84
203 215
273 278
42 27
267 277
239 175
250 91
270 140
299 92
218 95
255 158
270 164
338 335
340 68
16 74
31 92
43 34
305 195
114 319
246 292
53 41
246 5
210 313
197 71
221 188
298 330
247 274
166 144
118 74
272 292
148 101
111 148
42 172
96 142
319 101
329 247
264 131
318 238
240 72
227 116
306 94
242 333
225 146
216 125
318 226
71 209
141 312
273 215
17 334
103 255
290 264
220 76
311 254
339 301
12 277
216 107
332 32
222 190
193 289
105 238
121 2
239 134
273 154
49 236
131 143
284 196
43 236
288 143
321 241
191 188
66 91
21 82
61 137
51 93
334 185
252 274
39 293
78 275
6 232
100 283
118 139
278 71
122 161
111 303
44 181
305 179
309 4
128 316
1 278
165 82
215 235
308 222
20 60
276 324
228 173
272 241
27 19
12 305
188 181
211 267
89 219
199 186
1 13
175 257
196 65
310 178
50 268
259 308
167 339
152 23
19 222
47 80
65 23
193 235
123 297
180 301
17 221
135 89
151 19
169 147
325 8
259 119
238 3
312 209
201 40
32 300
199 88
306 199
113 233
337 142
169 189
304 118
321 94
26 50
257 314
78 229
124 157
167 206
133 3
291 106
275 52
104 316
73 205
178 19
237 195
32 97
323 141
165 117
102 145
81 56
295 110
214 337
49 262
64 113
253 46
229 69
208 119
9 73
80 151
118 159
254 4
17 64
244 307
341 140
78 180
276 67
186 133
304 109
212 108
224 136
325 233
304 155
31 137
265 67
217 189
30 296
291 310
192 299
341 275
124 116
295 74
290 23
288 289
263 154
122 32
328 322
295 96
317 292
110 266
320 194
313 10
302 89
121 260
0 107
208 70
308 157
102 230
58 155
121 117
106 136
282 245
260 193
201 326
335 91
146 279
42 150
302 311
237 338
15 25
47 326
167 191
156 134
11 101
246 18
138 42
29 324
290 35
130 71
220 24
81 219
310 262
276 8
109 127
208 157
331 129
208 24
223 80
211 202
7 212
235 146
263 6
206 185
258 34
86 314
240 314
204 249
317 35
112 281
201 20
112 333
92 95
124 256
319 234
265 174
2 221
47 115
30 101
100 126
85 262
162 198
295 155
263 107
185 248
81 254
55 312
240 18
87 61
323 153
272 57
204 232
164 70
269 132
206 285
191 129
163 69
236 171
79 250
75 261
55 156
198 282
25 319
244 266
271 94
232 164
323 156
304 175
2 39
21 183
149 341
149 98
283 172
90 43
184 237
228 127
95 205
171 68
"""
waters="""
0.50775 0.41038 0.86041
0.81516 0.06628 0.7628
0.12357 0.27969 0.71302
0.2923 0.52356 0.26963
0.2143 0.07335 0.88894
0.1283 0.97047 0.02422
0.80329 0.43793 0.85302
0.67979 0.32413 0.61124
0.47477 0.0 0.66666
0.55482 0.91542 0.26872
0.50048 0.14166 0.13791
0.55054 0.53808 0.15604
0.88584 0.46939 0.53421
0.71989 0.94072 0.77773
0.7642 0.54066 0.35502
0.62651 0.22712 0.32501
0.84375 0.61679 0.07298
0.31147 0.24597 0.66966
0.94379 0.00975 0.02009
0.52863 0.76396 0.55032
0.0655 0.75403 0.66367
0.97047 0.1283 0.97577
0.93733 0.38132 0.40333
0.77288 0.39939 0.99168
0.84389 0.72032 0.62032
0.67587 0.35566 0.27791
0.38321 0.22697 0.73965
0.54066 0.7642 0.64497
0.77563 0.88908 0.12551
0.80951 0.14128 0.67183
0.37268 0.31758 0.20851
0.4608 0.0558 0.2647
0.92666 0.14096 0.5556
0.80422 0.36025 0.40346
0.7077 0.23126 0.39703
0.94662 0.61833 0.93552
0.93276 0.67075 0.62516
0.92386 0.25283 0.40902
0.32928 0.9882 0.38202
0.02953 0.15782 0.69089
0.07091 0.70223 0.51407
0.93684 0.95111 0.77008
0.66823 0.85872 0.66149
0.77304 0.15625 0.40631
0.18484 0.25112 0.90386
0.93372 0.74888 0.42947
0.63976 0.44398 0.07013
0.20023 0.60449 0.51964
0.94491 0.62732 0.54184
0.67075 0.93276 0.37483
0.36868 0.29777 0.81926
0.56247 0.35076 0.9868
0.35076 0.56247 0.0132
0.95013 0.07975 0.7685
0.3748 0.04136 0.46748
0.69216 0.1477 0.00082
0.46192 0.01246 0.82271
0.80028 0.90482 0.03647
0.22032 0.63908 0.18561
0.22712 0.62651 0.67499
0.05339 0.67171 0.73114
0.63464 0.19671 0.18635
0.11345 0.22438 0.45884
0.34639 0.66313 0.13876
0.19972 0.10454 0.6302
0.80441 0.28546 0.02815
0.07975 0.95013 0.23149
0.58962 0.09738 0.52707
0.93404 0.99026 0.31325
0.32022 0.64434 0.05543
0.71454 0.51895 0.69482
0.68242 0.0551 0.87517
0.91302 0.0 0.16667
0.55886 0.83417 0.18811
0.92909 0.63133 0.15259
0.0 0.23125 0.33334
0.99026 0.93404 0.68675
0.0939 0.38826 0.67355
0.47644 0.76874 0.9363
0.85905 0.7857 0.22227
0.31674 0.65361 0.47209
0.33345 0.95865 0.86585
0.87643 0.15611 0.95365
0.75138 0.75084 0.84467
0.1477 0.69216 0.99917
0.84218 0.87171 0.35755
0.05873 0.25025 0.03502
0.68327 0.33688 0.19457
0.53921 0.59501 0.40197
0.16583 0.72469 0.85477
0.70223 0.07091 0.48592
0.06628 0.81516 0.23719
0.43346 0.92871 0.2703
0.49327 0.3918 0.95018
0.6252 0.66655 0.19918
0.35882 0.85834 0.19543
0.06724 0.73799 0.0415
0.94127 0.19152 0.63164
0.61868 0.55601 0.07
0.4674 0.17544 0.45056
0.43753 0.78829 0.67986
0.53061 0.41645 0.20087
0.32376 0.25414 0.52528
0.25414 0.32376 0.47472
0.64924 0.21171 0.65346
0.07614 0.32896 0.25764
0.92025 0.87038 0.43517
0.59574 0.39551 0.8137
0.56207 0.36536 0.51968
0.06267 0.44399 0.26334
0.1294 0.79172 0.17598
0.47137 0.23533 0.11634
0.25112 0.18484 0.09614
0.23125 0.0 0.66667
0.64216 0.52392 0.32126
0.17544 0.4674 0.54944
0.64118 0.49953 0.47124
0.0 0.157 0.83333
0.90263 0.49225 0.19374
0.65893 0.67072 0.71535
0.67104 0.74718 0.9243
0.10005 0.29098 0.79793
0.98573 0.04889 0.56324
0.91542 0.55482 0.73128
0.65846 0.59972 0.51954
0.60061 0.37349 0.65835
0.36025 0.80422 0.59653
0.19578 0.55603 0.2632
0.44399 0.06267 0.73666
0.36092 0.58124 0.85227
0.63133 0.92909 0.8474
0.07129 0.50474 0.93696
0.68853 0.9345 0.997
0.37349 0.60061 0.34165
0.72469 0.16583 0.14523
0.08458 0.6394 0.93538
0.04889 0.98573 0.43675
0.60953 0.13999 0.26682
0.74976 0.80848 0.70168
0.89853 0.50673 0.28351
0.53808 0.55054 0.84395
0.47608 0.11824 0.98793
0.15611 0.87643 0.04635
0.14166 0.50048 0.86209
0.61833 0.94662 0.06447
0.22438 0.11345 0.54115
0.88176 0.35784 0.65459
0.39551 0.59574 0.1863
0.58124 0.36092 0.14773
0.52523 0.52523 0.0
0.74888 0.93372 0.57052
0.41876 0.77969 0.51894
0.64434 0.32022 0.94458
0.40499 0.9442 0.06864
0.63908 0.22032 0.81439
0.11417 0.58356 0.13246
0.6394 0.08458 0.06461
0.59501 0.53921 0.59803
0.39047 0.53046 0.39984
0.79978 0.40426 0.14703
0.83321 0.4233 0.61396
0.87038 0.92025 0.56482
0.15782 0.02953 0.30911
0.16679 0.59009 0.0527
0.70804 0.5326 0.78389
0.89996 0.19093 0.86873
0.46954 0.86001 0.06651
0.40991 0.5767 0.71937
0.33768 0.20828 0.15735
0.41038 0.50775 0.13959
0.44518 0.3606 0.39794
0.89546 0.09518 0.29687
0.65181 0.93712 0.7001
0.23581 0.77647 0.31164
0.46939 0.88584 0.46579
0.95865 0.33345 0.13414
0.08699 0.08699 0.5
0.32896 0.07614 0.74236
0.66232 0.8706 0.50931
0.04136 0.3748 0.53252
0.40426 0.79978 0.85297
0.20828 0.33768 0.84265
0.62732 0.94491 0.45815
0.04987 0.12962 0.89815
0.0 0.47477 0.33333
0.3606 0.44518 0.60205
0.51895 0.71454 0.30518
0.19049 0.33177 0.99483
0.31758 0.37268 0.79149
0.3918 0.49327 0.04982
0.36536 0.56207 0.48032
0.41645 0.53061 0.79912
0.4233 0.83321 0.38604
0.0558 0.4608 0.7353
0.74586 0.06962 0.19195
0.09738 0.58962 0.47293
0.9061 0.29436 0.99312
0.73799 0.06724 0.95849
0.19152 0.94127 0.36835
0.61174 0.70564 0.34021
0.0 0.91302 0.83333
0.15625 0.77304 0.59369
0.80907 0.70903 0.5354
0.94126 0.34154 0.85288
0.93038 0.67624 0.85862
0.43793 0.80329 0.14699
0.30784 0.45554 0.66584
0.67171 0.05339 0.26886
0.70564 0.61174 0.65978
0.58356 0.11417 0.86754
0.34154 0.94126 0.14712
0.8706 0.66232 0.49068
0.65361 0.31674 0.5279
0.53046 0.39047 0.60016
0.34108 0.0118 0.95132
0.85834 0.35882 0.80457
0.52356 0.2923 0.73036
0.28531 0.34819 0.03344
0.23604 0.76468 0.21699
0.29196 0.82456 0.88277
0.87171 0.84218 0.64244
0.26201 0.32925 0.70817
0.49953 0.64118 0.52876
0.32413 0.67979 0.38876
0.06596 0.05621 0.35342
0.9882 0.32928 0.61798
0.32925 0.26201 0.29183
0.56655 0.49526 0.39636
0.21171 0.64924 0.34653
0.39939 0.77288 0.00832
0.44115 0.27532 0.47856
0.67624 0.93038 0.14138
0.83417 0.55886 0.81189
0.25283 0.92386 0.59097
0.45554 0.30784 0.33415
0.92871 0.43346 0.7297
0.75403 0.0655 0.33633
0.10147 0.6082 0.38315
0.22697 0.38321 0.26035
0.82456 0.29196 0.11722
0.01428 0.06316 0.10342
0.74718 0.67104 0.07569
0.34819 0.28531 0.96656
0.93712 0.65181 0.29989
0.157 0.0 0.16667
0.05928 0.77917 0.4444
0.00974 0.94379 0.97991
0.38826 0.0939 0.32645
0.27532 0.44115 0.52144
0.88908 0.77563 0.87448
0.94072 0.71989 0.22226
0.78829 0.43753 0.32013
0.29098 0.10005 0.20207
0.76874 0.47644 0.06369
0.24862 0.99946 0.82199
0.32829 0.38168 0.39781
0.59972 0.65846 0.48045
0.99946 0.24862 0.17801
0.60449 0.20023 0.48036
0.52392 0.64216 0.67874
0.05874 0.40029 0.81379
0.10454 0.19972 0.36981
0.72032 0.84389 0.37968
0.66313 0.34639 0.86124
0.9442 0.40499 0.93136
0.44946 0.98754 0.51062
0.19093 0.89996 0.13126
0.76396 0.52863 0.44967
0.28011 0.22083 0.88893
0.61679 0.84375 0.92702
0.66655 0.6252 0.80081
0.5326 0.70804 0.2161
0.76876 0.76876 0.0
0.76468 0.23604 0.783
0.27969 0.12357 0.28699
0.44398 0.63976 0.92987
0.6082 0.10147 0.61684
0.77969 0.41876 0.48106
0.79172 0.1294 0.82401
0.77647 0.23581 0.68835
0.09518 0.89546 0.70314
0.22083 0.28011 0.11107
0.14096 0.92666 0.44439
0.50673 0.89853 0.71648
0.0118 0.34108 0.04868
0.1956 0.48106 0.63852
0.24916 0.00054 0.51134
0.25025 0.05873 0.96498
0.19671 0.63464 0.81365
0.13999 0.60953 0.73317
0.86001 0.46954 0.93349
0.84301 0.84301 0.5
0.90482 0.80028 0.96353
0.05621 0.06596 0.64658
0.28546 0.80441 0.97185
0.0551 0.68242 0.12482
0.29777 0.36868 0.18074
0.8523 0.54446 0.66749
0.22354 0.45934 0.97831
0.35784 0.88176 0.34541
0.00054 0.24916 0.48865
0.33688 0.68327 0.80542
0.06962 0.74586 0.80805
0.45934 0.22354 0.02169
0.01246 0.46192 0.17728
0.98754 0.44946 0.48938
0.67072 0.65893 0.28465
0.12962 0.04987 0.10184
0.49526 0.56655 0.60363
0.06316 0.01427 0.89657
0.70903 0.80907 0.46459
0.11092 0.88655 0.79218
0.59009 0.16679 0.94729
0.40029 0.05874 0.18621
0.07335 0.2143 0.11106
0.54446 0.8523 0.33251
0.55603 0.19578 0.7368
0.9345 0.68853 0.00299
0.24597 0.31147 0.33034
0.5767 0.40991 0.28062
0.88655 0.11092 0.20782
0.75084 0.75138 0.15532
0.85872 0.66823 0.3385
0.50474 0.07129 0.06303
0.71469 0.06288 0.63322
0.38132 0.93733 0.59667
0.23126 0.7077 0.60297
0.7857 0.85905 0.77772
0.80848 0.74976 0.29831
0.48106 0.1956 0.36149
0.11824 0.47608 0.01207
0.23533 0.47137 0.88366
0.77917 0.05928 0.55559
0.33177 0.19049 0.00517
0.38168 0.32829 0.60219
0.14128 0.80951 0.32816
0.49225 0.90263 0.80626
0.29436 0.9061 0.00688
0.06288 0.71469 0.36677
0.35566 0.67587 0.72209
0.95111 0.93684 0.22991
0.55601 0.61868 0.93
"""
coord= "relative"
cages="""
14 0.55103 0.04651 0.38799
14 0.95349 0.50452 0.05465
14 0.04651 0.55103 0.61201
14 0.49548 0.44897 0.72132
14 0.50452 0.95349 0.94534
14 0.44897 0.49548 0.27867
12 0.55398 0.0 0.16667
12 0.0 0.55398 0.83333
12 0.44602 0.44602 0.5
15 0.11674 0.16521 0.23012
15 0.83479 0.95153 0.89678
15 0.16521 0.11674 0.76988
15 0.04847 0.88326 0.56345
15 0.95153 0.83479 0.10321
15 0.88326 0.04847 0.43654
12 0.30899 0.05827 0.07404
12 0.94173 0.25072 0.7407
12 0.05827 0.30899 0.92596
12 0.74928 0.69101 0.40737
12 0.25072 0.94173 0.25929
12 0.69101 0.74928 0.59262
12 0.43755 0.37467 0.08041
12 0.62533 0.06288 0.74707
12 0.37467 0.43755 0.91959
12 0.93712 0.56245 0.41374
12 0.06288 0.62533 0.25292
12 0.56245 0.93712 0.58625
15 0.18781 0.43934 0.13376
15 0.56066 0.74847 0.80042
15 0.43934 0.18781 0.86624
15 0.25153 0.81219 0.46709
15 0.74847 0.56066 0.19957
15 0.81219 0.25153 0.5329
16 0.15434 0.43282 0.39886
16 0.56718 0.72152 0.06552
16 0.43282 0.15434 0.60114
16 0.27848 0.84566 0.73219
16 0.72152 0.56718 0.93447
16 0.84566 0.27848 0.2678
12 0.14121 0.14121 0.0
12 0.85879 0.0 0.66666
12 0.0 0.85879 0.33333
14 0.26383 0.15238 0.41369
14 0.84762 0.11145 0.08035
14 0.15238 0.26383 0.58631
14 0.88855 0.73617 0.74702
14 0.11145 0.84762 0.91964
14 0.73617 0.88855 0.25297
12 0.65148 0.27339 0.06608
12 0.72661 0.37809 0.73274
12 0.27339 0.65148 0.93392
12 0.62191 0.34852 0.39941
12 0.37809 0.72661 0.26725
12 0.34852 0.62191 0.60058
12 0.48745 0.23497 0.24484
12 0.76503 0.25248 0.9115
12 0.23497 0.48745 0.75516
12 0.74752 0.51255 0.57817
12 0.25248 0.76503 0.08849
12 0.51255 0.74752 0.42182
"""
bondlen = 3
cell = """
26.935331764866525 0.0 0.0
-13.467665882433257 23.326681567736355 0.0
2.258815726823271e-15 3.912383603793526e-15 36.8892602895128
"""
density = 0.44104471589272415
from genice.cell import cellvectors
cell = cellvectors(a=26.935331764866525,
b=26.935331764866525,
c=36.8892602895128,
C=119.99999999999999) | PypiClean |
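# Usage note (an assumption about the GenIce workflow, not data used by this
# module): GenIce loads this file as a lattice plugin and reads the module-level
# `pairs`, `waters`, `coord`, `cages`, `bondlen`, `cell` and `density`
# attributes to generate an ice structure, e.g. from the command line:
#
#     genice Struct14 > Struct14.gro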
/Kerko-1.0.0-py3-none-any.whl/kerko/specs.py | from abc import ABC, abstractmethod
from collections.abc import Iterable
from typing import Any, Dict, List, Optional
from babel.numbers import format_decimal
from flask import Request, url_for
from flask_babel import get_locale
from w3lib.url import safe_url_string
from werkzeug.datastructures import MultiDict
from whoosh.fields import ID
from whoosh.query import Prefix, Term
from kerko import extractors, renderers
from kerko.codecs import (BaseFacetCodec, CollectionFacetCodec,
IdentityFieldCodec)
from kerko.text import slugify, sort_normalize
from kerko.tree import Tree
class ScopeSpec:
"""
Specifies a scope for keyword searches.
This is a configuration element, with no effect on the search index schema.
"""
def __init__(self, key, selector_label, breadbox_label, weight=0, help_text=''):
self.key = key
self.selector_label = selector_label
self.breadbox_label = breadbox_label
self.weight = weight
self.help_text = help_text
def add_keywords(self, value, active_keywords=None):
"""
Add a value for this scope to active keywords.
:param string value: The value to add.
:param MultiDict active_keywords: The active keywords to derive from.
:return MultiDict: A copy of the keywords with the added value.
"""
new_keywords = active_keywords.deepcopy() if active_keywords else MultiDict()
new_keywords.add(self.key, value.strip())
return new_keywords
def remove_keywords(self, value, active_keywords):
"""
        Remove a value for this scope from active keywords.
:param string value: The value to remove.
:param MultiDict active_keywords: The active keywords to derive from.
:return MultiDict: A copy of the keywords with the value removed.
"""
new_keywords = active_keywords.deepcopy()
if new_keywords:
new_values = [v for v in new_keywords.poplist(self.key) if v != value]
if new_values:
new_keywords.setlist(self.key, new_values)
return new_keywords
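# A minimal sketch of how a ScopeSpec manipulates keyword criteria (the key and
# labels below are made up for illustration):
#
#     scope = ScopeSpec(key='all', selector_label='Everywhere',
#                       breadbox_label='Everywhere')
#     keywords = scope.add_keywords('zotero')   # MultiDict([('all', 'zotero')])
#     keywords = scope.remove_keywords('zotero', keywords)  # value removed again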
class BaseFieldSpec(ABC):
def __init__(
self,
key,
field_type,
extractor,
):
"""
Initialize this field specification.
:param str key: Unique key for referencing the field in the search index
schema.
:param whoosh.fields.FieldType field_type: Instance of the schema field
type. Set to `None` if the field's value is to be passed along with
documents, but not added to the schema, e.g., boost factors (see
`whoosh.writing.IndexWriter.add_document`).
:param kerko.extractors.Extractor extractor: Instance of the extractor
that will extract the field value from a Zotero item.
"""
self.key = key
self.field_type = field_type
self.extractor = extractor
def extract_to_document(self, document, item, library_context):
"""Extract the value of this element from a Zotero item."""
self.extractor.extract_and_store(document, item, library_context, self)
class FieldSpec(BaseFieldSpec):
"""Specifies a schema field."""
def __init__(
self,
codec=None,
scopes=None,
**kwargs
):
"""
Initialize this field specification.
:param BaseFieldCodec codec: Value encoder/decoder for this field.
:param list scopes: List of keys to `ScopeSpec` instances to which this
field applies. If `None`, the field won't be available for keyword
search.
"""
super().__init__(**kwargs)
self.scopes = scopes or []
self.codec = codec or IdentityFieldCodec()
def encode(self, value):
return self.codec.encode(value)
def decode(self, encoded_value):
return self.codec.decode(encoded_value)
class FacetSpec(BaseFieldSpec):
"""Specifies a facet for search grouping and filtering."""
def __init__(
self,
*,
title,
filter_key,
weight=0,
initial_limit=0,
initial_limit_leeway=2,
codec=None,
missing_label=None,
sort_by=None,
sort_reverse=False,
item_view=True,
allow_overlap=True,
query_class=None,
renderer=None,
**kwargs
):
"""
Initialize this facet specification.
:param str title: Title of the facet.
:param str filter_key: Key to use in URLs when filtering with this
facet.
:param int weight: Determine the position of this facet relative to the
others.
:param int initial_limit: Maximum number of filters to show by default
under this facet. Excess filters will be shown if the user clicks a
"view more" button. Defaults to 0 (no limit).
        :param int initial_limit_leeway: If the number of filters under this
            facet exceeds `initial_limit` by this tolerance margin or less, all
            filters will be shown. Defaults to 2.
:param str missing_label: Label to use for items that do not have any
value for this facet. Defaults to `None` (show no label at all).
:param BaseFacetCodec codec: Value encoder/decoder for this facet.
:param bool item_view: Show this facet on item view pages.
:param `renderers.Renderer` renderer: A renderer for this facet. The
rendering context provides the following variables:
- `spec`: The `FacetSpec` instance.
- `items`: The facet values retrieved by the search query.
- `mode`: A string whose value is one of the following:
- `'search'`: Displaying the facet on a search results page.
- `'field'`: Displaying the facet on a full bibliographic record
page.
- `'breadbox'`: Displaying the facet as a search criteria in the
breadbox.
.. seealso: Additional :meth:`BaseFieldSpec.__init__` arguments.
"""
super().__init__(**kwargs)
self.title = title
self.filter_key = filter_key
self.weight = weight
self.initial_limit = initial_limit
self.initial_limit_leeway = initial_limit_leeway
self.codec = codec or BaseFacetCodec()
self.missing_label = missing_label
self.sort_by = sort_by
self.sort_reverse = sort_reverse
self.item_view = item_view
self.allow_overlap = allow_overlap
self.query_class = query_class or Term
self.renderer = renderer or renderers.TemplateResolverRenderer(
'kerko/_facet_{mode}.html.jinja2'
)
def encode(self, value):
return self.codec.encode(value)
def decode(self, encoded_value, default_value=None, default_label=None):
if encoded_value is None:
return '', ''
return self.codec.decode(encoded_value, default_value, default_label)
@abstractmethod
def add_filter(self, value, active_filters):
"""
Add a value for this facet to active filters.
:param string value: The value to add.
:param MultiDict active_filters: The active filters to derive from.
:return MultiDict: A copy of the filters with the added value.
"""
@abstractmethod
def remove_filter(self, value, active_filters):
"""
Remove a value for this facet from active filters.
:param string value: The value to remove.
:param MultiDict active_filters: The active filters to derive from.
:return MultiDict: A copy of the filters with the value removed.
"""
@abstractmethod
def build(self, results, criteria, active_only=False):
"""
Construct a facet's items for display.
        :param results: Mapping of facet values to their counts, representing
            the facet's results from the search query.
:param Criteria criteria: The current search criteria. If None, the
facet will be built for performing a new search.
:param active_only: Only build the items that are related to active
filters, i.e., filters actually present in the search criteria.
"""
def sort_items(self, items):
if self.sort_by is None:
return
        # Sort items based on multiple keys.
return sorted(
items,
key=lambda x: (
# First sort key: show active items first.
not x['remove_url'],
# Second sort key: show items with missing labels last.
bool(x['label']) if self.sort_reverse else not x['label'],
# Third sort key: sort items according to the facet's specified
# sort keys. If 'count' is used as key, multiply the count value
# by -1 to reverse order (because the desired default when
# ordering by count is the descending order).
*[
x[k] * -1 if k == 'count' else
(sort_normalize(x[k]) if isinstance(x[k], str) else x[k])
for k in self.sort_by
]
),
reverse=self.sort_reverse
)
def render(self, items, mode):
return self.renderer.render(spec=self, items=items, mode=mode)
class FlatFacetSpec(FacetSpec):
def add_filter(self, value, active_filters):
if value is None: # Special case for missing value (None is returned by Whoosh).
value = ''
filters = active_filters.deepcopy()
active_values = filters.getlist(self.filter_key)
if active_values and value in active_values:
return None # Already filtering with value. No add needed.
filters.setlistdefault(self.filter_key).append(value)
return filters
def remove_filter(self, value, active_filters):
if value is None: # Special case for missing value (None is returned by Whoosh).
value = ''
filters = active_filters.deepcopy()
active_values = filters.getlist(self.filter_key)
if not active_values or value not in active_values:
return None # Not currently filtering with value. No remove needed.
active_values.remove(value)
filters.setlist(self.filter_key, active_values)
return filters
def build(self, results, criteria, active_only=False):
items = []
for value, count in results.items():
if value or self.missing_label:
value, label = self.decode(value, default_value=value, default_label=value)
new_filters = self.remove_filter(value, criteria.filters)
if new_filters:
remove_url = url_for(
'.search',
**criteria.params(
filters=new_filters,
options={
'page': None,
'page-len': None,
'id': None,
},
)
)
else:
remove_url = None
if remove_url or active_only:
add_url = None
else:
new_filters = self.add_filter(value, criteria.filters)
if new_filters:
add_url = url_for(
'.search',
**criteria.params(
filters=new_filters,
options={
'page': None,
'page-len': None,
'id': None,
},
)
)
else:
add_url = None
if remove_url or add_url: # Only items with an URL get displayed.
items.append({
'label': label,
'count': count,
'count_formatted': format_decimal(count, locale=get_locale()),
'remove_url': remove_url,
'add_url': add_url,
})
return self.sort_items(items)
class TreeFacetSpec(FacetSpec):
def __init__(self, path_separator='.', **kwargs):
super().__init__(**kwargs)
self.path_separator = path_separator
@staticmethod
def is_ancestor(ancestor, descendant):
"""
Return True if `ancestor` is an ancestor of `descendant`.
:param str ancestor: Potential ancestor.
:param str descendant: Path to look for potential ancestor.
"""
# True if ancestor is a prefix of descendant. Warning: This simple
# condition only works for finding ancestors when all path components
# have the same length, otherwise any partial prefix could match.
return descendant.find(ancestor) == 0 and ancestor != descendant
def get_parent(self, value):
parent = value.rsplit(sep=self.path_separator, maxsplit=1)[0]
return parent if parent != value else None
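    # Illustration (not executed): with the default path_separator '.',
    # is_ancestor('abc', 'abc.def') is True while is_ancestor('abc', 'abc') is
    # False, and get_parent('abc.def') == 'abc' while get_parent('abc') is None.
    # As warned in is_ancestor's docstring, the prefix test assumes components
    # of equal length: is_ancestor('ab', 'abc') would wrongly return True.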
def add_filter(self, value, active_filters):
if value is None:
# Special case for missing value (None is returned by Whoosh).
value = ''
filters = active_filters.deepcopy()
active_values = filters.getlist(self.filter_key)
for i, active_value in enumerate(active_values):
if value == active_value or self.is_ancestor(value, active_value):
# Already filtering with value or a descendant. No add needed.
return None
elif self.is_ancestor(active_value, value):
# Active value is ancestor of value. Replace the active value.
active_values[i] = value
filters.setlist(self.filter_key, active_values)
break
else:
# This is an all new filter. Add its value.
filters.setlistdefault(self.filter_key).append(value)
return filters
def remove_filter(self, value, active_filters):
if value is None:
# Special case for missing value (None is returned by Whoosh).
value = ''
filters = active_filters.deepcopy()
active_values = filters.getlist(self.filter_key)
if not active_values:
# Filter not active at all. No remove needed.
return None
new_values = []
change = False
for active_value in active_values:
if value == active_value or self.is_ancestor(value, active_value):
# Currently filtering with value or a descendant. Remove needed.
change = True
parent = self.get_parent(value)
if parent and parent not in new_values:
new_values.append(parent)
else:
# Filter unrelated to value. Preserve it.
new_values.append(active_value)
if not change:
# Neither filtering with value or a descendant. No remove needed.
return None
filters.setlist(self.filter_key, new_values)
return filters
def sort_tree(self, tree):
"""
Convert tree to list, sorting children along the way.
In the process, each item remains a dict as in a flat facet, but with
an extra 'children' key.
"""
lst = []
for child in tree['children'].values():
child['node']['children'] = self.sort_tree(child)
lst.append(child['node'])
return self.sort_items(lst)
def build(self, results, criteria, active_only=False):
tree = Tree()
for value, count in results.items():
if value or self.missing_label:
value, label = self.decode(value, default_value=value, default_label=value)
new_filters = self.remove_filter(value, criteria.filters)
if new_filters:
remove_url = url_for(
'.search',
**criteria.params(
filters=new_filters,
options={
'page': None,
'page-len': None,
'id': None,
},
)
)
else:
remove_url = None
if remove_url or active_only:
add_url = None
else:
new_filters = self.add_filter(value, criteria.filters)
if new_filters:
add_url = url_for(
'.search',
**criteria.params(
filters=new_filters,
options={
'page': None,
'page-len': None,
'id': None,
},
)
)
else:
add_url = None
if remove_url or add_url: # Only items with an URL get displayed.
path = value.split(sep=self.path_separator)
# Build the tree path. Part of the path may or may not already
# exist as the facet values are not ordered.
node = tree # Start at tree root.
for component in path:
node = node['children'][component]
# Add data at the leaf.
node['node'] = {
'id': '-'.join(path),
'label': label,
'count': count,
'count_formatted': format_decimal(count, locale=get_locale()),
'remove_url': remove_url,
'add_url': add_url,
}
return self.sort_tree(tree)
class CollectionFacetSpec(TreeFacetSpec):
"""
Specifies a facet based on a top-level Zotero collection.
A top-level Zotero collection can act as a facet when its key matches a
given `collection_key`. Subcollections become values within the facet.
"""
def __init__(self, *, collection_key, **kwargs):
# Provide some convenient defaults for this type of facet.
kwargs.setdefault('key', f'facet_collection_{collection_key}')
kwargs.setdefault('field_type', ID(stored=True))
kwargs.setdefault('filter_key', slugify(str(kwargs.get('title'))))
kwargs.setdefault('codec', CollectionFacetCodec())
kwargs.setdefault('query_class', Prefix)
kwargs.setdefault('extractor', extractors.CollectionFacetTreeExtractor())
super().__init__(**kwargs)
self.collection_key = collection_key
class SortSpec:
"""
Specifies a sort option.
This is a configuration element, with no effect on the search index schema.
"""
def __init__(
self,
key,
label,
fields,
weight=0,
reverse=False,
is_allowed=True
):
"""
Initialize a sort option.
:param str key: Key of this sort option.
:param str label: Label of this sort option.
:param list fields: List of `FieldSpec` instances to use when doing
search queries with this sort option, in order of precedence.
:param int weight: Determine the position of this option relative to the
other options.
:param bool reverse: Whether the fields should be sorted in reverse
order. To provide per-field reverse settings, an iterable may be
supplied instead of a bool, in which case it should contain the same
number of elements as the `fields` parameter.
:param callable is_allowed: Optional callable to determine if, given a
criteria object, the sort option should be allowed.
"""
self.key = key
self.label = label
self.fields = fields
self.weight = weight
if isinstance(reverse, Iterable):
if all(reverse):
self.reverse = True
elif not any(reverse):
self.reverse = False
else:
self.reverse = reverse
else:
self.reverse = reverse
self._is_allowed = is_allowed
def is_allowed(self, criteria):
if callable(self._is_allowed):
return self._is_allowed(criteria)
return self._is_allowed
def get_field_keys(self):
if self.fields:
return [spec.key for spec in self.fields]
return None
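# A sketch of a sort option with per-field reverse flags (the FieldSpec
# instances are hypothetical; in practice they come from the configured schema):
#
#     score_sort = SortSpec(
#         key='score', label='Relevance',
#         fields=[score_field, date_field],
#         reverse=(True, False),  # one flag per field; mixed values are kept as-is
#     )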
class BibFormatSpec:
"""
Specifies a bibliographic record download format.
This is a configuration element, with no effect on the search index schema.
"""
def __init__(
self,
key,
field,
label,
help_text,
weight,
extension,
mime_type,
group_format='{}',
group_item_delimiter=''
):
"""
Initialize a format.
:param str key: Key of this format.
:param FieldSpec field: `FieldSpec` instance associated to this format.
:param str label: Label of this format.
:param int weight: Determine the position of this format relative to the
others in lists.
:param str extension: File extension of this format.
:param str mime_type: MIME type of this format.
:param str group_format: Format string for wrapping multiple entries.
:param str group_item_delimiter: Delimiter string to insert between
entries.
"""
self.key = key
self.field = field
self.label = label
self.help_text = help_text
self.weight = weight
self.extension = extension
self.mime_type = mime_type
self.group_format = group_format
self.group_item_delimiter = group_item_delimiter
class RelationSpec:
"""
Specifies a type of relation between items.
This is a configuration element, with no effect on the search index schema.
"""
def __init__(
self,
*,
key,
field,
label,
weight,
id_fields,
directed=True,
reverse=False,
reverse_key='',
reverse_field_key='',
reverse_label=''
):
"""
Initialize a relation type.
:param str key: Key of this relation type.
:param FieldSpec field: `FieldSpec` instance associated to this relation
type. The field will be looked up in the search index to retrieve
item identifiers related to a given item.
:param str label: Label of this relation type.
:param int weight: Determine the position of this relation type
            relative to the other relation types in lists.
        :param list id_fields: List of `FieldSpec` instances representing the
fields to search when trying to resolve an item identifier.
        :param bool directed: Whether the relation is directed (one-way) or
            bidirectional.
:param bool reverse: Whether a reverse relation should be exposed. If
`directed` is `False`, this can only be `False` as well.
:param str reverse_key: Key of the reverse relation. Should be set only
if `reverse` is `True`.
:param str reverse_field_key: Field key to use for storing the reverse
relation. This isn't a `FieldSpec` as the field won't be looked up
in the search index. Instead, it will be dynamically populated with
items whose `field` contain a given item. Should be set only if
`reverse` is `True`.
:param str reverse_label: Label of the reverse relation. Should be set
only if `reverse` is `True`.
"""
assert not reverse and not directed or directed
assert not reverse and not reverse_key or reverse
assert not reverse and not reverse_field_key or reverse
assert not reverse and not reverse_label or reverse
self.key = key
self.field = field
self.label = label
self.weight = weight
self.id_fields = id_fields
self.directed = directed
self.reverse = reverse
self.reverse_key = reverse_key
self.reverse_field_key = reverse_field_key
self.reverse_label = reverse_label
class BadgeSpec:
"""
Specifies a badge.
Badges may be displayed on items (in search results and on full
bibliographic record pages).
This is a configuration element, with no effect on the search index schema.
"""
def __init__(
self,
key,
field,
activator,
renderer,
weight=0,
):
"""
Initialize this badge specification.
:param str key: Key of this badge.
:param FieldSpec field: `FieldSpec` instance required by this badge.
:param callable activator: Callable which, given a `FieldSpec` instance
and an item, must return `True` if the badge should be displayed.
:param `renderers.Renderer` renderer: A renderer for this badge. The
rendering context provides the following variables:
- `field`: The `FieldSpec` instance.
- `item`: The item retrieved from the search index.
- `mode`: A string whose value is one of the following:
              - `'result'`: The item is being viewed in a list of results.
- `'item'`: Viewing the item's full bibliographic record.
:param int weight: Determine the position of this badge relative to the
others.
"""
self.key = key
self.field = field
self.activator = activator
self.renderer = renderer
self.weight = weight
def is_active(self, item):
"""
        Return `True` if this badge is active for the given item.
"""
return self.activator(self.field, item)
def render(self, item, mode):
"""
Render the badge, if necessary, for the given item.
:return str: The rendered badge, or `''` if the badge is not activated
on the item.
"""
if self.is_active(item):
return self.renderer.render(field=self.field, item=item, mode=mode)
return ''
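# A sketch of a badge driven by a boolean field (names are illustrative and the
# template path is hypothetical):
#
#     attachment_badge = BadgeSpec(
#         key='attachment',
#         field=attachment_field,  # a FieldSpec whose stored value is truthy
#         activator=lambda field, item: bool(item.get(field.key)),
#         renderer=renderers.TemplateResolverRenderer(
#             'kerko/_badge_attachment.html.jinja2'
#         ),
#     )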
class LinkSpec(ABC):
def __init__(self, *, text: str, new_window=False, weight=0):
self.text = text
self.new_window = new_window
self.weight = weight
def is_active(self, request: Request) -> bool: # pylint: disable=unused-argument
return False
@property
@abstractmethod
def url(self) -> str:
pass
class LinkByURLSpec(LinkSpec):
def __init__(self, *, url: str, **kwargs):
super().__init__(**kwargs)
self._url = url
@property
def url(self) -> str:
return safe_url_string(self._url)
class LinkByEndpointSpec(LinkSpec):
def __init__(
self,
*,
endpoint: str,
external: bool = False,
anchor: Optional[str] = None,
scheme: Optional[str] = None,
parameters: Optional[Dict[str, Any]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.endpoint = endpoint
self.external = external
self.anchor = anchor or None # Replace empty string with None.
self.scheme = scheme or None # Replace empty string with None.
self.parameters = parameters or {}
def is_active(self, request: Request) -> bool:
return request.endpoint == self.endpoint
@property
def url(self) -> str:
return url_for(
self.endpoint,
_anchor=self.anchor,
_scheme=self.scheme,
_external=self.external,
**self.parameters,
)
class LinkGroupSpec:
def __init__(self, key: str, links: Optional[List[LinkSpec]] = None):
self.key = key
self.links = links or []
def add_item(self, item: LinkSpec):
self.links.append(item)
def get_ordered_links(self) -> List[LinkSpec]:
return sorted(self.links, key=lambda spec: spec.weight) | PypiClean |
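# A sketch of a navigation link group (the endpoint and URL are illustrative):
#
#     nav = LinkGroupSpec(key='navbar')
#     nav.add_item(LinkByEndpointSpec(endpoint='kerko.search', text='Search'))
#     nav.add_item(LinkByURLSpec(url='https://example.com/about', text='About',
#                                new_window=True))
#     urls = [link.url for link in nav.get_ordered_links()]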
/Madengine_client_messenger-0.0.1-py3-none-any.whl/client/add_contact.py | import sys
from PyQt6.QtWidgets import QDialog, QLabel, QComboBox, QPushButton
from PyQt6.QtCore import Qt
from logs.client_log_config import log
sys.path.append('../')
# Initialize the client logger
logger = log

# Dialog for choosing a contact to add
class AddContactDialog(QDialog):
def __init__(self, transport, database):
super().__init__()
self.transport = transport
self.database = database
self.setFixedSize(350, 120)
        self.setWindowTitle('Select a contact to add:')
        self.setAttribute(Qt.WidgetAttribute.WA_DeleteOnClose)
        self.setModal(True)
        self.selector_label = QLabel('Select a contact to add:', self)
self.selector_label.setFixedSize(200, 20)
self.selector_label.move(10, 0)
self.selector = QComboBox(self)
self.selector.setFixedSize(200, 20)
self.selector.move(10, 30)
        self.btn_refresh = QPushButton('Refresh list', self)
self.btn_refresh.setFixedSize(100, 30)
self.btn_refresh.move(60, 60)
        self.btn_ok = QPushButton('Add', self)
self.btn_ok.setFixedSize(100, 30)
self.btn_ok.move(230, 20)
        self.btn_cancel = QPushButton('Cancel', self)
self.btn_cancel.setFixedSize(100, 30)
self.btn_cancel.move(230, 60)
self.btn_cancel.clicked.connect(self.close)
        # Populate the list of possible contacts
        self.possible_contacts_update()
        # Wire the refresh button
        self.btn_refresh.clicked.connect(self.update_possible_contacts)

    # Fill the list of possible contacts: all known users minus the client's contacts
def possible_contacts_update(self):
self.selector.clear()
        # Sets of all known users and of the client's current contacts
        contacts_list = self.database.contacts_list(self.transport.account_name)
        users_cont = set(self.database.user_list_client())
        possible_contacts_set = users_cont.difference(contacts_list)
        # Remove ourselves from the user list so the client cannot add itself
        try:
            user = self.database.user_list_client(self.transport.account_name)[0]
            possible_contacts_set.discard(user)
        except IndexError:
            pass
possible_contact_list = []
        # Add the possible contacts to the selector
for item in possible_contacts_set:
possible_contact_list.append(item[1])
self.selector.addItems(sorted(possible_contact_list))
    # Refresh the possible contacts: update the known-users table from the
    # server, then rebuild the selector contents
def update_possible_contacts(self):
try:
self.transport.user_list_update()
except OSError:
pass
else:
            logger.debug('User list successfully refreshed from the server')
self.possible_contacts_update() | PypiClean |
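# A minimal launch sketch (illustrative only: `transport` and `database` must be
# the client's real transport and storage objects created elsewhere):
#
#     from PyQt6.QtWidgets import QApplication
#
#     app = QApplication([])
#     dialog = AddContactDialog(transport, database)
#     dialog.show()
#     app.exec()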
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/gym_compat/multidiscrete_gym_actspace.py |
import copy
import warnings
import numpy as np
from grid2op.Action import ActionSpace
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.gym_compat.utils import (ALL_ATTR,
ATTR_DISCRETE,
check_gym_version,
GYM_AVAILABLE,
GYMNASIUM_AVAILABLE)
class __AuxMultiDiscreteActSpace:
"""
This class allows to convert a grid2op action space into a gym "MultiDiscrete". This means that the action are
labeled, and instead of describing the action itself, you provide only its ID.
.. note::
        This action space is particularly suited for representing discrete actions.
It is possible to represent continuous actions with it. In that case, the continuous actions are "binarized"
thanks to the :class:`ContinuousToDiscreteConverter`. Feel free to consult its documentation for
more information.
In this case it will extract all the features in all the action with:
- "set_line_status": `n_line` dimensions, each containing 3 choices "DISCONNECT", "DONT AFFECT", "FORCE CONNECTION"
and affecting the powerline status (connected / disconnected)
- "change_line_status": `n_line` dimensions, each containing 2 elements "CHANGE", "DONT CHANGE" and
affecting the powerline status (connected / disconnected)
- "set_bus": `dim_topo` dimensions, each containing 4 choices: "DISCONNECT", "DONT AFFECT", "CONNECT TO BUSBAR 1",
or "CONNECT TO BUSBAR 2" and affecting to which busbar an object is connected
- "change_bus": `dim_topo` dimensions, each containing 2 choices: "CHANGE", "DONT CHANGE" and affect
to which busbar an element is connected
- "redispatch": `sum(env.gen_redispatchable)` dimensions, each containing a certain number of choices depending on the value
of the keyword argument `nb_bins["redispatch"]` (by default 7).
- "curtail": `sum(env.gen_renewable)` dimensions, each containing a certain number of choices depending on the value
of the keyword argument `nb_bins["curtail"]` (by default 7). This is
the "conversion to discrete action"
of the curtailment action.
- "curtail_mw": `sum(env.gen_renewable)` dimensions, completely equivalent to "curtail" for this representation.
This is the "conversion to discrete action" of the curtailment action.
- "set_storage": `n_storage` dimensions, each containing a certain number of choices depending on the value
of the keyword argument `nb_bins["set_storage"]` (by default 7). This is the "conversion to discrete action"
of the action on storage units.
- "raise_alarm": TODO
- "raise_alert": TODO
We offer some extra customization, with the keywords:
- "sub_set_bus": `n_sub` dimension. This type of representation encodes each different possible combination
of elements that are possible at each substation. The choice at each component depends on the element connected
at this substation. Only configurations that will not lead to straight game over will be generated.
- "sub_change_bus": `n_sub` dimension. Same comment as for "sub_set_bus"
- "one_sub_set": 1 single dimension. This type of representation differs from the previous one only by the fact
that each step you can perform only one single action on a single substation (so unlikely to be illegal).
- "one_sub_change": 1 single dimension. Same as above.
.. warning::
        We recommend to use either the "set" or the "change" way of looking at things (**ie** either you want
        to target a given state - in that case use "sub_set_bus", "set_line_status", "one_sub_set" or
        "set_bus" - __**OR**__ you prefer reasoning in terms of "I want to change this or that" - in that
        case use "sub_change_bus", "change_line_status", "one_sub_change" or "change_bus").

        Combining a "set" and a "change" on the same element will most likely lead to an "ambiguous action".
        Indeed, what can grid2op do if you "tell element A to go to bus 1" and at the same time "tell element A
        to switch to bus 2 if it was on bus 1 and to move to bus 1 if it was on bus 2"? It is not clear at all
        (hence the "ambiguous").

        No error will be thrown if you mix these; that is your absolute right, but be aware it might not
        lead to the result you expect.
.. note::
The arguments "set_bus", "sub_set_bus" and "one_sub_set" will all perform "set_bus" actions. The only
difference if "how you represent these actions":
- In "set_bus" each component represent a single element of the grid. When you sample an action
with this keyword you will possibly change all the elements of the grid at once (this is likely to
be illega). Nothing prevents you to perform "weird" stuff, for example disconnecting a load or a generator
(which is straight game over) or having a load or a generator that will be "alone" on a busbar (which
will also lead to a straight game over). You can do anything with it, but as always "A great power
comes with a great responsibility".
- In "sub_set_bus" each component represent a substation of the grid. When you sample an action
from this, you will possibly change all the elements of the grid at once (because you can act
on all the substation at the same time). As opposed to "set_bus" however this constraint the action
space to "action that will not lead directly to a game over", in practice.
- In "one_sub_set": the single component represent the whole grid. When you sample an action
with this, you will sample a single action acting on a single substation. You will not be able to act
on multiple substation with this.
For this reason, we also do not recommend using only one of these arguments and only provide
only one of "set_bus", "sub_set_bus" and "one_sub_set". Again, no error will be thrown if you mix them
but be warned that the resulting behaviour might not be what you expect.
.. warning::
The same as above holds for "change_bus", "sub_change_bus" and "one_sub_change": Use only one of these !
.. danger::
        The keys `set_bus` and `change_bus` do not have the same meaning in this representation of the
        action as in the DiscreteActSpace.
.. warning::
        Depending on the presence or absence of the gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`MultiDiscreteActSpace` will inherit from gymnasium if it's installed
(in this case it will be :class:`MultiDiscreteActSpaceGymnasium`), otherwise it will
inherit from gym (and will be exactly :class:`MultiDiscreteActSpaceLegacyGym`)
        - :class:`MultiDiscreteActSpaceGymnasium` will inherit from gymnasium if it's available and never
          from gym
        - :class:`MultiDiscreteActSpaceLegacyGym` will inherit from gym if it's available and never
          from gymnasium
See :ref:`gymnasium_gym` for more information
Examples
--------
If you simply want to use it you can do:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
from grid2op.gym_compat import GymEnv, MultiDiscreteActSpace
gym_env = GymEnv(env)
gym_env.action_space = MultiDiscreteActSpace(env.action_space)
You can select the attribute you want to keep, for example:
.. code-block:: python
        gym_env.action_space = MultiDiscreteActSpace(env.action_space,
                                                     attr_to_keep=['redispatch', "curtail", "sub_set_bus"])
You can also apply some basic transformation when you "discretize" continuous action
.. code-block:: python
        gym_env.action_space = MultiDiscreteActSpace(env.action_space,
                                                     attr_to_keep=['redispatch', "curtail", "sub_set_bus"],
                                                     nb_bins={"redispatch": 3, "curtail": 17},
                                                     )
By default it is "discretized" in 7 different "bins". The more "bins" there will be, the more "precise"
you can be in your control, but the higher the dimension of the action space.
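
    As a final, minimal sketch (same illustrative environment as above), a sampled gym action can be
    converted back to a grid2op action with :func:`MultiDiscreteActSpace.from_gym`:

    .. code-block:: python

        gym_act = gym_env.action_space.sample()            # a numpy array of integers
        g2op_act = gym_env.action_space.from_gym(gym_act)  # the corresponding grid2op action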
"""
ATTR_CHANGE = 0
ATTR_SET = 1
ATTR_NEEDBUILD = 2
ATTR_NEEDBINARIZED = 3
def __init__(self, grid2op_action_space, attr_to_keep=ALL_ATTR, nb_bins=None):
check_gym_version(type(self)._gymnasium)
if not isinstance(grid2op_action_space, ActionSpace):
raise RuntimeError(
f"Impossible to create a BoxGymActSpace without providing a "
f"grid2op action_space. You provided {type(grid2op_action_space)}"
f'as the "grid2op_action_space" attribute.'
)
if nb_bins is None:
nb_bins = {"redispatch": 7, "set_storage": 7, "curtail": 7, "curtail_mw": 7}
if attr_to_keep == ALL_ATTR:
            # By default, remove all the attributes that are not supported by the
            # action type. We do not do that if the user specified attributes to
            # keep: that is their responsibility in this case.
attr_to_keep = {
el for el in attr_to_keep if grid2op_action_space.supports_type(el)
}
for el in attr_to_keep:
if el not in ATTR_DISCRETE:
warnings.warn(
f'The class "MultiDiscreteActSpace" should mainly be used to consider only discrete '
f"actions (eg. set_line_status, set_bus or change_bus). Though it is possible to use "
f'"{el}" when building it, be aware that this continuous action will be treated '
f"as discrete by splitting it into bins. "
f'Consider using the "BoxGymActSpace" for these attributes.'
)
self._attr_to_keep = sorted(attr_to_keep)
act_sp = grid2op_action_space
self._act_space = copy.deepcopy(grid2op_action_space)
low_gen = -1.0 * act_sp.gen_max_ramp_down
high_gen = 1.0 * act_sp.gen_max_ramp_up
low_gen[~act_sp.gen_redispatchable] = 0.0
high_gen[~act_sp.gen_redispatchable] = 0.0
        # dict: attribute -> (choices per dimension, number of dimensions, attribute type)
self.dict_properties = {
"set_line_status": (
[3 for _ in range(act_sp.n_line)],
act_sp.n_line,
self.ATTR_SET,
),
"change_line_status": (
[2 for _ in range(act_sp.n_line)],
act_sp.n_line,
self.ATTR_CHANGE,
),
"set_bus": (
[4 for _ in range(act_sp.dim_topo)],
act_sp.dim_topo,
self.ATTR_SET,
),
"change_bus": (
[2 for _ in range(act_sp.dim_topo)],
act_sp.dim_topo,
self.ATTR_CHANGE,
),
"raise_alarm": (
[2 for _ in range(act_sp.dim_alarms)],
act_sp.dim_alarms,
self.ATTR_CHANGE,
),
"raise_alert": (
[2 for _ in range(act_sp.dim_alerts)],
act_sp.dim_alerts,
self.ATTR_CHANGE,
),
"sub_set_bus": (
None,
act_sp.n_sub,
self.ATTR_NEEDBUILD,
), # dimension will be computed on the fly, if the stuff is used
"sub_change_bus": (
None,
act_sp.n_sub,
self.ATTR_NEEDBUILD,
), # dimension will be computed on the fly, if the stuff is used
"one_sub_set": (
None,
1,
self.ATTR_NEEDBUILD,
), # dimension will be computed on the fly, if the stuff is used
"one_sub_change": (
None,
1,
self.ATTR_NEEDBUILD,
), # dimension will be computed on the fly, if the stuff is used
}
self._nb_bins = nb_bins
for el in ["redispatch", "set_storage", "curtail", "curtail_mw"]:
if el in attr_to_keep:
if el not in nb_bins:
raise RuntimeError(
f'The attribute you want to keep "{el}" is not present in the '
f'"nb_bins". This attribute is continuous, you have to specify in how '
f"how to convert it to a discrete space. See the documentation "
f"for more information."
)
nb_redispatch = act_sp.gen_redispatchable.sum()
nb_renew = act_sp.gen_renewable.sum()
if el == "redispatch":
self.dict_properties[el] = (
[nb_bins[el] for _ in range(nb_redispatch)],
nb_redispatch,
self.ATTR_NEEDBINARIZED,
)
elif el == "curtail" or el == "curtail_mw":
self.dict_properties[el] = (
[nb_bins[el] for _ in range(nb_renew)],
nb_renew,
self.ATTR_NEEDBINARIZED,
)
elif el == "set_storage":
self.dict_properties[el] = (
[nb_bins[el] for _ in range(act_sp.n_storage)],
act_sp.n_storage,
self.ATTR_NEEDBINARIZED,
)
else:
raise RuntimeError(f'Unknown attribute "{el}"')
self._dims = None
        self._functs = None  # final functions that are applied to the gym action to map it to a grid2op action
self._binarizers = None # contains all the stuff to binarize the data
self._types = None
nvec = self._get_info()
# initialize the base container
type(self)._MultiDiscreteType.__init__(self, nvec=nvec)
@staticmethod
def _funct_set(vect):
# gym encodes:
        # for set_bus: 0 -> -1, 1 -> 0 (don't change), 2 -> 1, 3 -> 2
        # for set_status: 0 -> -1, 1 -> 0 (don't change), 2 -> 1 [3 does not exist for set_line_status!]
vect -= 1
return vect
@staticmethod
def _funct_change(vect):
# gym encodes 0 -> False, 1 -> True
vect = vect.astype(dt_bool)
return vect
def _funct_substations(self, orig_act, attr_nm, vect):
"""
Used for "sub_set_bus" and "sub_change_bus"
"""
vect_act = self._sub_modifiers[attr_nm]
for sub_id, act_id in enumerate(vect):
orig_act += vect_act[sub_id][act_id]
def _funct_one_substation(self, orig_act, attr_nm, vect):
"""
Used for "one_sub_set" and "one_sub_change"
"""
orig_act += self._sub_modifiers[attr_nm][int(vect)]
def _get_info(self):
nvec = None
self._dims = []
self._functs = []
self._binarizers = {}
self._sub_modifiers = {}
self._types = []
box_space = None
dim = 0
for el in self._attr_to_keep:
if el in self.dict_properties:
nvec_, dim_, type_ = self.dict_properties[el]
if type_ == self.ATTR_CHANGE:
                    # I can convert them directly into discrete attributes because it's a
                    # recognized "change" attribute
funct = self._funct_change
elif type_ == self.ATTR_SET:
                    # I can convert them directly into discrete attributes because it's a
                    # recognized "set" attribute
funct = self._funct_set
elif type_ == self.ATTR_NEEDBINARIZED:
                    # base action was continuous, I need to convert it to a discrete action
                    # via "binarization", which is done automatically here
# from grid2op.gym_compat.box_gym_actspace import BoxGymActSpace
# from grid2op.gym_compat.continuous_to_discrete import (
# ContinuousToDiscreteConverter,
# )
if box_space is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
box_space = type(self)._BoxGymActSpaceType(
self._act_space,
attr_to_keep=[
"redispatch",
"set_storage",
"curtail",
"curtail_mw",
],
)
if el not in box_space._dict_properties:
raise RuntimeError(
f"Impossible to dertmine lowest and maximum value for "
f'key "{el}".'
)
low_, high_, shape_, dtype_ = box_space._dict_properties[el]
tmp_box = type(self)._BoxType(low=low_, high=high_, dtype=dtype_)
tmp_binarizer = type(self)._ContinuousToDiscreteConverterType(
init_space=tmp_box, nb_bins=self._nb_bins[el]
)
self._binarizers[el] = tmp_binarizer
funct = tmp_binarizer.gym_to_g2op
elif type_ == self.ATTR_NEEDBUILD:
                    # this attribute comes from substation manipulation; I need to build the entire space
nvec_ = []
self._sub_modifiers[el] = []
if el == "sub_set_bus":
# one action per substations, using "set"
for sub_id in range(self._act_space.n_sub):
act_this_sub = [self._act_space()]
act_this_sub += (
self._act_space.get_all_unitary_topologies_set(
self._act_space, sub_id=sub_id
)
)
nvec_.append(len(act_this_sub))
self._sub_modifiers[el].append(act_this_sub)
funct = self._funct_substations
elif el == "sub_change_bus":
# one action per substation, using "change"
for sub_id in range(self._act_space.n_sub):
acts_this_sub = [self._act_space()]
acts_this_sub += (
self._act_space.get_all_unitary_topologies_change(
self._act_space, sub_id=sub_id
)
)
nvec_.append(len(acts_this_sub))
self._sub_modifiers[el].append(acts_this_sub)
funct = self._funct_substations
elif el == "one_sub_set":
# an action change only one substation, using "set"
self._sub_modifiers[
el
] = self._act_space.get_all_unitary_topologies_set(
self._act_space
)
funct = self._funct_one_substation
nvec_ = [len(self._sub_modifiers[el])]
elif el == "one_sub_change":
# an action change only one substation, using "change"
self._sub_modifiers[
el
] = self._act_space.get_all_unitary_topologies_change(
self._act_space
)
funct = self._funct_one_substation
nvec_ = [len(self._sub_modifiers[el])]
else:
raise RuntimeError(
f'Unsupported attribute "{el}" when dealing with '
f"action on substation"
)
else:
raise RuntimeError(f"Unknown way to build the action.")
else:
li_keys = "\n\t- ".join(sorted(list(self.dict_properties.keys())))
raise RuntimeError(
f'Unknown action attributes "{el}". Supported attributes are: '
f"\n\t- {li_keys}"
)
dim += dim_
if nvec is not None:
nvec += nvec_
else:
nvec = nvec_
self._dims.append(dim)
self._functs.append(funct)
self._types.append(type_)
return nvec
def _handle_attribute(self, res, gym_act_this, attr_nm, funct, type_):
"""
INTERNAL
TODO
Parameters
----------
res
gym_act_this
attr_nm
Returns
-------
"""
# TODO code that !
vect = 1 * gym_act_this
if type_ == self.ATTR_NEEDBUILD:
funct(res, attr_nm, vect)
else:
tmp = funct(vect)
if attr_nm == "redispatch":
gym_act_this_ = np.full(
self._act_space.n_gen, fill_value=np.NaN, dtype=dt_float
)
gym_act_this_[self._act_space.gen_redispatchable] = tmp
tmp = gym_act_this_
elif attr_nm == "curtail" or attr_nm == "curtail_mw":
gym_act_this_ = np.full(
self._act_space.n_gen, fill_value=np.NaN, dtype=dt_float
)
gym_act_this_[self._act_space.gen_renewable] = tmp
tmp = gym_act_this_
setattr(res, attr_nm, tmp)
return res
def from_gym(self, gym_act):
"""
This is the function that is called to transform a gym action (in this case a numpy array!)
sent by the agent
and convert it to a grid2op action that will be sent to the underlying grid2op environment.
Parameters
----------
gym_act: ``numpy.ndarray``
the gym action
Returns
-------
grid2op_act: :class:`grid2op.Action.BaseAction`
The corresponding grid2op action.
"""
res = self._act_space()
prev = 0
for attr_nm, where_to_put, funct, type_ in zip(
self._attr_to_keep, self._dims, self._functs, self._types
):
if not gym_act.shape or not gym_act.shape[0]:
continue
this_part = 1 * gym_act[prev:where_to_put]
if attr_nm in self.dict_properties:
self._handle_attribute(res, this_part, attr_nm, funct, type_)
else:
raise RuntimeError(f'Unknown attribute "{attr_nm}".')
prev = where_to_put
return res
def close(self):
pass
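
# The concrete public classes below are generated dynamically with ``type`` so
# that the single implementation above can inherit from either gym's or
# gymnasium's ``MultiDiscrete`` container, depending on which package is
# installed (see the docstring's warning on gym vs gymnasium behaviour).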
if GYM_AVAILABLE:
from gym.spaces import Box as LegacyGymBox, MultiDiscrete as LegacyGymMultiDiscrete
from grid2op.gym_compat.box_gym_actspace import BoxLegacyGymActSpace
from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterLegacyGym
MultiDiscreteActSpaceLegacyGym = type("MultiDiscreteActSpaceLegacyGym",
(__AuxMultiDiscreteActSpace, LegacyGymMultiDiscrete, ),
{"_gymnasium": False,
"_BoxType": LegacyGymBox,
"_MultiDiscreteType": LegacyGymMultiDiscrete,
"_BoxGymActSpaceType": BoxLegacyGymActSpace,
"_ContinuousToDiscreteConverterType": ContinuousToDiscreteConverterLegacyGym,
"__module__": __name__})
MultiDiscreteActSpaceLegacyGym.__doc__ = __AuxMultiDiscreteActSpace.__doc__
MultiDiscreteActSpace = MultiDiscreteActSpaceLegacyGym
MultiDiscreteActSpace.__doc__ = __AuxMultiDiscreteActSpace.__doc__
if GYMNASIUM_AVAILABLE:
from gymnasium.spaces import Box, MultiDiscrete
from grid2op.gym_compat.box_gym_actspace import BoxGymnasiumActSpace
from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterGymnasium
MultiDiscreteActSpaceGymnasium = type("MultiDiscreteActSpaceGymnasium",
(__AuxMultiDiscreteActSpace, MultiDiscrete, ),
{"_gymnasium": True,
"_BoxType": Box,
"_MultiDiscreteType": MultiDiscrete,
"_BoxGymActSpaceType": BoxGymnasiumActSpace,
"_ContinuousToDiscreteConverterType": ContinuousToDiscreteConverterGymnasium,
"__module__": __name__})
MultiDiscreteActSpaceGymnasium.__doc__ = __AuxMultiDiscreteActSpace.__doc__
MultiDiscreteActSpace = MultiDiscreteActSpaceGymnasium
    MultiDiscreteActSpace.__doc__ = __AuxMultiDiscreteActSpace.__doc__
| PypiClean |
/BenchML-0.3.4.tar.gz/BenchML-0.3.4/benchml/benchml/logger.py |
import argparse
import os
import subprocess
import sys
import time
import numpy as np
try:
from lxml import etree
except ImportError:
pass
boolean_dict = {
"true": True,
"1": True,
"yes": True,
"false": False,
"0": False,
"no": False,
"none": False,
}
# =============================================================================
# XML WRAPPERS
# =============================================================================
class ExtendableNamespace(argparse.Namespace):
def AddNamespace(self, **kwargs):
for name in kwargs:
att = getattr(self, name, None)
if att is None:
setattr(self, name, kwargs[name])
else:
setattr(self, name, kwargs[name].As(type(att)))
return
def Add(self, name, value):
att = getattr(self, name, None)
if att is None:
setattr(self, name, value)
else:
att.Add(name, value)
return value
def GenerateTreeDict(tree, element, path="", paths_rel_to=None):
if type(element) == etree._Comment:
return [], {}
# Update path
if path == "":
if element.tag != paths_rel_to:
path += element.tag
else:
path += "/" + element.tag
# Containers for lower levels
tag_node = {}
nodes = []
# Construct Node
    xmlnode = XmlNode(element, path)  # tree.getpath(element)
nodes.append(xmlnode)
if len(element) == 0:
tag_node[path] = xmlnode
# Iterate over children
for child in element:
child_elements, childtag_element = GenerateTreeDict(tree, child, path)
nodes = nodes + child_elements
for key in childtag_element.keys():
if key in tag_node:
if type(tag_node[key]) != list:
tag_node[key] = [tag_node[key], childtag_element[key]]
else:
tag_node[key].append(childtag_element[key])
else:
tag_node[key] = childtag_element[key]
return nodes, tag_node
def NamespaceFromDict(tree_dict):
nspace = ExtendableNamespace()
for key in tree_dict.keys():
sections = key.split("/")
values = [None] * len(sections)
values[-1] = tree_dict[key]
add_to_nspace = nspace
for s, v in zip(sections, values):
if v is None:
if getattr(add_to_nspace, s, None):
add_to_nspace = getattr(add_to_nspace, s, None)
else:
sub_nspace = ExtendableNamespace()
add_to_nspace = add_to_nspace.Add(s, sub_nspace)
else:
add_to_nspace.Add(s, v)
return nspace
class XmlTree(list):
def __init__(self, xmlfile, paths_rel_to=None):
self.xmlfile = xmlfile
self.xtree = etree.parse(xmlfile)
self.xroot = self.xtree.getroot()
self.nodes, self.tag_node = GenerateTreeDict(self.xtree, self.xroot, "", paths_rel_to)
self.xspace = NamespaceFromDict(self.tag_node)
def SelectByTag(self, tag):
selection = [e for e in self.nodes if e.tag == tag]
return selection
def __getitem__(self, key):
return self.tag_node[key]
def keys(self):
return self.tag_node.keys()
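
# Usage sketch (hypothetical options file): XmlTree flattens an XML document
# into path-keyed nodes and a namespace, e.g. for
# ``<options><filter><cutoff>0.5</cutoff></filter></options>``:
#
#     tree = XmlTree("options.xml", paths_rel_to="options")
#     cutoff = tree["filter/cutoff"].As(float)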
class XmlNode(object):
def __init__(self, element, path):
self.path = path
self.node = element
self.tag = element.tag
self.value = element.text
self.attributes = element.attrib
def As(self, typ):
if typ == np.array:
sps = self.value.split()
return typ([float(sp) for sp in sps])
elif typ == bool:
return boolean_dict.get(self.value.lower())
else:
return typ(self.value)
def AsArray(self, typ, sep=" ", rep="\t\n"):
for r in rep:
self.value = self.value.replace(r, sep)
sp = self.value.split(sep)
return [typ(s) for s in sp if str(s) != ""]
def SetNodeValue(self, new_value):
self.value = new_value
if self.node is not None:
self.node.firstChild.nodeValue = new_value
return
def __getitem__(self, key):
return self.node.get(key)
# =============================================================================
# COMMAND LINE & XML INPUT INTERFACE
# =============================================================================
class CLIO_HelpFormatter(argparse.HelpFormatter):
def _format_usage(self, usage, actions, groups, prefix):
# default_usage = super()._format_usage(usage, actions, groups, prefix)
# res = f"{self._prog} : Command Line Interface\n{default_usage}\n"
res = f"{self._prog} : Command Line Interface\n"
return res
class OptionsInterface(object):
def __init__(self):
# COMMAND-LINE ARGUMENTS
self.is_connected_to_cmd_ln = False
self.cmd_ln_args = None
self.cmd_ln_opts = None
self.cmd_ln_nicknames = ["-h"]
self.boolean_translator = boolean_dict
self.subtype = str
# XML OPTIONS FILE
self.is_connected_to_xml = False
self.xmlfile = None
self.tree = None
self.xdict = None
self.xspace = None
# JOINED OPTIONS
self.opts = ExtendableNamespace()
def Connect(self, xmlfile=None):
self.ConnectToCmdLn()
self.ConnectToOptionsFile(xmlfile)
def Parse(self, xkey="options"):
if self.is_connected_to_cmd_ln:
self.ParseCmdLn()
if self.is_connected_to_xml:
self.ParseOptionsFileXml(xkey)
if self.is_connected_to_cmd_ln and not self.is_connected_to_xml:
return self.cmd_ln_opts
elif self.is_connected_to_xml and not self.is_connected_to_cmd_ln:
return self.xspace
else:
return self.cmd_ln_opts, self.xspace
def ParseOptionsFile(self, xmlfile, xkey):
self.xmlfile = xmlfile
self.is_connected_to_xml = True
self.ParseOptionsFileXml(xkey)
return self.xspace
# COMMAND-LINE PARSING
def __call__(self):
return self.cmd_ln_opts
def ConnectToCmdLn(self, prog=sys.argv[0], descr=None):
self.cmd_ln_args = argparse.ArgumentParser(
prog=sys.argv[0],
formatter_class=lambda prog: CLIO_HelpFormatter(prog, max_help_position=70),
)
self.is_connected_to_cmd_ln = True
return
def ParseCmdLn(self):
self.cmd_ln_opts = self.cmd_ln_args.parse_args()
    def InterpretAsBoolean(self, expr):
        try:
            return self.boolean_translator[expr.lower()]
        except KeyError:
            raise ValueError("CLIO does not know how to convert %s into a boolean." % expr)
def InterpretAsList(self, expr):
array = [self.subtype(e) for e in expr]
return array
def AddArg(self, name, type=str, nickname=None, default=None, destination=None, help=None):
# Sort out <name> (e.g. --time) vs <destination> (e.g., time)
if "--" != name[0:2]:
dest = name
name = "--" + name
else:
dest = name[2:]
# Sort out <default> vs <required>
if default is None:
required = True
else:
required = False
        # Construct default <help> if not given
if help is None:
help = "[type=%s default=%s]" % (repr(type), repr(default))
else:
help = "%s [type=%s, default=%s]" % (
help,
repr(type.__name__ if hasattr(type, "__name__") else str(type)),
repr(default),
)
# Construct <nickname> if not given
if nickname is None:
nickname = "-"
for char in dest:
nickname += char
if nickname not in self.cmd_ln_nicknames:
break
if nickname in self.cmd_ln_nicknames:
raise ValueError("CLIO could not construct nickname from %s option" % name)
self.cmd_ln_nicknames.append(nickname)
# Process type
action = "store"
if type in {int, float, str}:
nargs = None
elif type is bool:
type = self.InterpretAsBoolean
nargs = None
elif type is list:
type = str
nargs = "*"
elif type == "toggle":
if not default:
action = "store_true"
else:
action = "store_false"
self.cmd_ln_args.add_argument(
nickname, name, dest=dest, action=action, default=default, help=help
)
return
elif len(type) == 2 and type[0] == list:
type = type[1]
nargs = "*"
else:
raise NotImplementedError("CLIO does not know how to generate type '%s'" % type)
self.cmd_ln_args.add_argument(
nickname,
name,
dest=dest,
action=action,
nargs=nargs,
required=required,
type=type,
metavar=dest[0:1].upper(),
default=default,
help=help,
)
return
# OPTIONS FILE PARSING
def ConnectToOptionsFile(self, xmlfile):
if xmlfile is None or xmlfile == "":
return
self.xmlfile = xmlfile
self.is_connected_to_xml = True
return
def ParseOptionsFileXml(self, xkey="options"):
if self.xmlfile is None:
return
self.tree = XmlTree(self.xmlfile, paths_rel_to=xkey)
self.xdict = self.tree.tag_node
self.xspace = self.tree.xspace
return
def __getitem__(self, key):
try:
item = self.xspace.__dict__[key]
except KeyError:
try:
item = self.cmd_ln_opts.__dict__[key]
except KeyError:
raise AttributeError(f"No such option registered: '{key}'")
return item
class ShellInterface(object):
def __init__(self):
# PRINTER ATTRIBUTES
self.color_dict = {
"pp": "\033[95m",
"mb": "\033[34m",
"lb": "\033[1;34m",
"my": "\033[1;33m",
"mg": "\033[92m",
"mr": "\033[91m",
"ww": "\033[0;1m",
"ok": "\033[92m",
"xx": "\033[91m",
"warning": "\033[93m",
"error": "\033[95m",
"endcolor": "\033[0;1m",
}
self.justify_dict = {"o": " o ", ".": "... ", "r": "\r", "ro": "\r o "}
self.pp = OS_COLOR("pp")
self.lb = OS_COLOR("lb")
self.mb = OS_COLOR("mb")
self.mg = OS_COLOR("mg")
self.my = OS_COLOR("my")
self.mr = OS_COLOR("mr")
self.ww = OS_COLOR("ww")
self.ok = OS_COLOR("ok")
self.xx = OS_COLOR("xx")
self.colors = [OS_COLOR(c) for c in sorted(self.color_dict.keys())]
self.item = " o "
self.iitem = " - "
self.endl = OS_LINE_CHAR("\n")
self.flush = OS_LINE_CHAR("")
self.back = OS_LINE_CHAR("\r")
self.trail = " "
# LOGGING LEVEL
self.default = LOGLEVEL("info")
self.error = LOGLEVEL("error")
self.warn = LOGLEVEL("warn")
self.info = LOGLEVEL("info")
self.debug = LOGLEVEL("debug")
self.loglevel = LOGLEVEL("info")
self.mssglevel = LOGLEVEL("info")
# CURRENT STYLE SELECTION
self.sel_color = None
self.sel_justify = None
self.sel_header = False
self.sel_trim = "="
# EXE ATTRIBUTES
self.catch = OS_EXE_CATCH()
self.assert_zero = OS_EXE_ASSERT()
self.dev = OS_EXE_DEV("")
self.nodev = OS_EXE_DEV("")
self.devnull = OS_EXE_DEV()
self.devfile = OS_EXE_DEV("")
self.os_exe_get = False
self.os_exe_assert_zero = False
self.os_exe_dev = ""
self.os_exe_verbose = False
# LOGGING
self.verbose = False
self.logfile = None
# DIRECTORY HOPPING
self.paths_visited = [os.getcwd()]
self.exe_root_path = self.paths_visited[0]
self.N_store_paths_visited = 1 + 5
def __call__(self, mssg, c=None, j=None, h=False, t="="):
# c=color, j=justify, h=header, t=trim, u=upper-case
if j:
mssg = self.justify_dict[j] + mssg
if c is not None:
mssg = self.color_dict[c] + mssg + self.color_dict["endcolor"]
if h:
mssg = self.os_generate_header(mssg, trim=t)
# LOGFILE ADAPTOR
def ConnectToFile(self, logfile):
self.logfile = logfile
sys.stdout = open(logfile, "w")
self.devfile = OS_EXE_DEV(" >> {log} 2>> {log}".format(log=logfile))
return
def DisconnectFromFile(self):
if self.logfile is not None:
self.devfile = OS_EXE_DEV("")
self.logfile = None
sys.stdout = sys.__stdout__
else:
pass
return
def setLevel(self, name):
self.loglevel = LOGLEVEL(name)
# PRINTER METHODS
def __lshift__(self, mssg):
if type(mssg) == OS_LINE_CHAR:
if self.mssglevel <= self.loglevel:
# <FLUSH MESSAGE HERE>
sys.stdout.write(str(mssg))
sys.stdout.flush()
self.sel_color = None
self.mssglevel = self.default
return self
elif type(mssg) == OS_COLOR:
self.sel_color = str(mssg)
return self
elif type(mssg) == LOGLEVEL:
self.mssglevel = mssg
return self
if self.mssglevel > self.loglevel:
return self
mssg = str(mssg)
if self.sel_justify is not None:
mssg = self.justify_dict[self.sel_justify] + mssg
mssg += self.trail
if self.sel_color is not None:
mssg = self.color_dict[self.sel_color] + mssg + self.color_dict["endcolor"]
if self.sel_header:
mssg = self.os_generate_header(mssg, trim=self.sel_trim)
# <LOG MESSAGE HERE>
sys.stdout.write(mssg)
return self
def os_print(self, mssg, c=None, j=None, h=False, t="="):
# c=color, j=justify, h=header, t=trim, u=upper-case
if j:
mssg = self.justify_dict[j] + mssg
if c is not None:
mssg = self.color_dict[c] + mssg + self.color_dict["endcolor"]
if h:
mssg = self.os_generate_header(mssg, trim=t)
return
def os_print_config(self, c=None, j=None, h=False, t="=", tl=" "):
self.sel_color = c
self.sel_justify = j
self.sel_header = h
self.sel_trim = t
self.trail = tl
return
def os_print_reset(self):
self.sel_color = None
self.sel_justify = None
self.sel_header = False
self.sel_trim = "="
self.trail = " "
return
@staticmethod
def os_generate_header(title, trim="="):
try:
height, width = os.popen("stty size", "r").read().split()
width = int(width)
leftright = int((width - len(title) - 2) / 2)
except ValueError:
leftright = 40
return trim * leftright + " " + title + " " + trim * leftright
# SYSTEM COMMAND WRAPPER
def __rshift__(self, cmmd):
if type(cmmd) == OS_EXE_CATCH:
self.os_exe_get = True
return self
elif type(cmmd) == OS_EXE_DEV:
self.dev = cmmd
return self
elif type(cmmd) == OS_EXE_ASSERT:
self.os_exe_assert_zero = True
return self
# Redirect command as requested
if not self.os_exe_get:
if str(self.dev) != "":
cmmd += str(self.dev)
self.dev = self.nodev
else:
cmmd += str(self.devfile)
# Execute
if self.debug:
self << self.my << "exe:" << cmmd << endl
if self.os_exe_get:
output = subprocess.getoutput(cmmd)
self.os_exe_get = False
return output
else:
sign = os.system(cmmd)
if self.os_exe_assert_zero:
if str(sign) != "0":
raise RuntimeError("<OSIO> '%s' returned '%s'" % (cmmd, sign))
self.os_exe_assert_zero = False
return sign
# PROGRAM EXIT
def okquit(self, what=""):
if what != "":
self << self.ok << what << self.endl
self.DisconnectFromFile()
sys.exit(0)
def xxquit(self, what=""):
if what != "":
self << self.xx << "ERROR" << what << self.endl
self.DisconnectFromFile()
sys.exit(1)
# DIRECTORY NAVIGATION
def cd(self, d):
# Current working directory, for archiving ... =>
cwd = os.getcwd()
if type(d) == int:
# Change to previously visited path
os.chdir(self.paths_visited[d])
elif type(d) == str:
# Change to path as specified explicitly
os.chdir(d)
else:
raise NotImplementedError
# <= ... previous path
self.paths_visited.append(cwd)
if len(self.paths_visited) > self.N_store_paths_visited:
self.paths_visited.pop(1) # 0 stores root
if self.debug:
self << self.my << "cd: " << os.getcwd() << self.endl
return
def pwd(self):
return self.cwd()
@staticmethod
def cwd():
return os.getcwd()
def root(self):
self.cd(self.exe_root_path)
return
def abspath(self, file):
if not os.path.exists(file):
raise IOError("<osio::abspath> No such item in local directory: '%s'" % file)
return os.path.join(self.cwd(), file)
def mkcd(self, directory):
os.makedirs(directory, exist_ok=True)
self.cd(directory)
return directory
class OS_EXE_DEV(object):
def __init__(self, dev=" > /dev/null 2> /dev/null"):
self.dev = dev
def __str__(self):
return self.dev
class OS_EXE_CATCH(object):
def __init__(self):
self.catch = True
class OS_EXE_ASSERT(object):
def __init__(self):
self.assert_0 = True
class OS_COLOR(object):
def __init__(self, colstr):
self.colstr = colstr
def __str__(self):
return self.colstr
class LOGLEVEL(object):
levels = {"error": 0, "warn": 1, "info": 2, "debug": 3}
def __init__(self, name):
self.name = name
self.rank = self.levels[name]
def __ge__(self, other):
return self.rank >= other.rank
def __gt__(self, other):
return self.rank > other.rank
def __le__(self, other):
return self.rank <= other.rank
class OS_LINE_CHAR(object):
def __init__(self, char):
self.char = char
def __str__(self):
return self.char
class LOGGER(ShellInterface, OptionsInterface):
def __init__(self):
self.debug = False
ShellInterface.__init__(self)
OptionsInterface.__init__(self)
return
@staticmethod
def sleep(dt):
time.sleep(dt)
return
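
# Usage sketch (illustrative only): LOGGER combines C++-style stream printing
# with argparse-backed option handling, via the module-level ``log`` singleton
# defined below:
#
#     log.ConnectToCmdLn()
#     log.AddArg('folder', str, default='.', help='Working directory')
#     opts = log.Parse()
#     log << log.mg << "Processing" << opts.folder << endl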
log = LOGGER()
endl = OS_LINE_CHAR("\n")
flush = OS_LINE_CHAR("")
back = OS_LINE_CHAR("\r")
catch = OS_EXE_CATCH()
devnull = OS_EXE_DEV()
Mock = ExtendableNamespace
Args = ExtendableNamespace
| PypiClean |
/BaiduSpider-1.0.2.6.tar.gz/BaiduSpider-1.0.2.6/README.md |
<!--
*** Thanks for checking out the Best-README-Template. If you have a suggestion
*** that would make this better, please fork the repo and create a pull request
*** or simply open an issue with the tag "enhancement".
*** Thanks again! Now go create something AMAZING! :D
-->
<!-- PROJECT SHIELDS -->
<!--
*** I'm using markdown "reference style" links for readability.
*** Reference links are enclosed in brackets [ ] instead of parentheses ( ).
*** See the bottom of this document for the declaration of the reference variables
*** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use.
*** https://www.markdownguide.org/basic-syntax/#reference-style-links
-->
[![Contributors][contributors-shield]][contributors-url]
[![Forks][forks-shield]][forks-url]
[![Stargazers][stars-shield]][stars-url]
[![Issues][issues-shield]][issues-url]
[![MIT License][license-shield]][license-url]
<!-- PROJECT LOGO -->
<br />
<p align="center">
<a href="https://github.com/BaiduSpider/BaiduSpider">
<img src="https://baiduspider.github.io/assets/logo.png" alt="Logo" width="80" height="80">
</a>
<h3 align="center">BaiduSpider</h3>
<p align="center">
    A powerful toolkit for scraping Baidu search results
<br />
<span>简体中文</span>
|
<a href="https://github.com/BaiduSpider/BaiduSpider/blob/dev/README-zh-tw.md"><strong>繁體中文</strong></a>
|
<a href="https://github.com/BaiduSpider/BaiduSpider/blob/dev/README-en.md"><strong>English</strong></a>
<br />
<a href="https://baiduspider.github.io/"><strong>快速上手 »</strong></a>
<br />
<br />
<a href="https://baiduspider.github.io/usage/get-started/">查看示例</a>
·
<a href="https://github.com/BaiduSpider/BaiduSpider/issues">报告问题</a>
·
<a href="https://github.com/BaiduSpider/BaiduSpider/issues">请求需求</a>
</p>
</p>
<!-- TABLE OF CONTENTS -->
<details open="open">
  <summary>Table of Contents</summary>
  <ol>
    <li>
      <a href="#about-the-project">About The Project</a>
      <ul>
        <li><a href="#built-with">Built With</a></li>
      </ul>
    </li>
    <li>
      <a href="#getting-started">Getting Started</a>
      <ul>
        <li><a href="#prerequisites">Prerequisites</a></li>
        <li><a href="#installation">Installation</a></li>
      </ul>
    </li>
    <li><a href="#usage">Usage</a></li>
    <li><a href="#roadmap">Roadmap</a></li>
    <li><a href="#contributing">Contributing</a></li>
    <li><a href="#license">License</a></li>
    <li><a href="#contact">Contact</a></li>
    <li><a href="#disclaimer">Disclaimer</a></li>
    <li><a href="#contributors">Contributors</a></li>
    <li><a href="#acknowledgements">Acknowledgements</a></li>
  </ol>
</details>
<!-- ## Warning

This is the `dev` (development) branch. All code and features on this branch may be unstable and are for reference only. Using BaiduSpider from this branch in open-source projects is not recommended. -->
<!-- ABOUT THE PROJECT -->
## About The Project
[![Product Screenshot][product-screenshot]](https://baiduspider.github.io)

Search engines are extremely powerful tools. If other tools could integrate their many capabilities, those tools would become far more powerful as well. However, I could not find an open-source crawler that precisely extracts search engine results, so I wrote this project for scraping the Baidu search engine: BaiduSpider.

BaiduSpider's distinctive features:

* Saves data-extraction time, which is a real help for building and training data models in projects such as deep learning.
* Extracts data precisely and strips out advertisements.
* Large, comprehensive search results, with support for multiple search types and multiple return types.

Of course, no project is perfect, and every project needs the community's help to grow. You can help BaiduSpider improve by opening an Issue or submitting a PR! :smile:

Some of the more helpful documents and tools are listed in the Acknowledgements section at the end.
### Built With

The main open-source libraries BaiduSpider depends on:
* [BeautifulSoup 4](https://www.crummy.com/software/BeautifulSoup/)
* [requests](https://docs.python-requests.org/zh_CN/latest/)
<!-- GETTING STARTED -->
## Getting Started

To install BaiduSpider, follow the steps below.

### Prerequisites

Before installing BaiduSpider, make sure you have `Python 3.6+` installed:
```sh
$ python --version
```
If your version is lower than `3.6.0`, download and install Python from the [official Python website](https://www.python.org/downloads/).
### Installation

#### Install with `pip`

Type the following on the command line:
```sh
$ pip install baiduspider
```
#### Manual installation from GitHub
```sh
$ git clone [email protected]:BaiduSpider/BaiduSpider.git
# ...
$ python setup.py install
```
<!-- USAGE EXAMPLES -->
## Usage

You can use the following code to fetch Baidu web search results with BaiduSpider:
```python
# Import BaiduSpider
from baiduspider import BaiduSpider
from pprint import pprint

# Instantiate BaiduSpider
spider = BaiduSpider()

# Search the web
pprint(spider.search_web(query='Python'))
```
_For more examples and configuration, see the [documentation](https://baiduspider.github.io)_
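
`search_web` also exposes a page-number parameter (shown here as `pn`; treat the exact signature as an assumption and check the documentation for your installed version):

```python
# Fetch the second page of web results (pn = page number)
pprint(spider.search_web(query='Python', pn=2))
```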
<!-- ROADMAP -->
## Roadmap

See the [open issues](https://github.com/BaiduSpider/BaiduSpider/issues) for the latest roadmap and known issues.
<!-- CONTRIBUTING -->
## Contributing

Community contributions are the soul of open source, and they are how the whole community learns, exchanges ideas, and finds inspiration. Everyone is **warmly welcome** to take part in developing and maintaining this project.

To contribute:

1. Fork the project
2. Create a feature branch (`git checkout -b NewFeatures`)
3. Commit your changes after each edit (`git commit -m 'Add some AmazingFeature'`)
4. Push the changes to your own remote repository (`git push origin username/BaiduSpider`)
5. Open your repository on GitHub and submit a PR following the instructions
<!-- LICENSE -->
## License

This project is open-sourced under `GPL-V3`; see `LICENSE` for details.
<!-- CONTACT -->
## Contact

samzhangjy - [@samzhangjy](https://twitter.com/samzhangjy) - [email protected]

Project link: [https://github.com/BaiduSpider/BaiduSpider](https://github.com/BaiduSpider/BaiduSpider)
## Disclaimer

This project is intended for learning purposes only; it may not be used commercially or to scrape large amounts of Baidu data. In addition, the project is licensed under `GPL-V3`, which means any other project that uses it must be open source and credit the source, and the author assumes no legal liability arising from misuse. Violators bear the consequences themselves.
## Contributors
<a href="https://github.com/baiduspider/baiduspider/graphs/contributors">
<img src="https://contrib.rocks/image?repo=baiduspider/baiduspider" />
</a>
<!-- ACKNOWLEDGEMENTS -->
## Acknowledgements
* [BeautifulSoup 4](https://www.crummy.com/software/BeautifulSoup/)
* [Requests](https://docs.python-requests.org/zh_CN/latest/)
* [Img Shields](https://shields.io)
* [Gitmoji](https://gitmoji.dev/)
* [Best-README-Template](https://github.com/othneildrew/Best-README-Template)
* [Choose an Open Source License](https://choosealicense.com)
* [GitHub Pages](https://pages.github.com)
<!-- MARKDOWN LINKS & IMAGES -->
<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->
[contributors-shield]: https://img.shields.io/github/contributors/BaiduSpider/BaiduSpider?style=for-the-badge
[contributors-url]: https://github.com/BaiduSpider/BaiduSpider/graphs/contributors
[forks-shield]: https://img.shields.io/github/forks/BaiduSpider/BaiduSpider?style=for-the-badge
[forks-url]: https://github.com/BaiduSpider/BaiduSpider/network/members
[stars-shield]: https://img.shields.io/github/stars/BaiduSpider/BaiduSpider?style=for-the-badge
[stars-url]: https://github.com/BaiduSpider/BaiduSpider/stargazers
[issues-shield]: https://img.shields.io/github/issues/BaiduSpider/BaiduSpider?style=for-the-badge
[issues-url]: https://github.com/BaiduSpider/BaiduSpider/issues
[license-shield]: https://img.shields.io/github/license/BaiduSpider/BaiduSpider?style=for-the-badge
[license-url]: https://github.com/BaiduSpider/BaiduSpider/blob/master/LICENSE
[product-screenshot]: https://i.loli.net/2021/04/22/V7gGrmTDlfR5U24.png | PypiClean |
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/SpliceEnricher.py |
import os, sys, string, getopt
import math, numpy
from scipy import stats
import warnings
import time
def importFile(filename,convertToFloat=False):
db={}
firstRow=True
dataset_max=0
for line in open(filename,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
uid = t[0]
if firstRow: samples = t[1:]; firstRow=False
if len(t)>1:
values = t[1:]
if convertToFloat:
try:
values = map(float,values)
max_val = max(values)
if max_val > dataset_max:
dataset_max = max_val
except:
continue ### Header row
else:
values = t[1] ### Just store the gene symbol
else:
values = uid
db[uid]=values
if convertToFloat:
if dataset_max>100: ### Data is not log2
print 'Converting gene expression data to log2 values'
for uid in db:
db[uid] = map(lambda x: math.log(x+1,2), db[uid])
print 'Imported %d gene expression rows' % len(db)
return db, samples
else:
print 'Imported %d splicing factors' % len(db)
return db
def importPSIData(PSI_dir,samples):
"""Import PSI data from either EventAnnotation or PSI value file"""
firstRow=True
PSI_data_db={}
for line in open(PSI_dir,'rU').xreadlines():
data = line.rstrip()
PSI_data = string.split(data,'\t')
if firstRow:
data = string.replace(data,'.bed','')
PSI_data = string.split(data,'\t')
header_row = PSI_data
if 'ProteinPredictions' in PSI_data:
data_index = PSI_data.index('EventAnnotation')+1
uid_index = PSI_data.index('UID')
else:
uid_index = 0
data_index = 1
psi_samples = PSI_data[data_index:]
if psi_samples != samples:
print 'Error: The gene expression sample order does not match the PSI. Exiting';sys.exit()
else:
print 'Confirmed: The sample order of the gene expression and splicing files match.'
firstRow=False
else:
if len(PSI_data) != len(header_row):
empty_offset = len(header_row)-len(PSI_data)
PSI_data+=['']*empty_offset
junctionID = PSI_data[uid_index]
PSI_data = PSI_data[data_index:]
try:
values = map(lambda x: float(x), PSI_data)
except Exception:
values=[]
for value in PSI_data:
try: values.append(float(value))
except:
values.append(0.000101) ### Missing value
values = numpy.ma.masked_values(values,0.000101)
PSI_data_db[junctionID]=values
print 'Imported %d splicing event rows' % len(PSI_data_db)
return PSI_data_db
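
# Expected input sketch (inferred from the parsing above, not a specification):
# a tab-delimited PSI matrix whose header either starts directly with sample
# columns or contains AltAnalyze annotation columns up to 'EventAnnotation'
# followed by per-sample PSI values; missing values become the 0.000101 sentinel.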
def findcorrelations(SF_dir, PSI_dir, exp_dir, output_dir, PearsonCutoff):
print ''
### Import the list of splicing factors or other genes of interest
genesToExamine = importFile(SF_dir)
### Import the tab-delimited gene expression matrix
geneExpression_db, samples = importFile(exp_dir,convertToFloat=True)
### Import the PSI data
PSI_data_db = importPSIData(PSI_dir,samples)
### Create an export directory
results_dir = output_dir+'/SFCorrelations_rho-'+str(PearsonCutoff)
try: os.mkdir(results_dir)
except: pass
    eo=open(results_dir+'/SF_correlations.txt','w')
eo.write('Splicing Factor'+'\t'+'Events Count'+'\n')
counter=0
gene_correlation_time = []
for gene in genesToExamine:
gene_name = genesToExamine[gene]
if gene in geneExpression_db:
start_time = time.time()
### Hence, the gene is a splicing factor
expression_values = geneExpression_db[gene]
Corrsflist=[]
count=0
for junctionID in PSI_data_db:
psi_values = PSI_data_db[junctionID]
if 0.000101 in psi_values:
coefr=numpy.ma.corrcoef(expression_values,psi_values)
rho = coefr[0][1]
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning)
rho,p = stats.pearsonr(expression_values,psi_values)
if abs(rho)>PearsonCutoff:
count+=1
Corrsflist.append([junctionID,rho])
gene_correlation_time.append(time.time()-start_time)
eo.write(gene_name+'\t'+str(count)+'\n')
filename=results_dir+"/"+gene_name+"_"+str(count)+".txt"
if count>20:
eg=open(filename,"w")
eg.write("SplicingEvent\tSystemCode\tPearsonRho\n")
for (junctionID,rho) in Corrsflist:
eg.write(junctionID+"\t"+"Ae\t"+str(rho)+"\n")
eg.close()
counter+=1
print '*',
    print '\n...Correlations obtained in an average of %.2f seconds/gene' % numpy.mean(gene_correlation_time)
def performEventEnrichment(output_dir,eventDir,species):
"""Import significant splicing events from metaDataAnalysis.py comparisons and test for their
    statistical enrichment relative to the Splicing Factor correlated events."""
import collections
import mappfinder
event_db = collections.OrderedDict()
import UI
### Import the splice-ICGS significant splicing events per signature
files = UI.read_directory(eventDir)
for file in files:
if '.txt' in file and 'PSI.' in file:
ls=[]
event_db[file[:-4]]=ls ### This list is subsequently updated below
fn = eventDir+'/'+file
firstLine = True
for line in open(fn,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
if firstLine:
event_index = t.index('Event-Direction')
firstLine= False
continue
uid = t[0]
if 'U2AF1-like' in file:
if t[1] == "inclusion":
ls.append(uid) #ls.append((uid,t[event_index]))
else:
ls.append(uid) #ls.append((uid,t[event_index]))
### Import the splicing-factor correlated splicing events to identify associated signatures
splicing_factor_correlated_scores={}
gene_to_symbol=None
files = UI.read_directory(output_dir)
for file in files:
if '.txt' in file and '_' in file:
R_ls=[]
if 'ENS' in file:
splicing_factor = file[:-4]
if gene_to_symbol==None: ### Import only once
import gene_associations
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
sf = 'ENS'+string.split(splicing_factor,'ENS')[1]
splicing_factor = string.split(sf,'_')[0]
if splicing_factor in gene_to_symbol:
splicing_factor = gene_to_symbol[splicing_factor][0]
else:
splicing_factor = string.split(file[:-4],'_')[0]
fn = output_dir+'/'+file
firstLine = True
for line in open(fn,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
event = t[0]
R_ls.append(event)
R=len(R_ls)
N=80000
for signature in event_db:
n_ls=event_db[signature]
n = len(n_ls)
r_ls=set(R_ls).intersection(n_ls)
r = len(r_ls)
### Calculate a Z-score
try: z = Zscore(r,n,N,R)
except ZeroDivisionError: z = 0.0000
### Calculate a Z-score assuming zero matching entries
try: null_z = Zscore(0,n,N,R)
except ZeroDivisionError: null_z = 0.000
### Calculate a Fischer's Exact P-value
pval = mappfinder.FishersExactTest(r,n,R,N)
### Store these data in an object
zsd = mappfinder.ZScoreData(signature,r,n,z,null_z,n)
zsd.SetP(pval)
zsd.setAssociatedIDs(r_ls)
#print splicing_factor,'\t', signature,'\t', z, pval;sys.exit()
if splicing_factor in splicing_factor_correlated_scores:
signature_db = splicing_factor_correlated_scores[splicing_factor]
signature_db[signature]=zsd ### Necessary format for the permutation function
else:
signature_db={signature:zsd}
splicing_factor_correlated_scores[splicing_factor] = signature_db
results_dir = output_dir+'/SFEnrichmentResults'
result_file = results_dir+'/SF-correlated_SignatureScores.txt'
try: os.mkdir(results_dir)
except: pass
eo=open(result_file,'w')
eo.write(string.join(['Splicing Factor','Signature', 'Number Changed', 'Number Measured', 'Z-score','FisherExactP','AdjustedP'],'\t')+'\n') #'Events'
### Perform a permutation analysis to get BH adjusted p-values
for splicing_factor in splicing_factor_correlated_scores:
sorted_results=[]
signature_db = splicing_factor_correlated_scores[splicing_factor]
### Updates the adjusted p-value instances
mappfinder.adjustPermuteStats(signature_db)
for signature in signature_db:
zsd = signature_db[signature]
if float(zsd.ZScore())>1.96 and float(zsd.Changed())>2 and float(zsd.PermuteP())<0.05:
enriched_SFs={}
results = [splicing_factor,signature, zsd.Changed(), zsd.Measured(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP()] #string.join(zsd.AssociatedIDs(),'|')
sorted_results.append([float(zsd.PermuteP()),results])
sorted_results.sort() ### Sort by p-value
for (p,values) in sorted_results:
eo.write(string.join(values,'\t')+'\n')
if len(sorted_results)==0:
eo.write(string.join([splicing_factor,'NONE','NONE','NONE','NONE','NONE','NONE'],'\t')+'\n')
eo.close()
def Zscore(r,n,N,R):
"""where N is the total number of events measured:
R is the total number of events meeting the criterion:
n is the total number of events in this specific reference gene-set:
r is the number of events meeting the criterion in the examined reference gene-set: """
N=float(N) ### This bring all other values into float space
z = (r - n*(R/N))/math.sqrt(n*(R/N)*(1-(R/N))*(1-((n-1)/(N-1))))
return z
if __name__ == '__main__':
try:
import multiprocessing as mlp
mlp.freeze_support()
except Exception:
mpl = None
################ Default Variables ################
species = 'Hs'
platform = "RNASeq"
useMulti = False
output_dir = None
eventDir = None
PSI_dir = None
################ Comand-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Please designate a tab-delimited input expression file in the command-line"
print 'Example: SpliceEnricher.py --PSI "/Data/PSI_data.txt" --geneExp "/Data/GeneExp_data.txt" --geneList "/Data/SplicingFactors.txt" --rho 0.5'
else:
try:
options, remainder = getopt.getopt(sys.argv[1:],'', ['PSI=','species=','o=','platform=','useMulti=',
'geneExp=','geneList=','rho=','eventDir='])
except Exception,e:
print "Error",e
for opt, arg in options:
if opt == '--PSI': PSI_dir=arg
elif opt == '--geneExp': exp_dir=arg
elif opt == '--geneList': SF_dir=arg
elif opt == '--species': species=arg
elif opt == '--o': output_dir=arg
elif opt == '--platform': platform=arg
elif opt == '--rho': PearsonCutoff=float(arg)
elif opt == '--eventDir': eventDir=arg
if output_dir==None:
output_dir = string.replace(PSI_dir,'\\','/')
output_dir = string.join(string.split(output_dir,'/')[:-1],'/')
if PSI_dir !=None:
findcorrelations(SF_dir, PSI_dir, exp_dir, output_dir, PearsonCutoff)
if eventDir !=None:
performEventEnrichment(output_dir,eventDir,species) | PypiClean |
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/_source/plugins/link/plugin.js |
/*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.plugins.add( 'link',
{
init : function( editor )
{
// Add the link and unlink buttons.
editor.addCommand( 'link', new CKEDITOR.dialogCommand( 'link' ) );
editor.addCommand( 'anchor', new CKEDITOR.dialogCommand( 'anchor' ) );
editor.addCommand( 'unlink', new CKEDITOR.unlinkCommand() );
editor.ui.addButton( 'Link',
{
label : editor.lang.link.toolbar,
command : 'link'
} );
editor.ui.addButton( 'Unlink',
{
label : editor.lang.unlink,
command : 'unlink'
} );
editor.ui.addButton( 'Anchor',
{
label : editor.lang.anchor.toolbar,
command : 'anchor'
} );
CKEDITOR.dialog.add( 'link', this.path + 'dialogs/link.js' );
CKEDITOR.dialog.add( 'anchor', this.path + 'dialogs/anchor.js' );
// Add the CSS styles for anchor placeholders.
editor.addCss(
'img.cke_anchor' +
'{' +
'background-image: url(' + CKEDITOR.getUrl( this.path + 'images/anchor.gif' ) + ');' +
'background-position: center center;' +
'background-repeat: no-repeat;' +
'border: 1px solid #a9a9a9;' +
'width: 18px !important;' +
'height: 18px !important;' +
'}\n' +
'a.cke_anchor' +
'{' +
'background-image: url(' + CKEDITOR.getUrl( this.path + 'images/anchor.gif' ) + ');' +
'background-position: 0 center;' +
'background-repeat: no-repeat;' +
'border: 1px solid #a9a9a9;' +
'padding-left: 18px;' +
'}'
);
// Register selection change handler for the unlink button.
editor.on( 'selectionChange', function( evt )
{
/*
* Despite our initial hope, document.queryCommandEnabled() does not work
* for this in Firefox. So we must detect the state by element paths.
*/
var command = editor.getCommand( 'unlink' ),
element = evt.data.path.lastElement && evt.data.path.lastElement.getAscendant( 'a', true );
if ( element && element.getName() == 'a' && element.getAttribute( 'href' ) )
command.setState( CKEDITOR.TRISTATE_OFF );
else
command.setState( CKEDITOR.TRISTATE_DISABLED );
} );
editor.on( 'doubleclick', function( evt )
{
var element = CKEDITOR.plugins.link.getSelectedLink( editor ) || evt.data.element;
if ( element.is( 'a' ) )
evt.data.dialog = ( element.getAttribute( 'name' ) && !element.getAttribute( 'href' ) ) ? 'anchor' : 'link';
else if ( element.is( 'img' ) && element.getAttribute( '_cke_real_element_type' ) == 'anchor' )
evt.data.dialog = 'anchor';
});
// If the "menu" plugin is loaded, register the menu items.
if ( editor.addMenuItems )
{
editor.addMenuItems(
{
anchor :
{
label : editor.lang.anchor.menu,
command : 'anchor',
group : 'anchor'
},
link :
{
label : editor.lang.link.menu,
command : 'link',
group : 'link',
order : 1
},
unlink :
{
label : editor.lang.unlink,
command : 'unlink',
group : 'link',
order : 5
}
});
}
// If the "contextmenu" plugin is loaded, register the listeners.
if ( editor.contextMenu )
{
editor.contextMenu.addListener( function( element, selection )
{
if ( !element || element.isReadOnly() )
return null;
var isAnchor = ( element.is( 'img' ) && element.getAttribute( '_cke_real_element_type' ) == 'anchor' );
if ( !isAnchor )
{
if ( !( element = CKEDITOR.plugins.link.getSelectedLink( editor ) ) )
return null;
isAnchor = ( element.getAttribute( 'name' ) && !element.getAttribute( 'href' ) );
}
return isAnchor ?
{ anchor : CKEDITOR.TRISTATE_OFF } :
{ link : CKEDITOR.TRISTATE_OFF, unlink : CKEDITOR.TRISTATE_OFF };
});
}
},
afterInit : function( editor )
{
// Register a filter to displaying placeholders after mode change.
var dataProcessor = editor.dataProcessor,
dataFilter = dataProcessor && dataProcessor.dataFilter;
if ( dataFilter )
{
dataFilter.addRules(
{
elements :
{
a : function( element )
{
var attributes = element.attributes;
if ( attributes.name && !attributes.href )
return editor.createFakeParserElement( element, 'cke_anchor', 'anchor' );
}
}
});
}
},
requires : [ 'fakeobjects' ]
} );
CKEDITOR.plugins.link =
{
/**
* Get the surrounding link element of current selection.
* @param editor
* @example CKEDITOR.plugins.link.getSelectedLink( editor );
* @since 3.2.1
* The following selection will all return the link element.
* <pre>
* <a href="#">li^nk</a>
* <a href="#">[link]</a>
* text[<a href="#">link]</a>
* <a href="#">li[nk</a>]
* [<b><a href="#">li]nk</a></b>]
* [<a href="#"><b>li]nk</b></a>
* </pre>
*/
getSelectedLink : function( editor )
{
var range;
try
{
range = editor.getSelection().getRanges( true )[ 0 ];
range.shrink( CKEDITOR.SHRINK_TEXT );
var root = range.getCommonAncestor();
return root.getAscendant( 'a', true );
}
catch( e ) { return null; }
}
};
CKEDITOR.unlinkCommand = function(){};
CKEDITOR.unlinkCommand.prototype =
{
/** @ignore */
exec : function( editor )
{
/*
		 * execCommand( 'unlink', ... ) in Firefox leaves behind <span> tags where
		 * the <a> was, so again we have to remove the link ourselves. (See #430)
*
* TODO: Use the style system when it's complete. Let's use execCommand()
* as a stopgap solution for now.
*/
var selection = editor.getSelection(),
bookmarks = selection.createBookmarks(),
ranges = selection.getRanges(),
rangeRoot,
element;
for ( var i = 0 ; i < ranges.length ; i++ )
{
rangeRoot = ranges[i].getCommonAncestor( true );
element = rangeRoot.getAscendant( 'a', true );
if ( !element )
continue;
ranges[i].selectNodeContents( element );
}
selection.selectRanges( ranges );
editor.document.$.execCommand( 'unlink', false, null );
selection.selectBookmarks( bookmarks );
},
startDisabled : true
};
CKEDITOR.tools.extend( CKEDITOR.config,
{
linkShowAdvancedTab : true,
linkShowTargetTab : true
} ); | PypiClean |
/Canto-curses-0.9.9.tar.gz/Canto-curses-0.9.9/canto_curses/main.py |
CANTO_PROTOCOL_COMPATIBLE = 0.9
from canto_next.client import CantoClient
from canto_next.plugins import try_plugins, set_program
from canto_next.rwlock import alllocks
from canto_next.hooks import call_hook
from .config import config, finalize_eval_settings
from .tagcore import tag_updater, alltagcores
from .gui import CantoCursesGui, GraphicalLog
from threading import Thread
from queue import Queue
import logging
logging.basicConfig(
format = "%(asctime)s : %(name)s -> %(message)s",
datefmt = "%H:%M:%S",
level = logging.INFO
)
log = logging.getLogger("CANTO-CURSES")
import traceback
import locale
import getopt
import signal
import errno
import fcntl
import time
import sys
import os
# It's the CantoCurses class' responsibility to provide the subsequent Gui
# object with a solid foundation with other components. This includes parsing
# command line arguments, starting a canto-daemon instance if necessary, signal
# handling, and wrapping the socket communication.
class CantoCurses(CantoClient):
def init(self):
# For good curses behavior.
locale.setlocale(locale.LC_ALL, '')
# Used for GUI-signalled death.
self.pid = os.getpid()
self.done = False
# Whether or not to append pid to logfile
# (debug option)
self.log_fname_pid = False
version = "canto-curses " + VERSION + " " + GIT_HASH
optl = self.common_args('hl', ["help"], version)
if optl == -1:
sys.exit(-1)
if self.args(optl):
sys.exit(-1)
rootlog = logging.getLogger()
rootlog.setLevel(max(rootlog.level - 10 * self.verbosity,0))
self.glog_handler = GraphicalLog()
try:
if self.port < 0:
# If we're running locally, ensure daemon is running
self.start_daemon()
CantoClient.__init__(self, self.socket_path)
else:
CantoClient.__init__(self, None,\
port = self.port, address = self.addr)
except Exception as e:
log.error("Error: %s" % e)
sys.exit(-1)
# __init__ above started one connection, start another
# for priority stuff.
self.connect()
# Make sure we have permissions on the relevant, non-daemon files in
# the target directory (None of these will be used until we set_log)
if self.ensure_paths():
sys.exit(-1)
self.set_log()
log.info(version)
# Evaluate anything in the target /plugins directory.
set_program("canto-curses")
self.plugin_errors = try_plugins(self.conf_dir, self.plugin_default, self.disabled_plugins,
self.enabled_plugins)
def print_help(self):
print("USAGE: canto-curses [options]")
print("\t-h/--help\tThis help")
print("\t-V/--version\tPrint version")
print("\t-v/\t\tVerbose logging (for debug)")
print("\t-D/--dir <dir>\tSet configuration directory.")
print("\t-l\t\tAppend pid to log file name")
print("\nPlugin control\n")
print("\t--noplugins\t\t\t\tDisable plugins")
print("\t--enableplugins 'plugin1 plugin2...'\tEnable single plugins (overrides --noplugins)")
print("\t--disableplugins 'plugin1 plugin2...'\tDisable single plugins")
print("\nNetwork control\n")
print("NOTE: These should be used in conjunction with SSH port forwarding to be secure\n")
print("\t-a/--address <IP>\tConnect to this address")
print("\t-p/--port <port>\tConnect to this port")
def args(self, optlist):
for opt, arg in optlist:
if opt in ["-h", "--help"]:
self.print_help()
return 1
elif opt in ["-l"]:
self.log_fname_pid = True
return 0
def winch(self, a = None, b = None):
if self.gui.alive:
self.gui.winch()
def sigusr1(self, a = None, b = None):
import threading
held_locks = {}
code = {}
curthreads = threading.enumerate()
for threadId, stack in sys._current_frames().items():
name = str(threadId)
for ct in curthreads:
if ct.ident == threadId:
name = ct.name
code[name] = ["NAME: %s" % name]
for filename, lineno, fname, line in traceback.extract_stack(stack):
code[name].append('FILE: "%s", line %d, in %s' % (filename, lineno, fname))
if line:
code[name].append(" %s" % (line.strip()))
held_locks[name] = ""
for lock in alllocks:
if lock.writer_id == threadId:
held_locks[name] += ("%s(w)" % lock.name)
continue
for reader_id, reader_stack in lock.reader_stacks:
if reader_id == threadId:
held_locks[name] += ("%s(r)" % lock.name)
for k in code:
log.info('\n\nLOCKS: %s \n%s' % (held_locks[k], '\n'.join(code[k])))
log.info("\n\nSTACKS:")
for lock in alllocks:
for (reader_id, reader_stack) in lock.reader_stacks:
log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
log.info("Lock reader (thread %s):" % (reader_id,))
log.info(''.join(reader_stack))
for writer_stack in lock.writer_stacks:
log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
log.info("Lock writer (thread %s):" % (lock.writer_id,))
log.info(''.join(writer_stack))
log.info("VARS: %s" % config.vars)
log.info("OPTS: %s" % config.config)
def child(self, a = None, b = None):
try:
while True:
pid, status = os.waitpid(-1, os.WNOHANG)
if pid == 0:
break
log.debug("CHLD %d has died: %d", pid, status)
        except OSError as e:
            if e.errno == errno.ECHILD:
log.debug("CHLD no children?")
else:
raise
def run(self):
# We want this as early as possible
signal.signal(signal.SIGUSR1, self.sigusr1)
# Get config from daemon
if not config.init(self, CANTO_PROTOCOL_COMPATIBLE):
print("Invalid daemon version")
print("Wanted: %s" % CANTO_PROTOCOL_COMPATIBLE)
print("Got: %s" % config.version)
sys.exit(-1)
else:
log.info("Version check passed: %s" % CANTO_PROTOCOL_COMPATIBLE)
# Create Tags for each TagCore
self.gui = CantoCursesGui(self, self.glog_handler)
tag_updater.init(self)
# Initial signal setup.
signal.signal(signal.SIGWINCH, self.winch)
signal.signal(signal.SIGCHLD, self.child)
finalize_eval_settings()
call_hook("curses_start", [])
if self.plugin_errors:
log.error("The following error occurred loading plugins:\n\n%s" % self.plugin_errors)
while self.gui.alive:
self.gui.tick()
time.sleep(1)
def ensure_paths(self):
if os.path.exists(self.conf_dir):
if not os.path.isdir(self.conf_dir):
log.error("Error: %s is not a directory." % self.conf_dir)
return -1
if not os.access(self.conf_dir, os.R_OK):
log.error("Error: %s is not readable." % self.conf_dir)
return -1
if not os.access(self.conf_dir, os.W_OK):
log.error("Error: %s is not writable." % self.conf_dir)
return -1
else:
try:
os.makedirs(self.conf_dir)
except Exception as e:
log.error("Exception making %s : %s" % (self.conf_dir, e))
return -1
return self.ensure_files()
def ensure_files(self):
logname = "curses-log"
if self.log_fname_pid:
logname += ".%d" % os.getpid()
for f in [ logname ] :
p = self.conf_dir + "/" + f
if os.path.exists(p):
if not os.path.isfile(p):
log.error("Error: %s is not a file." % p)
return -1
if not os.access(p, os.R_OK):
log.error("Error: %s is not readable." % p)
return -1
if not os.access(p, os.W_OK):
log.error("Error: %s is not writable." % p)
return -1
self.log_path = self.conf_dir + "/" + logname
def set_log(self):
f = open(self.log_path, "w")
os.dup2(f.fileno(), sys.stderr.fileno())
def start(self):
try:
self.init()
self.run()
except KeyboardInterrupt:
pass
except Exception as e:
tb = traceback.format_exc()
log.error("Exiting on exception:")
log.error("\n" + "".join(tb))
call_hook("curses_exit", [])
log.info("Exiting.")
sys.exit(0)
def __init__(self):
self.start() | PypiClean |
/JUC-0.0.5.tar.gz/JUC-0.0.5/README.md | # JUC
JUC is a simple Unicode based text crypter
Note: *You can find all the examples in the `examples` folder*
# Installation
For now this project is not on [pypi](https://pypi.org) (pip), so you have to install it manually by downloading the repo.
# Preview
> ### File Encrypter
> 
> ### Original and decrypted file differences
> 
> The quality of the original file and the decrypted file is the same :)
>
> ### Text Crypter
> 
# Examples
> ### Encrypting a text
> ```py
> from JUC import *
> worker = Juc('YourSecretKey')
> print(worker.crypt(b'ehy, hello there'))
> ```
> ### Decrypting a text
> ```py
> from JUC import *
> worker = Juc('YourSecretKey')
> crypted = worker.crypt(b'ehy, hello there')
> print(worker.decrypt(crypted).decode())
> ```
> ### Encrypting a file
> ```py
> from JUC import *
>
> worker = Juc('YourSecretKey')
>
> filePath = 'image.png'
>
> with open(f'result.png', 'wb') as f:
> with open(filePath, 'rb') as file:
> content = file.read()
> crypted = worker.crypt(content)
> f.write(crypted.encode())
> ```
> ### Decrypting a file
> ```py
> from JUC import *
>
> worker = Juc('YourSecretKey')
>
> filePath = 'result.png'
> fileType = filePath.split('.')[-1]
>
> with open(filePath, 'r') as file:
> content = file.read()
> with open(f'result-decrypted.{fileType}', 'wb') as f:
> decrypted = worker.decrypt(content, False)
> f.write(decrypted)
> ``` | PypiClean |
/CosmoTech-SupplyChain-5.1.0.tar.gz/CosmoTech-SupplyChain-5.1.0/Supplychain/Generic/adt_writer.py | from Supplychain.Generic.timer import Timer
import os
import uuid
from azure.identity import DefaultAzureCredential
from azure.digitaltwins.core import DigitalTwinsClient
class ADTWriter(Timer):
def send_twin(self, twin: dict) -> dict:
digital_twin_client = DigitalTwinsClient(os.environ["AZURE_DIGITAL_TWINS_URL"], self.azure_credentials)
new_item = dict()
for key in twin.keys():
if type(twin[key]) is not dict or twin[key]:
new_item[key] = twin[key]
new_item.setdefault('$id', str(uuid.uuid1()))
        item_id = new_item['$id'].replace(" ", "")
return digital_twin_client.upsert_digital_twin(item_id,
new_item)
def send_relation(self, relation: dict) -> dict:
digital_twin_client = DigitalTwinsClient(os.environ["AZURE_DIGITAL_TWINS_URL"], self.azure_credentials)
relation['$sourceId'] = relation['$sourceId'].replace(" ", "")
relation['$targetId'] = relation['$targetId'].replace(" ", "")
relation.setdefault('$relationshipId', str(uuid.uuid1()))
relation['$relationshipId'] = relation['$relationshipId'].replace(" ", "")
new_item = dict()
for key in relation.keys():
if type(relation[key]) is not dict or relation[key]:
new_item[key] = relation[key]
return digital_twin_client.upsert_relationship(relation['$sourceId'],
relation['$relationshipId'],
new_item)
def send_items(self,
items: list):
self.reset()
for item in items:
_ = self.send_twin(item) if '$sourceId' not in item else self.send_relation(item)
self.display_message(f"Sent {len(items)} items in " + "{time_since_start:6.4f}s")
def purge_adt(self, query: str = 'SELECT * FROM digitaltwins', delete_relation: bool = True):
digital_twin_client = DigitalTwinsClient(os.environ["AZURE_DIGITAL_TWINS_URL"], self.azure_credentials)
self.display_message("Querying twins")
twin_list = []
for item in digital_twin_client.query_twins(query):
twin_id = str(item['$dtId'])
twin_list.append(twin_id)
current_length = len(twin_list)
if current_length % 100 == 0:
self.display_message(f"Found {current_length} twins")
self.display_message(f"Found a total of {len(twin_list)} twins")
if delete_relation:
self.display_message("Deleting relationships")
for twin_id in twin_list:
for relation in digital_twin_client.list_relationships(twin_id):
relation_id = relation['$relationshipId']
digital_twin_client.delete_relationship(twin_id, relation_id)
self.display_message("Deleting twins")
for twin_id in twin_list:
digital_twin_client.delete_digital_twin(twin_id)
self.display_message("Purge complete")
def __init__(self, force_purge: bool = False):
Timer.__init__(self, "[ADT Writer]")
self.azure_credentials = DefaultAzureCredential()
if force_purge:
self.display_message("Forcing purge of ADT")
self.purge_adt() | PypiClean |
/ConferenceCorpus-0.1.1.tar.gz/ConferenceCorpus-0.1.1/corpus/utils/download.py | import os
import urllib
import gzip
import shutil
import time
class Download:
'''
Utility functions for downloading data
'''
@staticmethod
def getURLContent(url:str):
with urllib.request.urlopen(url) as urlResponse:
content = urlResponse.read().decode()
return content
@staticmethod
def getFileContent(path:str):
with open(path, "r") as file:
content = file.read()
return content
@staticmethod
def needsDownload(filePath:str,force:bool=False)->bool:
'''
        check if a download of the given filePath is necessary, that is, the file
        does not exist, has a size of zero, or the download should be forced
Args:
filePath(str): the path of the file to be checked
force(bool): True if the result should be forced to True
        Returns:
            bool: True if a download of this file is needed
'''
if not os.path.isfile(filePath):
result=True
else:
stats=os.stat(filePath)
size=stats.st_size
result=force or size==0
return result
@staticmethod
def downloadBackupFile(url:str, fileName:str, targetDirectory:str, force:bool=False,profile:bool=True):
'''
Downloads from the given url the zip-file and extracts the file corresponding to the given fileName.
Args:
url: url linking to a downloadable gzip file
fileName: Name of the file that should be extracted from gzip file
targetDirectory(str): download the file to this directory
force (bool): True if the download should be forced
profile(bool): if True show profiling information
Returns:
Name of the extracted file with path to the backup directory
'''
extractTo = f"{targetDirectory}/{fileName}"
# we might want to check whether a new version is available
if Download.needsDownload(extractTo,force=force):
if not os.path.isdir(targetDirectory):
os.makedirs(targetDirectory)
zipped = f"{extractTo}.gz"
msg=f"Downloading {zipped} from {url} ... this might take a few seconds ..."
profiler=Profiler(msg=msg,profile=profile)
urllib.request.urlretrieve(url, zipped)
profiler.time(extraMsg=f" unzipping {extractTo} from {zipped}")
with gzip.open(zipped, 'rb') as gzipped:
with open(extractTo, 'wb') as unzipped:
shutil.copyfileobj(gzipped, unzipped)
if not os.path.isfile(extractTo):
raise (f"could not extract {fileName} from {zipped}")
return extractTo
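# Example (URL and paths are illustrative): fetch and unpack a gzipped backup
# once, re-using the cached copy on later runs unless force=True:
#
#   path = Download.downloadBackupFile(
#       "https://example.org/dumps/events.json.gz",
#       "events.json", "/tmp/backups")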
class Profiler:
'''
simple profiler
'''
def __init__(self,msg:str=None,profile=True):
'''
construct me with the given msg and profile active flag
Args:
msg(str): the message to show if profiling is active
profile(bool): True if messages should be shown
'''
if msg is not None:
self.msg=msg
else:
self.msg=""
self.profile=profile
self.starttime=time.time()
if profile:
print(f"Starting {msg} ...")
def time(self,extraMsg=""):
'''
time the action and print if profile is active
'''
elapsed=time.time()-self.starttime
if self.profile:
print(f"{self.msg}{extraMsg} took {elapsed:5.1f} s")
return elapsed | PypiClean |
/Mikado-2.3.4.tar.gz/Mikado-2.3.4/util/class_run.py |
import subprocess,sys,argparse,os
import shutil
import logging
def main():
logging.basicConfig(format=" %(asctime)s - %(levelname)s - %(message)s",
level=logging.INFO)
parser=argparse.ArgumentParser("Quick utility to rewrite the wrapper for CLASS.")
parser.add_argument("--clean", default=False, action="store_true",
help="Flag. If set, remove tepmorary files.")
parser.add_argument("--force", default=False, action="store_true",
help="Flag. If set, it forces recalculation of all intermediate files.")
parser.add_argument("-c","--class_options", type=str, default='',
help="Additional options to be passed to CLASS. Default: no additional options.")
parser.add_argument("-p", "--processors", type=int, default=1,
help="Number of processors to use with class.")
parser.add_argument("--class_help", action="store_true",
default=False,
help="If called, the wrapper will ask class to display its help and exit.")
parser.add_argument("-v", "--verbose", action="store_true", default=False)
parser.add_argument("bam", type=argparse.FileType('rb'), default=None, nargs="?", help="Input BAM file.")
parser.add_argument("out", nargs="?", type=argparse.FileType('wt'), default=sys.stdout,
help="Optional output file.")
args=parser.parse_args()
if args.class_help:
print("Calling CLASS help..", file=sys.stderr)
subprocess.call("class", shell=True)
sys.exit(0)
if args.bam is None:
parser.error("The input BAM is required as an argument.")
args.class_options+=" -p {0} ".format(args.processors) #Add the processors as argument
logging.info("CLI: {0}".format(
" ".join(sys.argv)))
args.bam.close() #Quick and dirty check that the file exists.
args.bam=os.path.abspath(args.bam.name) #Absolute position
prefix=os.path.splitext(args.bam)[0] #Prefix without the .bam extension (comprehensive of dirs)
# if shutil.which("samtools") is None:
# logging.debug("Loading the SAMTOOLS utility")
# subprocess.call('source samtools-1.1', shell=True) #Load samtools if it is not present already
# else: pass
# # if shutil.which("junc") is None or shutil.which("class") is None:
# logging.debug("Loading CLASS")
# subprocess.call("source class-2.12", shell=True)
# # else: pass
depth_file="{0}.depth".format(prefix)
if not os.path.exists(depth_file) or args.force:
if os.path.exists(depth_file):
logging.warning("Deleting old depth file, because of --force option")
with open(depth_file, 'wt') as depth_buffer:
logging.info("Calculating depth with samtools")
subprocess.call('samtools depth {0}'.format(args.bam), stdout=depth_buffer, shell=True)
else:
logging.warning("Depth file already present. Skipping this phase.")
splice_file="{0}.splice".format(prefix)
if not os.path.exists(splice_file) or args.force:
if os.path.exists(splice_file):
logging.warning("Deleting old splice file, because of --force option")
with open(splice_file, 'wt') as splice_buffer:
logging.info("Recovering junctions with the internal utility junc")
subprocess.call("junc {0}".format(args.bam), stdout=splice_buffer, shell=True)
else:
logging.warning("Splice file already present. Skipping this phase.")
logging.info("Launching the main process")
class_cli="class {0} {1}".format(prefix, args.class_options)
if args.verbose:
class_cli+=" --verbose "
logging.info("CLASS CLI:\n\t{0}".format(class_cli))
class_sub=subprocess.Popen(class_cli, shell=True,
stdout=args.out, stderr=subprocess.PIPE)
for line in class_sub.stderr:
line=line.decode().rstrip()
logging.info("CLASS message:\n{0}".format(line))
if args.clean:
logging.info("Removing temporary files..")
os.remove(depth_file)
logging.debug("Removed the DEPTH file")
os.remove(splice_file)
logging.debug("Removed the SPLICE file")
logging.info("CLASS finished!")
return
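# Example invocation (file names are illustrative):
#
#   python class_run.py -p 4 --clean aligned.bam > transcripts.gtf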
if __name__=='__main__': main() | PypiClean |
/CCC-2.0.1.tar.gz/CCC-2.0.1/ccc/packages/migrations/0004_setup_default_packages.py |
from django.db import migrations
def setup_default_packages(apps, schema_editor):
"""Setup the current default/productive packages on CCC"""
packages = [
{
"model": "packages.packagetype",
"pk": 1,
"fields": {
"title": "Basic",
"sku": "basic",
"type": "2",
"local": True,
"is_active": True,
"cost": "99.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 100,
"talktime": 100,
"mms": 100,
"email": 250,
"is_twilio_number_included": True,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": True,
"teams": 0,
"digital_att": 500,
"phones": 1,
"scanner": True,
"scanner_cost": 24,
"creator": None,
"date_created": "2014-03-16T23:43:27Z",
"last_updated": "2018-03-15T18:48:38.842Z",
"ordering": 1
}
},
{
"model": "packages.packagetype",
"pk": 2,
"fields": {
"title": "Pro",
"sku": "pro",
"type": "2",
"local": True,
"is_active": True,
"cost": "199.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 250,
"talktime": 250,
"mms": 250,
"email": 600,
"is_twilio_number_included": True,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": True,
"teams": 0,
"digital_att": 1200,
"phones": 2,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2014-03-18T03:54:14Z",
"last_updated": "2016-09-13T18:24:43.112Z",
"ordering": 2
}
},
{
"model": "packages.packagetype",
"pk": 3,
"fields": {
"title": "Executive",
"sku": "executive",
"type": "2",
"local": True,
"is_active": True,
"cost": "349.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 450,
"talktime": 450,
"mms": 450,
"email": 1500,
"is_twilio_number_included": True,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": True,
"teams": 0,
"digital_att": 2500,
"phones": 4,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2014-03-18T03:55:40Z",
"last_updated": "2016-09-13T18:26:33.105Z",
"ordering": 3
}
},
{
"model": "packages.packagetype",
"pk": 4,
"fields": {
"title": "Test Drive",
"sku": "test-drive-2",
"type": "2",
"local": True,
"is_active": True,
"cost": "1.00",
"trial_days": 7,
"package_type_after_trial": 1,
"sms": 5,
"talktime": 5,
"mms": 5,
"email": 5,
"is_twilio_number_included": True,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": True,
"teams": 0,
"digital_att": 0,
"phones": 1,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2014-03-24T20:27:32Z",
"last_updated": "2016-08-09T08:25:59.068Z",
"ordering": 0
}
},
{
"model": "packages.packagetype",
"pk": 5,
"fields": {
"title": "Basic (archived)",
"sku": "basic-3",
"type": "1",
"local": True,
"is_active": False,
"cost": "99.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 200,
"talktime": 200,
"mms": 200,
"email": 500,
"is_twilio_number_included": False,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": False,
"teams": 0,
"digital_att": 0,
"phones": 0,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2014-07-17T13:43:08Z",
"last_updated": "2016-08-08T17:02:36.396Z",
"ordering": 6
}
},
{
"model": "packages.packagetype",
"pk": 6,
"fields": {
"title": "Test Drive (Archived)",
"sku": "test-drive-1",
"type": "2",
"local": True,
"is_active": False,
"cost": "29.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 20,
"talktime": 20,
"mms": 20,
"email": 100,
"is_twilio_number_included": True,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": True,
"teams": 29,
"digital_att": 500,
"phones": 1,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2015-07-24T00:51:10Z",
"last_updated": "2016-08-08T17:02:35.705Z",
"ordering": 5
}
},
{
"model": "packages.packagetype",
"pk": 7,
"fields": {
"title": "Basic (Archived)",
"sku": "basic-2",
"type": "2",
"local": True,
"is_active": False,
"cost": "99.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 200,
"talktime": 200,
"mms": 200,
"email": 500,
"is_twilio_number_included": True,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": True,
"teams": 10,
"digital_att": 500,
"phones": 5,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2015-07-24T03:51:10Z",
"last_updated": "2016-08-08T17:02:35.071Z",
"ordering": 4
}
},
{
"model": "packages.packagetype",
"pk": 9,
"fields": {
"title": "SMS ",
"sku": "sms",
"type": "1",
"local": True,
"is_active": True,
"cost": "50.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 250,
"talktime": 0,
"mms": 0,
"email": 0,
"is_twilio_number_included": False,
"note": "",
"social_media": False,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": False,
"teams": 0,
"digital_att": 0,
"phones": 0,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2017-01-12T06:54:26.402Z",
"last_updated": "2017-01-12T15:25:12.635Z",
"ordering": 7
}
},
{
"model": "packages.packagetype",
"pk": 10,
"fields": {
"title": "MMS",
"sku": "mms",
"type": "1",
"local": True,
"is_active": True,
"cost": "75.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 0,
"talktime": 0,
"mms": 250,
"email": 0,
"is_twilio_number_included": False,
"note": "",
"social_media": False,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": False,
"teams": 0,
"digital_att": 0,
"phones": 0,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2017-01-12T15:32:15.698Z",
"last_updated": "2017-01-12T15:32:15.698Z",
"ordering": 8
}
},
{
"model": "packages.packagetype",
"pk": 11,
"fields": {
"title": "Voice",
"sku": "voice",
"type": "1",
"local": True,
"is_active": True,
"cost": "25.00",
"trial_days": 0,
"package_type_after_trial": None,
"sms": 0,
"talktime": 250,
"mms": 0,
"email": 0,
"is_twilio_number_included": False,
"note": "",
"social_media": True,
"campaigns": True,
"team": True,
"rover_min": True,
"recurring": False,
"teams": 0,
"digital_att": 0,
"phones": 0,
"scanner": True,
"scanner_cost": 0,
"creator": None,
"date_created": "2017-01-12T15:33:38.059Z",
"last_updated": "2017-01-12T15:33:38.059Z",
"ordering": 9
}
}
]
PackageTypeModel = apps.get_model('packages', 'PackageType')
for record in packages:
package_after_trial = None
if record['fields']['package_type_after_trial'] is not None:
package_after_trial = PackageTypeModel.objects.get(pk=record['fields']['package_type_after_trial'])
entry = {
"pk": record['pk'],
"title": record['fields']['title'],
"sku": record['fields']['sku'],
"type": record['fields']['type'],
"local": record['fields']['local'],
"is_active": record['fields']['is_active'],
"cost": record['fields']['cost'],
"trial_days": record['fields']['trial_days'],
"package_type_after_trial": package_after_trial,
"sms": record['fields']['sms'],
"talktime": record['fields']['talktime'],
"mms": record['fields']['mms'],
"email": record['fields']['email'],
"is_twilio_number_included": record['fields']['is_twilio_number_included'],
"note": record['fields']['note'],
"social_media": record['fields']['social_media'],
"campaigns": record['fields']['campaigns'],
"team": record['fields']['team'],
"rover_min": record['fields']['rover_min'],
"recurring": record['fields']['recurring'],
"teams":record['fields']['teams'],
"digital_att": record['fields']['digital_att'],
"phones": record['fields']['phones'],
"scanner": record['fields']['scanner'],
"scanner_cost": record['fields']['scanner_cost'],
"creator": record['fields']['creator'],
"ordering": record['fields']['ordering'],
}
pack = PackageTypeModel(**entry)
pack.save()
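# A reverse operation is intentionally omitted. If rollbacks were ever needed,
# a sketch (assuming nothing else references these pks yet) could simply
# delete the seeded rows:
#
#   def remove_default_packages(apps, schema_editor):
#       PackageType = apps.get_model('packages', 'PackageType')
#       PackageType.objects.filter(pk__in=[1, 2, 3, 4, 5, 6, 7, 9, 10, 11]).delete()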
class Migration(migrations.Migration):
dependencies = [
('packages', '0003_auto_20180828_1440'),
]
operations = [
migrations.RunPython(setup_default_packages)
] | PypiClean |
/BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/Orange/widgets/visualize/owradviz.py | import warnings
from itertools import islice, permutations, chain
from math import factorial
import numpy as np
import pyqtgraph as pg
from AnyQt.QtCore import Qt, QRectF, QPoint, pyqtSignal as Signal
from AnyQt.QtGui import QStandardItem, QColor
from pyqtgraph.graphicsItems.ScatterPlotItem import ScatterPlotItem
from scipy.spatial import distance
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from Orange.data import Table, Domain
from Orange.preprocess.score import ReliefF, RReliefF
from Orange.projection import RadViz
from Orange.widgets import widget, gui
from Orange.widgets.gui import OWComponent
from Orange.widgets.settings import Setting, ContextSetting, SettingProvider
from Orange.widgets.utils.plot.owplotgui import VariableSelectionModel, \
variables_selection
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.visualize.utils import VizRankDialog
from Orange.widgets.visualize.utils.component import OWGraphWithAnchors
from Orange.widgets.visualize.utils.plotutils import TextItem
from Orange.widgets.visualize.utils.widget import OWAnchorProjectionWidget
from Orange.i18n_config import *
MAX_DISPLAYED_VARS = 20
MAX_LABEL_LEN = 16
def __(key):
return i18n.t("widget.visualize.visualize.owradviz." + key)
class RadvizVizRank(VizRankDialog, OWComponent):
captionTitle = __("caption_title")
n_attrs = Setting(3)
minK = 10
attrsSelected = Signal([])
_AttrRole = next(gui.OrangeUserRole)
percent_data_used = Setting(100)
def __init__(self, master):
"""Add the spin box for maximal number of attributes"""
VizRankDialog.__init__(self, master)
OWComponent.__init__(self, master)
self.master = master
self.n_neighbors = 10
box = gui.hBox(self)
max_n_attrs = min(MAX_DISPLAYED_VARS, len(master.model_selected))
self.n_attrs_spin = gui.spin(
box, self, "n_attrs", 3, max_n_attrs, label=__("row_maximum_variable"),
controlWidth=50, alignment=Qt.AlignRight, callback=self._n_attrs_changed)
gui.rubber(box)
self.last_run_n_attrs = None
self.attr_color = master.attr_color
self.attr_ordering = None
self.data = None
self.valid_data = None
self.rank_table.clicked.connect(self.on_row_clicked)
self.rank_table.verticalHeader().sectionClicked.connect(
self.on_header_clicked)
def initialize(self):
super().initialize()
self.attr_color = self.master.attr_color
def _compute_attr_order(self):
"""
used by VizRank to evaluate attributes
"""
master = self.master
attrs = [v for v in master.primitive_variables
if v is not self.attr_color]
data = self.master.data.transform(Domain(attributes=attrs, class_vars=self.attr_color))
self.data = data
self.valid_data = np.hstack((~np.isnan(data.X), ~np.isnan(data.Y.reshape(len(data.Y), 1))))
relief = ReliefF if self.attr_color.is_discrete else RReliefF
weights = relief(n_iterations=100, k_nearest=self.minK)(data)
attrs = sorted(zip(weights, attrs), key=lambda x: (-x[0], x[1].name))
self.attr_ordering = attr_ordering = [a for _, a in attrs]
return attr_ordering
def _evaluate_projection(self, x, y):
"""
kNNEvaluate - evaluate class separation in the given projection using a k-NN method
Parameters
----------
x - variables to evaluate
y - class
Returns
-------
scores
"""
if self.percent_data_used != 100:
rand = np.random.choice(len(x), int(len(x) * self.percent_data_used / 100),
replace=False)
x = x[rand]
y = y[rand]
neigh = KNeighborsClassifier(n_neighbors=3) if self.attr_color.is_discrete else \
KNeighborsRegressor(n_neighbors=3)
        assert not (np.isnan(x).any() or np.isnan(y).any())
neigh.fit(x, y)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
scores = cross_val_score(neigh, x, y, cv=3)
return scores.mean()
def _n_attrs_changed(self):
"""
        Change the button label when the number of attributes changes. The method does not reset
        anything, so the user can still see the results until actually restarting the search.
"""
if self.n_attrs != self.last_run_n_attrs or self.saved_state is None:
self.button.setText(__("btn.start"))
else:
self.button.setText(__("btn.continue"))
self.button.setEnabled(self.check_preconditions())
def progressBarSet(self, value):
self.setWindowTitle(self.captionTitle + __("caption_title_evaluated").format(value))
def check_preconditions(self):
master = self.master
if not super().check_preconditions():
return False
elif not master.btn_vizrank.isEnabled():
return False
self.n_attrs_spin.setMaximum(min(MAX_DISPLAYED_VARS,
len(master.model_selected)))
return True
def on_selection_changed(self, selected, _):
self.on_row_clicked(selected.indexes()[0])
def on_row_clicked(self, index):
self.selectionChanged.emit(index.data(self._AttrRole))
def on_header_clicked(self, section):
self.on_row_clicked(self.rank_model.index(section, 0))
def iterate_states(self, state):
if state is None: # on the first call, compute order
self.attrs = self._compute_attr_order()
state = list(range(3))
else:
state = list(state)
def combinations(n, s):
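            # Yields successive index combinations from range(n), mutating s in
            # place and growing it by one element once all combinations of the
            # current size are exhausted, e.g. for n=4 starting from [0, 1, 2]:
            # [0,1,2], [0,1,3], [0,2,3], [1,2,3], [0,1,2,3], ...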
while True:
yield s
for up, _ in enumerate(s):
s[up] += 1
if up + 1 == len(s) or s[up] < s[up + 1]:
break
s[up] = up
if s[-1] == n:
if len(s) < self.n_attrs:
s = list(range(len(s) + 1))
else:
break
for c in combinations(len(self.attrs), state):
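            # Fix the first anchor and enumerate only half of the remaining
            # permutations: a RadViz layout is invariant to rotation and
            # reflection of the anchor circle, so mirrored orderings are redundant.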
for p in islice(permutations(c[1:]), factorial(len(c) - 1) // 2):
yield (c[0],) + p
def compute_score(self, state):
attrs = [self.attrs[i] for i in state]
domain = Domain(attributes=attrs, class_vars=[self.attr_color])
data = self.data.transform(domain)
projector = RadViz()
projection = projector(data)
radviz_xy = projection(data).X
y = projector.preprocess(data).Y
return -self._evaluate_projection(radviz_xy, y)
def bar_length(self, score):
return -score
def row_for_state(self, score, state):
attrs = [self.attrs[s] for s in state]
item = QStandardItem("[{:0.6f}] ".format(-score) + ", ".join(a.name for a in attrs))
item.setData(attrs, self._AttrRole)
return [item]
def _update_progress(self):
self.progressBarSet(int(self.saved_progress))
def before_running(self):
"""
Disable the spin for number of attributes before running and
enable afterwards. Also, if the number of attributes is different than
in the last run, reset the saved state (if it was paused).
"""
if self.n_attrs != self.last_run_n_attrs:
self.saved_state = None
self.saved_progress = 0
if self.saved_state is None:
self.scores = []
self.rank_model.clear()
self.last_run_n_attrs = self.n_attrs
self.n_attrs_spin.setDisabled(True)
def stopped(self):
self.n_attrs_spin.setDisabled(False)
class OWRadvizGraph(OWGraphWithAnchors):
def __init__(self, scatter_widget, parent):
super().__init__(scatter_widget, parent)
self.anchors_scatter_item = None
self.padding = 0.025
def clear(self):
super().clear()
self.anchors_scatter_item = None
def set_view_box_range(self):
self.view_box.setRange(QRectF(-1, -1, 2, 2), padding=self.padding)
def closest_draggable_item(self, pos):
points, _ = self.master.get_anchors()
if points is None:
return None
np_pos = np.array([[pos.x(), pos.y()]])
distances = distance.cdist(np_pos, points[:, :2])[0]
if len(distances) and np.min(distances) < self.DISTANCE_DIFF:
return np.argmin(distances)
return None
def update_anchors(self):
points, labels = self.master.get_anchors()
if points is None:
return
if self.anchor_items is not None:
for anchor in self.anchor_items:
self.plot_widget.removeItem(anchor)
self.anchor_items = []
label_len = 1
for point, label in zip(points, labels):
anchor = TextItem()
anchor.textItem.setToolTip(f"<b>{label}</b>")
if len(label) > MAX_LABEL_LEN:
i = label.rfind(" ", 0, MAX_LABEL_LEN)
if i != -1:
first_row = label[:i] + "\n"
second_row = label[i + 1:]
if len(second_row) > MAX_LABEL_LEN:
j = second_row.rfind(" ", 0, MAX_LABEL_LEN)
if j != -1:
second_row = second_row[:j + 1] + "..."
else:
second_row = second_row[:MAX_LABEL_LEN - 3] + "..."
label = first_row + second_row
else:
label = label[:MAX_LABEL_LEN - 3] + "..."
anchor.setText(label)
anchor.setFont(self.parameter_setter.anchor_font)
label_len = min(MAX_LABEL_LEN, len(label))
anchor.setColor(QColor(0, 0, 0))
x, y = point
angle = np.rad2deg(np.arctan2(y, x))
anchor.setPos(x * 1.025, y * 1.025)
if abs(angle) < 90:
anchor.setAngle(angle)
anchor.setAnchor((0, 0.5))
else:
anchor.setAngle(angle + 180)
anchor.setAnchor((1, 0.5))
anchor.textItem.setTextWidth(anchor.textItem.boundingRect().width())
option = anchor.textItem.document().defaultTextOption()
option.setAlignment(Qt.AlignRight)
anchor.textItem.document().setDefaultTextOption(option)
self.plot_widget.addItem(anchor)
self.anchor_items.append(anchor)
self.padding = label_len * 0.0175
self._update_anchors_scatter_item(points)
def _update_anchors_scatter_item(self, points):
if self.anchors_scatter_item is not None:
self.plot_widget.removeItem(self.anchors_scatter_item)
self.anchors_scatter_item = None
self.anchors_scatter_item = ScatterPlotItem(x=points[:, 0],
y=points[:, 1])
self.plot_widget.addItem(self.anchors_scatter_item)
def _add_indicator_item(self, anchor_idx):
if anchor_idx is None:
return
x, y = self.anchor_items[anchor_idx].get_xy()
col = self.view_box.mouse_state
dx = (self.view_box.childGroup.mapToDevice(QPoint(1, 0)) -
self.view_box.childGroup.mapToDevice(QPoint(-1, 0))).x()
self.indicator_item = MoveIndicator(np.arctan2(y, x), col, 6000 / dx)
self.plot_widget.addItem(self.indicator_item)
class OWRadviz(OWAnchorProjectionWidget):
name = __("name")
description = __("desc")
icon = "icons/Radviz.svg"
priority = 241
keywords = ["viz"]
settings_version = 3
selected_vars = ContextSetting([])
vizrank = SettingProvider(RadvizVizRank)
GRAPH_CLASS = OWRadvizGraph
graph = SettingProvider(OWRadvizGraph)
class Warning(OWAnchorProjectionWidget.Warning):
invalid_embedding = widget.Msg(__("msg.no_projection"))
removed_vars = widget.Msg(__("msg.show_error"))
max_vars_selected = widget.Msg(__("msg.number_error"))
def _add_controls(self):
box = gui.vBox(self.controlArea, box=i18n.t("common.general.features"))
self.model_selected = VariableSelectionModel(self.selected_vars,
max_vars=20)
variables_selection(box, self, self.model_selected)
self.model_selected.selection_changed.connect(
self.__model_selected_changed)
self.vizrank, self.btn_vizrank = RadvizVizRank.add_vizrank(
None, self, __("btn.suggest_feature"), self.vizrank_set_attrs)
box.layout().addWidget(self.btn_vizrank)
super()._add_controls()
def _add_buttons(self):
self.gui.box_zoom_select(self.buttonsArea)
gui.auto_send(self.buttonsArea, self, "auto_commit")
@property
def primitive_variables(self):
if self.data is None or self.data.domain is None:
return []
dom = self.data.domain
return [v for v in chain(dom.variables, dom.metas)
if v.is_continuous or v.is_discrete and len(v.values) == 2]
@property
def effective_variables(self):
return self.selected_vars
@property
def effective_data(self):
return self.data.transform(Domain(self.effective_variables))
def vizrank_set_attrs(self, *attrs):
if not attrs:
return
self.selected_vars[:] = attrs
# Ugly, but the alternative is to have yet another signal to which
# the view will have to connect
self.model_selected.selection_changed.emit()
def __model_selected_changed(self):
if self.model_selected.is_full():
self.Warning.max_vars_selected()
else:
self.Warning.max_vars_selected.clear()
self.init_projection()
self.setup_plot()
self.commit.deferred()
def colors_changed(self):
super().colors_changed()
self._init_vizrank()
def set_data(self, data):
super().set_data(data)
self._init_vizrank()
self.init_projection()
def _init_vizrank(self):
is_enabled = self.data is not None and \
len(self.primitive_variables) > 3 and \
self.attr_color is not None and \
not np.isnan(self.data.get_column_view(
self.attr_color)[0].astype(float)).all() and \
np.sum(np.all(np.isfinite(self.data.X), axis=1)) > 1 and \
np.all(np.nan_to_num(np.nanstd(self.data.X, 0)) != 0)
self.btn_vizrank.setEnabled(is_enabled)
if is_enabled:
self.vizrank.initialize()
def check_data(self):
super().check_data()
if self.data is not None:
domain = self.data.domain
vars_ = chain(domain.variables, domain.metas)
n_vars = sum(v.is_primitive() for v in vars_)
if len(self.primitive_variables) < n_vars:
self.Warning.removed_vars()
def init_attr_values(self):
super().init_attr_values()
self.selected_vars[:] = self.primitive_variables[:5]
self.model_selected[:] = self.primitive_variables
def _manual_move(self, anchor_idx, x, y):
angle = np.arctan2(y, x)
super()._manual_move(anchor_idx, np.cos(angle), np.sin(angle))
def _send_components_x(self):
components_ = super()._send_components_x()
angle = np.arctan2(*components_[::-1])
return np.row_stack((components_, angle))
def _send_components_metas(self):
return np.vstack((super()._send_components_metas(), ["angle"]))
def clear(self):
super().clear()
self.projector = RadViz()
@classmethod
def migrate_context(cls, context, version):
values = context.values
if version < 2:
values["attr_color"] = values["graph"]["attr_color"]
values["attr_size"] = values["graph"]["attr_size"]
values["attr_shape"] = values["graph"]["attr_shape"]
values["attr_label"] = values["graph"]["attr_label"]
if version < 3 and "selected_vars" in values:
values["selected_vars"] = (values["selected_vars"], -3)
class MoveIndicator(pg.GraphicsObject):
def __init__(self, angle, col, dangle=5, parent=None):
super().__init__(parent)
color = QColor(0, 0, 0) if col else QColor(128, 128, 128)
angle_d = np.rad2deg(angle)
angle_2 = 90 - angle_d - dangle
angle_1 = 270 - angle_d + dangle
dangle = np.deg2rad(dangle)
arrow1 = pg.ArrowItem(
parent=self, angle=angle_1, brush=color, pen=pg.mkPen(color)
)
arrow1.setPos(np.cos(angle - dangle), np.sin(angle - dangle))
arrow2 = pg.ArrowItem(
parent=self, angle=angle_2, brush=color, pen=pg.mkPen(color)
)
arrow2.setPos(np.cos(angle + dangle), np.sin(angle + dangle))
arc_x = np.fromfunction(
lambda i: np.cos((angle - dangle) + (2 * dangle) * i / 120.),
(121,), dtype=int
)
arc_y = np.fromfunction(
lambda i: np.sin((angle - dangle) + (2 * dangle) * i / 120.),
(121,), dtype=int
)
pg.PlotCurveItem(
parent=self, x=arc_x, y=arc_y, pen=pg.mkPen(color), antialias=False
)
def paint(self, painter, option, widget):
pass
def boundingRect(self):
return QRectF()
if __name__ == "__main__": # pragma: no cover
data = Table("brown-selected")
WidgetPreview(OWRadviz).run(set_data=data, set_subset_data=data[::10]) | PypiClean |
/Klampt-0.9.0-cp36-cp36m-win_amd64.whl/klampt/control/blocks/utils.py | from .core import Block
from klampt.math import vectorops
import inspect
class BlockSignal(RuntimeError):
"""An exception raised by a block if it wishes to raise a signal to a
parent.
Attributes:
signal (str): the identifier of the signal
"""
def __init__(self,signal,text):
self.signal = signal
RuntimeError.__init__(self,text)
class SignalBlock(Block):
"""A block that raises a signal if its input is nonzero"""
def __init__(self,type,text):
self.type = type
self.text = text
Block.__init__(self,['signal'],0)
def advance(self,signal):
if signal:
raise BlockSignal(self.type,self.text)
return
class LambdaBlock(Block):
"""A fixed-function controller that simply evaluates a function. The
function arguments and return values are mapped from/to the input/output
dictionaries.
"""
def __init__(self,f,inputs='auto',outputs='auto'):
self.f = f
if inputs == 'auto':
            inputs = inspect.getfullargspec(f).args
if outputs == 'auto':
outputs = 1
Block.__init__(self,inputs,outputs)
def advance(self,*args):
return self.f(*args)
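# A small usage sketch: with inputs='auto' the wrapped function's argument
# names become the block's input names.
#
#   add = LambdaBlock(lambda a, b: a + b, inputs=['a', 'b'])
#   assert add.advance(1, 2) == 3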
class LinearBlock(Block):
"""Implements a linear function
output = A*input + b
The user must fill out the self.gains member using the addGain()
method.
To use this, Numpy must be available on your system.
"""
def __init__(self,A,b=None):
Block.__init__(self,1,1)
import numpy as np
self.A = A
self.b = b
def advance(self,x):
import numpy as np
if self.b is not None:
return np.dot(self.A,x)+self.b
return np.dot(self.A,x)
class Concatenate(Block):
"""Concatenates vectors from multiple items into a single vector.
Useful for when you have one controller for each arm, one for a lower body,
etc.
Arguments:
        inputs (int or list): the number of input items, or their names
"""
def __init__(self,inputs):
Block.__init__(self,inputs,1)
def advance(self,*args):
import numpy as np
return np.hstack(args)
class Clamp(Block):
"""Restricts a value to some range"""
def __init__(self):
Block.__init__(self,["x","minimum","maximum"],1)
def advance(self,x,minimum,maximum):
if hasattr(x,'__iter__'):
if hasattr(maximum,'__iter__'):
assert len(x) == len(maximum)
return vectorops.minimum(vectorops.maximum(x,minimum),maximum)
else:
return min(max(x,minimum),maximum)
class LimitExceeded(Block):
"""Returns 1 if a value exceeds some range"""
def __init__(self):
Block.__init__(self,['x','minimum','maximum'],0)
def advance(self,x,minimum,maximum):
assert len(x) == len(maximum)
for (v,a,b) in zip(x,minimum,maximum):
if v < a or v > b:
return 1
return 0
class Distance(Block):
"""Returns the L-p distance between two values"""
def __init__(self,metric=float('inf')):
if metric not in [1,2,float('inf')]:
raise ValueError("Only supports L1, L2, or Linf distances")
if metric == 1:
self.metric = vectorops.norm_L1
elif metric == 2:
self.metric = vectorops.norm
else:
self.metric = vectorops.norm_Linf
Block.__init__(self,2,1)
def advance(self,x1,x2):
assert len(x1) == len(x2)
return self.metric(vectorops.sub(x1,x2))
class WorldCollision(Block):
"""Returns True if a collision occurrs in the world (or a collider)"""
def __init__(self, world_or_collider):
from klampt.model.collide import WorldCollider
if not isinstance(world_or_collider,WorldCollider):
collider = WorldCollider(world_or_collider)
else:
collider = world_or_collider
self.collider = collider
Block.__init__(self,'q',0)
def advance(self,q) -> bool:
robot = self.collider.world.robot(0)
qrobot = robot.configFromDrivers(q)
        robot.setConfig(qrobot)
for a,b in self.collider.collisions():
return True
return False
class If(Block):
def __init__(self):
Block.__init__(self,['cond','truebranch','falsebranch'],1)
def advance(self,cond,truebranch,falsebranch):
if cond: return truebranch
else: return falsebranch
class Mux(Block):
"""Function (index, case0, case1, ..., casek) returning case[index]
"""
def __init__(self,k):
Block.__init__(self,k+1,1)
def advance(self,*args):
index = int(args[0])
if index < 0 or index >= len(args)-1:
raise RuntimeError("Mux index is invalid")
return args[index+1] | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/ding/policy/sac.py | from typing import List, Dict, Any, Tuple, Union
from collections import namedtuple
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, Independent
from ding.torch_utils import Adam, to_device
from ding.rl_utils import v_1step_td_data, v_1step_td_error, get_train_sample, q_v_1step_td_error, q_v_1step_td_data
from ding.model import model_wrap
from ding.utils import POLICY_REGISTRY
from ding.utils.data import default_collate, default_decollate
from .base_policy import Policy
from .common_utils import default_preprocess_learn
@POLICY_REGISTRY.register('sac_discrete')
class SACDiscretePolicy(Policy):
r"""
Overview:
Policy class of discrete SAC algorithm. Paper link: https://arxiv.org/pdf/1910.07207.pdf.
Config:
== ==================== ======== ============= ================================= =======================
ID Symbol Type Default Value Description Other
== ==================== ======== ============= ================================= =======================
1 ``type`` str sac_discrete | RL policy register name, refer | this arg is optional,
| to registry ``POLICY_REGISTRY`` | a placeholder
2 ``cuda`` bool True | Whether to use cuda for network |
3 ``on_policy`` bool False | SACDiscrete is an off-policy |
| algorithm. |
4 ``priority`` bool False | Whether to use priority |
| sampling in buffer. |
5 | ``priority_IS_`` bool False | Whether use Importance Sampling |
| ``weight`` | weight to correct biased update |
6 | ``random_`` int 10000 | Number of randomly collected | Default to 10000 for
| ``collect_size`` | training samples in replay | SAC, 25000 for DDPG/
| | buffer when training starts. | TD3.
    7  | ``learn.learning``  float    3e-4          | Learning rate for soft q         | Default to 1e-3
| ``_rate_q`` | network. |
    8  | ``learn.learning``  float    3e-4          | Learning rate for policy         | Default to 1e-3
| ``_rate_policy`` | network. |
9 | ``learn.alpha`` float 0.2 | Entropy regularization | alpha is initiali-
| | coefficient. | zation for auto
| | | `\alpha`, when
| | | auto_alpha is True
10 | ``learn.`` bool False | Determine whether to use | Temperature parameter
| ``auto_alpha`` | auto temperature parameter | determines the
| | `\alpha`. | relative importance
| | | of the entropy term
| | | against the reward.
11 | ``learn.-`` bool False | Determine whether to ignore | Use ignore_done only
| ``ignore_done`` | done flag. | in env like Pendulum
12 | ``learn.-`` float 0.005 | Used for soft update of the | aka. Interpolation
| ``target_theta`` | target network. | factor in polyak aver
| | | aging for target
| | | networks.
== ==================== ======== ============= ================================= =======================
"""
config = dict(
# (str) RL policy register name (refer to function "POLICY_REGISTRY").
type='sac_discrete',
# (bool) Whether to use cuda for network and loss computation.
cuda=False,
# (bool) Whether to belong to on-policy or off-policy algorithm, SACDiscrete is an off-policy algorithm.
on_policy=False,
# (bool) Whether to use priority sampling in buffer. Default to False in SACDiscrete.
priority=False,
# (bool) Whether use Importance Sampling weight to correct biased update. If True, priority must be True.
priority_IS_weight=False,
# (int) Number of training samples (randomly collected) in replay buffer when training starts.
random_collect_size=10000,
# (bool) Whether to need policy-specific data in process transition.
transition_with_policy_data=True,
# (bool) Whether to enable multi-agent training setting.
multi_agent=False,
model=dict(
# (bool) Whether to use double-soft-q-net for target q computation.
# For more details, please refer to TD3 about Clipped Double-Q Learning trick.
twin_critic=True,
),
learn=dict(
# (int) How many updates (iterations) to train after collector's one collection.
# Bigger "update_per_collect" means bigger off-policy.
update_per_collect=1,
# (int) Minibatch size for one gradient descent.
batch_size=256,
# (float) Learning rate for soft q network.
learning_rate_q=3e-4,
# (float) Learning rate for policy network.
learning_rate_policy=3e-4,
# (float) Learning rate for auto temperature parameter `\alpha`.
learning_rate_alpha=3e-4,
# (float) Used for soft update of the target network,
# aka. Interpolation factor in EMA update for target network.
target_theta=0.005,
# (float) Discount factor for the discounted sum of rewards, aka. gamma.
discount_factor=0.99,
# (float) Entropy regularization coefficient in SAC.
# Please check out the original SAC paper (arXiv 1801.01290): Eq 1 for more details.
# If auto_alpha is set to `True`, alpha is initialization for auto `\alpha`.
alpha=0.2,
# (bool) Whether to use auto temperature parameter `\alpha` .
# Temperature parameter `\alpha` determines the relative importance of the entropy term against the reward.
# Please check out the original SAC paper (arXiv 1801.01290): Eq 1 for more details.
# Note that: Using auto alpha needs to set the above `learning_rate_alpha`.
auto_alpha=True,
# (bool) Whether to use auto `\alpha` in log space.
log_space=True,
# (float) Target policy entropy value for auto temperature (alpha) adjustment.
target_entropy=None,
            # (bool) Whether to ignore the done flag (usually for max-step-termination envs, e.g. Pendulum).
            # Note: Gym wraps the MuJoCo envs by default with TimeLimit environment wrappers.
            # These limit HalfCheetah, and several other MuJoCo envs, to a max length of 1000.
            # However, HalfCheetah itself never truly terminates (done is always False),
            # so when the episode step is greater than the max episode step, we replace
            # the time-limit done=True with done=False in place to keep the TD-error
            # computation (``gamma * (1 - done) * next_v + reward``) accurate.
ignore_done=False,
# (float) Weight uniform initialization max range in the last output layer
init_w=3e-3,
),
collect=dict(
# (int) How many training samples collected in one collection procedure.
n_sample=1,
# (int) Split episodes or trajectories into pieces with length `unroll_len`.
unroll_len=1,
# (bool) Whether to collect logit in `process_transition`.
# In some algorithm like guided cost learning, we need to use logit to train the reward model.
collector_logit=False,
),
other=dict(
replay_buffer=dict(
# (int) Maximum size of replay buffer. Usually, larger buffer size is good
# for SAC but cost more storage.
replay_buffer_size=1000000,
),
),
)
def default_model(self) -> Tuple[str, List[str]]:
if self._cfg.multi_agent:
return 'maqac', ['ding.model.template.maqac']
else:
return 'discrete_qac', ['ding.model.template.qac']
def _init_learn(self) -> None:
"""
Overview:
Learn mode init method. Called by ``self.__init__``.
Init q function and policy's optimizers, algorithm config, main and target models.
"""
self._priority = self._cfg.priority
self._priority_IS_weight = self._cfg.priority_IS_weight
self._twin_critic = self._cfg.model.twin_critic
self._optimizer_q = Adam(
self._model.critic.parameters(),
lr=self._cfg.learn.learning_rate_q,
)
self._optimizer_policy = Adam(
self._model.actor.parameters(),
lr=self._cfg.learn.learning_rate_policy,
)
# Algorithm-Specific Config
self._gamma = self._cfg.learn.discount_factor
if self._cfg.learn.auto_alpha:
if self._cfg.learn.target_entropy is None:
                assert 'action_shape' in self._cfg.model, "SACDiscrete needs a network model with an action_shape field"
self._target_entropy = -np.prod(self._cfg.model.action_shape)
else:
self._target_entropy = self._cfg.learn.target_entropy
if self._cfg.learn.log_space:
self._log_alpha = torch.log(torch.FloatTensor([self._cfg.learn.alpha]))
self._log_alpha = self._log_alpha.to(self._device).requires_grad_()
self._alpha_optim = torch.optim.Adam([self._log_alpha], lr=self._cfg.learn.learning_rate_alpha)
assert self._log_alpha.shape == torch.Size([1]) and self._log_alpha.requires_grad
self._alpha = self._log_alpha.detach().exp()
self._auto_alpha = True
self._log_space = True
else:
self._alpha = torch.FloatTensor([self._cfg.learn.alpha]).to(self._device).requires_grad_()
self._alpha_optim = torch.optim.Adam([self._alpha], lr=self._cfg.learn.learning_rate_alpha)
self._auto_alpha = True
self._log_space = False
else:
self._alpha = torch.tensor(
[self._cfg.learn.alpha], requires_grad=False, device=self._device, dtype=torch.float32
)
self._auto_alpha = False
# Main and target models
self._target_model = copy.deepcopy(self._model)
self._target_model = model_wrap(
self._target_model,
wrapper_name='target',
update_type='momentum',
update_kwargs={'theta': self._cfg.learn.target_theta}
)
self._learn_model = model_wrap(self._model, wrapper_name='base')
self._learn_model.reset()
self._target_model.reset()
def _forward_learn(self, data: dict) -> Dict[str, Any]:
loss_dict = {}
data = default_preprocess_learn(
data,
use_priority=self._priority,
use_priority_IS_weight=self._cfg.priority_IS_weight,
ignore_done=self._cfg.learn.ignore_done,
use_nstep=False
)
if self._cuda:
data = to_device(data, self._device)
self._learn_model.train()
self._target_model.train()
obs = data['obs']
next_obs = data['next_obs']
reward = data['reward']
done = data['done']
logit = data['logit']
action = data['action']
# 1. predict q value
q_value = self._learn_model.forward({'obs': obs}, mode='compute_critic')['q_value']
dist = torch.distributions.categorical.Categorical(logits=logit)
dist_entropy = dist.entropy()
entropy = dist_entropy.mean()
# 2. predict target value
# target q value. SARSA: first predict next action, then calculate next q value
with torch.no_grad():
policy_output_next = self._learn_model.forward({'obs': next_obs}, mode='compute_actor')
if self._cfg.multi_agent:
policy_output_next['logit'][policy_output_next['action_mask'] == 0.0] = -1e8
prob = F.softmax(policy_output_next['logit'], dim=-1)
log_prob = torch.log(prob + 1e-8)
target_q_value = self._target_model.forward({'obs': next_obs}, mode='compute_critic')['q_value']
# the value of a policy according to the maximum entropy objective
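            # i.e. V(s') = E_{a ~ pi}[ Q(s', a) - alpha * log pi(a|s') ], taking the
            # minimum over the twin critics when enabled and computing the expectation
            # exactly over the discrete action set rather than by sampling.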
if self._twin_critic:
# find min one as target q value
target_value = (
prob * (torch.min(target_q_value[0], target_q_value[1]) - self._alpha * log_prob.squeeze(-1))
).sum(dim=-1)
else:
target_value = (prob * (target_q_value - self._alpha * log_prob.squeeze(-1))).sum(dim=-1)
# 3. compute q loss
if self._twin_critic:
q_data0 = q_v_1step_td_data(q_value[0], target_value, action, reward, done, data['weight'])
loss_dict['critic_loss'], td_error_per_sample0 = q_v_1step_td_error(q_data0, self._gamma)
q_data1 = q_v_1step_td_data(q_value[1], target_value, action, reward, done, data['weight'])
loss_dict['twin_critic_loss'], td_error_per_sample1 = q_v_1step_td_error(q_data1, self._gamma)
td_error_per_sample = (td_error_per_sample0 + td_error_per_sample1) / 2
else:
q_data = q_v_1step_td_data(q_value, target_value, action, reward, done, data['weight'])
loss_dict['critic_loss'], td_error_per_sample = q_v_1step_td_error(q_data, self._gamma)
# 4. update q network
self._optimizer_q.zero_grad()
loss_dict['critic_loss'].backward()
if self._twin_critic:
loss_dict['twin_critic_loss'].backward()
self._optimizer_q.step()
# 5. evaluate to get action distribution
policy_output = self._learn_model.forward({'obs': data['obs']}, mode='compute_actor')
# 6. apply discrete action mask in multi_agent setting
if self._cfg.multi_agent:
policy_output['logit'][policy_output['action_mask'] == 0.0] = -1e8
logit = policy_output['logit']
prob = F.softmax(logit, dim=-1)
log_prob = F.log_softmax(logit, dim=-1)
with torch.no_grad():
new_q_value = self._learn_model.forward({'obs': data['obs']}, mode='compute_critic')['q_value']
if self._twin_critic:
new_q_value = torch.min(new_q_value[0], new_q_value[1])
# 7. compute policy loss
# we need to sum different actions' policy loss and calculate the average value of a batch
policy_loss = (prob * (self._alpha * log_prob - new_q_value)).sum(dim=-1).mean()
loss_dict['policy_loss'] = policy_loss
# 8. update policy network
self._optimizer_policy.zero_grad()
loss_dict['policy_loss'].backward()
self._optimizer_policy.step()
# 9. compute alpha loss
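        # J(alpha) = E_{a ~ pi}[ -alpha * (log pi(a|s) + target_entropy) ], so alpha
        # grows when the policy entropy falls below the target and shrinks otherwise.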
if self._auto_alpha:
if self._log_space:
log_prob = log_prob + self._target_entropy
loss_dict['alpha_loss'] = (-prob.detach() * (self._log_alpha * log_prob.detach())).sum(dim=-1).mean()
self._alpha_optim.zero_grad()
loss_dict['alpha_loss'].backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
else:
log_prob = log_prob + self._target_entropy
loss_dict['alpha_loss'] = (-prob.detach() * (self._alpha * log_prob.detach())).sum(dim=-1).mean()
self._alpha_optim.zero_grad()
loss_dict['alpha_loss'].backward()
self._alpha_optim.step()
self._alpha.data = torch.where(self._alpha > 0, self._alpha,
torch.zeros_like(self._alpha)).requires_grad_()
loss_dict['total_loss'] = sum(loss_dict.values())
# target update
self._target_model.update(self._learn_model.state_dict())
return {
'total_loss': loss_dict['total_loss'].item(),
'policy_loss': loss_dict['policy_loss'].item(),
'critic_loss': loss_dict['critic_loss'].item(),
'cur_lr_q': self._optimizer_q.defaults['lr'],
'cur_lr_p': self._optimizer_policy.defaults['lr'],
'priority': td_error_per_sample.abs().tolist(),
'td_error': td_error_per_sample.detach().mean().item(),
'alpha': self._alpha.item(),
'q_value_1': target_q_value[0].detach().mean().item(),
'q_value_2': target_q_value[1].detach().mean().item(),
'target_value': target_value.detach().mean().item(),
'entropy': entropy.item(),
}
def _state_dict_learn(self) -> Dict[str, Any]:
ret = {
'model': self._learn_model.state_dict(),
'optimizer_q': self._optimizer_q.state_dict(),
'optimizer_policy': self._optimizer_policy.state_dict(),
}
if self._auto_alpha:
ret.update({'optimizer_alpha': self._alpha_optim.state_dict()})
return ret
def _load_state_dict_learn(self, state_dict: Dict[str, Any]) -> None:
self._learn_model.load_state_dict(state_dict['model'])
self._optimizer_q.load_state_dict(state_dict['optimizer_q'])
self._optimizer_policy.load_state_dict(state_dict['optimizer_policy'])
if self._auto_alpha:
self._alpha_optim.load_state_dict(state_dict['optimizer_alpha'])
def _init_collect(self) -> None:
self._unroll_len = self._cfg.collect.unroll_len
        # Empirically, we found that eps_greedy_multinomial_sample works better than
        # multinomial_sample and eps_greedy_sample, and we don't divide logit by alpha;
        # for details, please refer to ding/model/wrapper/model_wrappers.
self._collect_model = model_wrap(self._model, wrapper_name='eps_greedy_multinomial_sample')
self._collect_model.reset()
def _forward_collect(self, data: dict, eps: float) -> dict:
data_id = list(data.keys())
data = default_collate(list(data.values()))
if self._cuda:
data = to_device(data, self._device)
self._collect_model.eval()
with torch.no_grad():
output = self._collect_model.forward({'obs': data}, mode='compute_actor', eps=eps)
if self._cuda:
output = to_device(output, 'cpu')
output = default_decollate(output)
return {i: d for i, d in zip(data_id, output)}
def _process_transition(self, obs: Any, model_output: dict, timestep: namedtuple) -> dict:
transition = {
'obs': obs,
'next_obs': timestep.obs,
'action': model_output['action'],
'logit': model_output['logit'],
'reward': timestep.reward,
'done': timestep.done,
}
return transition
def _get_train_sample(self, data: list) -> Union[None, List[Any]]:
return get_train_sample(data, self._unroll_len)
def _init_eval(self) -> None:
self._eval_model = model_wrap(self._model, wrapper_name='argmax_sample')
self._eval_model.reset()
def _forward_eval(self, data: dict) -> dict:
data_id = list(data.keys())
data = default_collate(list(data.values()))
if self._cuda:
data = to_device(data, self._device)
self._eval_model.eval()
with torch.no_grad():
output = self._eval_model.forward({'obs': data}, mode='compute_actor')
if self._cuda:
output = to_device(output, 'cpu')
output = default_decollate(output)
return {i: d for i, d in zip(data_id, output)}
def _monitor_vars_learn(self) -> List[str]:
twin_critic = ['twin_critic_loss'] if self._twin_critic else []
if self._auto_alpha:
return super()._monitor_vars_learn() + [
'alpha_loss', 'policy_loss', 'critic_loss', 'cur_lr_q', 'cur_lr_p', 'target_q_value', 'q_value_1',
'q_value_2', 'alpha', 'td_error', 'target_value', 'entropy'
] + twin_critic
else:
return super()._monitor_vars_learn() + [
'policy_loss', 'critic_loss', 'cur_lr_q', 'cur_lr_p', 'target_q_value', 'q_value_1', 'q_value_2',
'alpha', 'td_error', 'target_value', 'entropy'
] + twin_critic
@POLICY_REGISTRY.register('sac')
class SACPolicy(Policy):
r"""
Overview:
Policy class of continuous SAC algorithm. Paper link: https://arxiv.org/pdf/1801.01290.pdf
Config:
== ==================== ======== ============= ================================= =======================
ID Symbol Type Default Value Description Other
== ==================== ======== ============= ================================= =======================
1 ``type`` str sac | RL policy register name, refer | this arg is optional,
| to registry ``POLICY_REGISTRY`` | a placeholder
2 ``cuda`` bool True | Whether to use cuda for network |
3 ``on_policy`` bool False | SAC is an off-policy |
| algorithm. |
4 ``priority`` bool False | Whether to use priority |
| sampling in buffer. |
5 | ``priority_IS_`` bool False | Whether use Importance Sampling |
| ``weight`` | weight to correct biased update |
6 | ``random_`` int 10000 | Number of randomly collected | Default to 10000 for
| ``collect_size`` | training samples in replay | SAC, 25000 for DDPG/
| | buffer when training starts. | TD3.
        7  | ``learn.learning``  float    3e-4          | Learning rate for soft q        | Default to 3e-4
| ``_rate_q`` | network. |
        8  | ``learn.learning``  float    3e-4          | Learning rate for policy        | Default to 3e-4
| ``_rate_policy`` | network. |
9 | ``learn.alpha`` float 0.2 | Entropy regularization | alpha is initiali-
| | coefficient. | zation for auto
| | | `\alpha`, when
| | | auto_alpha is True
10 | ``learn.`` bool False | Determine whether to use | Temperature parameter
| ``auto_alpha`` | auto temperature parameter | determines the
| | `\alpha`. | relative importance
| | | of the entropy term
| | | against the reward.
11 | ``learn.-`` bool False | Determine whether to ignore | Use ignore_done only
| ``ignore_done`` | done flag. | in env like Pendulum
12 | ``learn.-`` float 0.005 | Used for soft update of the | aka. Interpolation
| ``target_theta`` | target network. | factor in polyak aver
| | | aging for target
| | | networks.
== ==================== ======== ============= ================================= =======================
"""
config = dict(
# (str) RL policy register name (refer to function "POLICY_REGISTRY").
type='sac',
# (bool) Whether to use cuda for network and loss computation.
cuda=False,
# (bool) Whether to belong to on-policy or off-policy algorithm, SAC is an off-policy algorithm.
on_policy=False,
# (bool) Whether to use priority sampling in buffer. Default to False in SAC.
priority=False,
        # (bool) Whether to use Importance Sampling weight to correct biased update. If True, priority must be True.
priority_IS_weight=False,
# (int) Number of training samples (randomly collected) in replay buffer when training starts.
random_collect_size=10000,
# (bool) Whether to need policy-specific data in process transition.
transition_with_policy_data=True,
# (bool) Whether to enable multi-agent training setting.
multi_agent=False,
model=dict(
# (bool) Whether to use double-soft-q-net for target q computation.
# For more details, please refer to TD3 about Clipped Double-Q Learning trick.
twin_critic=True,
            # (str) Use the reparameterization trick for continuous action.
action_space='reparameterization',
),
learn=dict(
# (int) How many updates (iterations) to train after collector's one collection.
# Bigger "update_per_collect" means bigger off-policy.
update_per_collect=1,
# (int) Minibatch size for one gradient descent.
batch_size=256,
# (float) Learning rate for soft q network.
learning_rate_q=3e-4,
# (float) Learning rate for policy network.
learning_rate_policy=3e-4,
# (float) Learning rate for auto temperature parameter `\alpha`.
learning_rate_alpha=3e-4,
# (float) Used for soft update of the target network,
# aka. Interpolation factor in EMA update for target network.
target_theta=0.005,
# (float) discount factor for the discounted sum of rewards, aka. gamma.
discount_factor=0.99,
# (float) Entropy regularization coefficient in SAC.
# Please check out the original SAC paper (arXiv 1801.01290): Eq 1 for more details.
# If auto_alpha is set to `True`, alpha is initialization for auto `\alpha`.
alpha=0.2,
# (bool) Whether to use auto temperature parameter `\alpha` .
# Temperature parameter `\alpha` determines the relative importance of the entropy term against the reward.
# Please check out the original SAC paper (arXiv 1801.01290): Eq 1 for more details.
# Note that: Using auto alpha needs to set the above `learning_rate_alpha`.
auto_alpha=True,
# (bool) Whether to use auto `\alpha` in log space.
log_space=True,
# (float) Target policy entropy value for auto temperature (alpha) adjustment.
target_entropy=None,
            # (bool) Whether to ignore the done flag (usually for max-step-termination envs, e.g. Pendulum).
            # Note: Gym wraps the MuJoCo envs by default with TimeLimit wrappers,
            # which cap HalfCheetah (and several other MuJoCo envs) at a max episode length of 1000.
            # Interaction with HalfCheetah therefore always ends with done=False;
            # when the episode step exceeds the max episode step, we replace done==True with done==False
            # so that the TD-error computation (``gamma * (1 - done) * next_v + reward``) stays accurate.
ignore_done=False,
# (float) Weight uniform initialization max range in the last output layer.
init_w=3e-3,
),
collect=dict(
# (int) How many training samples collected in one collection procedure.
n_sample=1,
# (int) Split episodes or trajectories into pieces with length `unroll_len`.
unroll_len=1,
# (bool) Whether to collect logit in `process_transition`.
            # In some algorithms, such as guided cost learning, the logit is needed to train the reward model.
collector_logit=False,
),
other=dict(
replay_buffer=dict(
                # (int) Maximum size of the replay buffer. A larger buffer is usually
                # good for SAC but costs more storage.
replay_buffer_size=1000000,
),
),
)
def default_model(self) -> Tuple[str, List[str]]:
if self._cfg.multi_agent:
return 'maqac_continuous', ['ding.model.template.maqac']
else:
return 'qac', ['ding.model.template.qac']
def _init_learn(self) -> None:
self._priority = self._cfg.priority
self._priority_IS_weight = self._cfg.priority_IS_weight
self._twin_critic = self._cfg.model.twin_critic
# Weight Init for the last output layer
init_w = self._cfg.learn.init_w
self._model.actor[-1].mu.weight.data.uniform_(-init_w, init_w)
self._model.actor[-1].mu.bias.data.uniform_(-init_w, init_w)
self._model.actor[-1].log_sigma_layer.weight.data.uniform_(-init_w, init_w)
self._model.actor[-1].log_sigma_layer.bias.data.uniform_(-init_w, init_w)
self._optimizer_q = Adam(
self._model.critic.parameters(),
lr=self._cfg.learn.learning_rate_q,
)
self._optimizer_policy = Adam(
self._model.actor.parameters(),
lr=self._cfg.learn.learning_rate_policy,
)
# Algorithm-Specific Config
self._gamma = self._cfg.learn.discount_factor
if self._cfg.learn.auto_alpha:
if self._cfg.learn.target_entropy is None:
                assert 'action_shape' in self._cfg.model, "SAC needs a network model with action_shape variable"
self._target_entropy = -np.prod(self._cfg.model.action_shape)
else:
self._target_entropy = self._cfg.learn.target_entropy
if self._cfg.learn.log_space:
self._log_alpha = torch.log(torch.FloatTensor([self._cfg.learn.alpha]))
self._log_alpha = self._log_alpha.to(self._device).requires_grad_()
self._alpha_optim = torch.optim.Adam([self._log_alpha], lr=self._cfg.learn.learning_rate_alpha)
assert self._log_alpha.shape == torch.Size([1]) and self._log_alpha.requires_grad
self._alpha = self._log_alpha.detach().exp()
self._auto_alpha = True
self._log_space = True
else:
self._alpha = torch.FloatTensor([self._cfg.learn.alpha]).to(self._device).requires_grad_()
self._alpha_optim = torch.optim.Adam([self._alpha], lr=self._cfg.learn.learning_rate_alpha)
self._auto_alpha = True
self._log_space = False
else:
self._alpha = torch.tensor(
[self._cfg.learn.alpha], requires_grad=False, device=self._device, dtype=torch.float32
)
self._auto_alpha = False
# Main and target models
self._target_model = copy.deepcopy(self._model)
self._target_model = model_wrap(
self._target_model,
wrapper_name='target',
update_type='momentum',
update_kwargs={'theta': self._cfg.learn.target_theta}
)
self._learn_model = model_wrap(self._model, wrapper_name='base')
self._learn_model.reset()
self._target_model.reset()
def _forward_learn(self, data: dict) -> Dict[str, Any]:
loss_dict = {}
data = default_preprocess_learn(
data,
use_priority=self._priority,
use_priority_IS_weight=self._cfg.priority_IS_weight,
ignore_done=self._cfg.learn.ignore_done,
use_nstep=False
)
if self._cuda:
data = to_device(data, self._device)
self._learn_model.train()
self._target_model.train()
obs = data['obs']
next_obs = data['next_obs']
reward = data['reward']
done = data['done']
# 1. predict q value
q_value = self._learn_model.forward(data, mode='compute_critic')['q_value']
# 2. predict target value
with torch.no_grad():
(mu, sigma) = self._learn_model.forward(next_obs, mode='compute_actor')['logit']
dist = Independent(Normal(mu, sigma), 1)
pred = dist.rsample()
next_action = torch.tanh(pred)
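            # tanh squashing: correct the Gaussian log-probability with the
            # log-determinant of the Jacobian, log(1 - tanh(x)^2); the 1e-6 term
            # below guards against log(0).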
y = 1 - next_action.pow(2) + 1e-6
            # keep the dimension for loss computation (usually for envs with a 1-dim action space, e.g. Pendulum)
next_log_prob = dist.log_prob(pred).unsqueeze(-1)
next_log_prob = next_log_prob - torch.log(y).sum(-1, keepdim=True)
next_data = {'obs': next_obs, 'action': next_action}
target_q_value = self._target_model.forward(next_data, mode='compute_critic')['q_value']
# the value of a policy according to the maximum entropy objective
if self._twin_critic:
# find min one as target q value
target_q_value = torch.min(target_q_value[0],
target_q_value[1]) - self._alpha * next_log_prob.squeeze(-1)
else:
target_q_value = target_q_value - self._alpha * next_log_prob.squeeze(-1)
# 3. compute q loss
if self._twin_critic:
q_data0 = v_1step_td_data(q_value[0], target_q_value, reward, done, data['weight'])
loss_dict['critic_loss'], td_error_per_sample0 = v_1step_td_error(q_data0, self._gamma)
q_data1 = v_1step_td_data(q_value[1], target_q_value, reward, done, data['weight'])
loss_dict['twin_critic_loss'], td_error_per_sample1 = v_1step_td_error(q_data1, self._gamma)
td_error_per_sample = (td_error_per_sample0 + td_error_per_sample1) / 2
else:
q_data = v_1step_td_data(q_value, target_q_value, reward, done, data['weight'])
loss_dict['critic_loss'], td_error_per_sample = v_1step_td_error(q_data, self._gamma)
# 4. update q network
self._optimizer_q.zero_grad()
if self._twin_critic:
(loss_dict['critic_loss'] + loss_dict['twin_critic_loss']).backward()
else:
loss_dict['critic_loss'].backward()
self._optimizer_q.step()
# 5. evaluate to get action distribution
(mu, sigma) = self._learn_model.forward(data['obs'], mode='compute_actor')['logit']
dist = Independent(Normal(mu, sigma), 1)
pred = dist.rsample()
action = torch.tanh(pred)
y = 1 - action.pow(2) + 1e-6
        # keep the dimension for loss computation (usually for envs with a 1-dim action space, e.g. Pendulum)
log_prob = dist.log_prob(pred).unsqueeze(-1)
log_prob = log_prob - torch.log(y).sum(-1, keepdim=True)
eval_data = {'obs': obs, 'action': action}
new_q_value = self._learn_model.forward(eval_data, mode='compute_critic')['q_value']
if self._twin_critic:
new_q_value = torch.min(new_q_value[0], new_q_value[1])
# 6. compute policy loss
policy_loss = (self._alpha * log_prob - new_q_value.unsqueeze(-1)).mean()
loss_dict['policy_loss'] = policy_loss
# 7. update policy network
self._optimizer_policy.zero_grad()
loss_dict['policy_loss'].backward()
self._optimizer_policy.step()
# 8. compute alpha loss
if self._auto_alpha:
if self._log_space:
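                # Optimize log(alpha) instead of alpha directly, so alpha = exp(log_alpha)
                # stays strictly positive without explicit clamping.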
log_prob = log_prob + self._target_entropy
loss_dict['alpha_loss'] = -(self._log_alpha * log_prob.detach()).mean()
self._alpha_optim.zero_grad()
loss_dict['alpha_loss'].backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
else:
log_prob = log_prob + self._target_entropy
loss_dict['alpha_loss'] = -(self._alpha * log_prob.detach()).mean()
self._alpha_optim.zero_grad()
loss_dict['alpha_loss'].backward()
self._alpha_optim.step()
self._alpha = max(0, self._alpha)
loss_dict['total_loss'] = sum(loss_dict.values())
# target update
self._target_model.update(self._learn_model.state_dict())
return {
'cur_lr_q': self._optimizer_q.defaults['lr'],
'cur_lr_p': self._optimizer_policy.defaults['lr'],
'priority': td_error_per_sample.abs().tolist(),
'td_error': td_error_per_sample.detach().mean().item(),
'alpha': self._alpha.item(),
'target_q_value': target_q_value.detach().mean().item(),
'transformed_log_prob': log_prob.mean().item(),
**loss_dict
}
def _state_dict_learn(self) -> Dict[str, Any]:
ret = {
'model': self._learn_model.state_dict(),
'target_model': self._target_model.state_dict(),
'optimizer_q': self._optimizer_q.state_dict(),
'optimizer_policy': self._optimizer_policy.state_dict(),
}
if self._auto_alpha:
ret.update({'optimizer_alpha': self._alpha_optim.state_dict()})
return ret
def _load_state_dict_learn(self, state_dict: Dict[str, Any]) -> None:
self._learn_model.load_state_dict(state_dict['model'])
self._target_model.load_state_dict(state_dict['target_model'])
self._optimizer_q.load_state_dict(state_dict['optimizer_q'])
self._optimizer_policy.load_state_dict(state_dict['optimizer_policy'])
if self._auto_alpha:
self._alpha_optim.load_state_dict(state_dict['optimizer_alpha'])
def _init_collect(self) -> None:
self._unroll_len = self._cfg.collect.unroll_len
self._collect_model = model_wrap(self._model, wrapper_name='base')
self._collect_model.reset()
def _forward_collect(self, data: dict) -> dict:
data_id = list(data.keys())
data = default_collate(list(data.values()))
if self._cuda:
data = to_device(data, self._device)
self._collect_model.eval()
with torch.no_grad():
(mu, sigma) = self._collect_model.forward(data, mode='compute_actor')['logit']
dist = Independent(Normal(mu, sigma), 1)
action = torch.tanh(dist.rsample())
output = {'logit': (mu, sigma), 'action': action}
if self._cuda:
output = to_device(output, 'cpu')
output = default_decollate(output)
return {i: d for i, d in zip(data_id, output)}
def _process_transition(self, obs: Any, policy_output: dict, timestep: namedtuple) -> dict:
if self._cfg.collect.collector_logit:
transition = {
'obs': obs,
'next_obs': timestep.obs,
'logit': policy_output['logit'],
'action': policy_output['action'],
'reward': timestep.reward,
'done': timestep.done,
}
else:
transition = {
'obs': obs,
'next_obs': timestep.obs,
'action': policy_output['action'],
'reward': timestep.reward,
'done': timestep.done,
}
return transition
def _get_train_sample(self, data: list) -> Union[None, List[Any]]:
return get_train_sample(data, self._unroll_len)
def _init_eval(self) -> None:
self._eval_model = model_wrap(self._model, wrapper_name='base')
self._eval_model.reset()
def _forward_eval(self, data: dict) -> dict:
data_id = list(data.keys())
data = default_collate(list(data.values()))
if self._cuda:
data = to_device(data, self._device)
self._eval_model.eval()
with torch.no_grad():
(mu, sigma) = self._eval_model.forward(data, mode='compute_actor')['logit']
action = torch.tanh(mu) # deterministic_eval
output = {'action': action}
if self._cuda:
output = to_device(output, 'cpu')
output = default_decollate(output)
return {i: d for i, d in zip(data_id, output)}
def _monitor_vars_learn(self) -> List[str]:
twin_critic = ['twin_critic_loss'] if self._twin_critic else []
alpha_loss = ['alpha_loss'] if self._auto_alpha else []
return [
            'value_loss',
'policy_loss',
'critic_loss',
'cur_lr_q',
'cur_lr_p',
'target_q_value',
'alpha',
'td_error',
'transformed_log_prob',
] + twin_critic + alpha_loss
@POLICY_REGISTRY.register('sqil_sac')
class SQILSACPolicy(SACPolicy):
def _init_learn(self) -> None:
self._priority = self._cfg.priority
self._priority_IS_weight = self._cfg.priority_IS_weight
self._twin_critic = self._cfg.model.twin_critic
# Weight Init for the last output layer
init_w = self._cfg.learn.init_w
self._model.actor[2].mu.weight.data.uniform_(-init_w, init_w)
self._model.actor[2].mu.bias.data.uniform_(-init_w, init_w)
self._model.actor[2].log_sigma_layer.weight.data.uniform_(-init_w, init_w)
self._model.actor[2].log_sigma_layer.bias.data.uniform_(-init_w, init_w)
self._optimizer_q = Adam(
self._model.critic.parameters(),
lr=self._cfg.learn.learning_rate_q,
)
self._optimizer_policy = Adam(
self._model.actor.parameters(),
lr=self._cfg.learn.learning_rate_policy,
)
# Algorithm-Specific Config
self._gamma = self._cfg.learn.discount_factor
if self._cfg.learn.auto_alpha:
if self._cfg.learn.target_entropy is None:
                assert 'action_shape' in self._cfg.model, "SQILSAC needs a network model with action_shape variable"
self._target_entropy = -np.prod(self._cfg.model.action_shape)
else:
self._target_entropy = self._cfg.learn.target_entropy
if self._cfg.learn.log_space:
self._log_alpha = torch.log(torch.FloatTensor([self._cfg.learn.alpha]))
self._log_alpha = self._log_alpha.to(self._device).requires_grad_()
self._alpha_optim = torch.optim.Adam([self._log_alpha], lr=self._cfg.learn.learning_rate_alpha)
assert self._log_alpha.shape == torch.Size([1]) and self._log_alpha.requires_grad
self._alpha = self._log_alpha.detach().exp()
self._auto_alpha = True
self._log_space = True
else:
self._alpha = torch.FloatTensor([self._cfg.learn.alpha]).to(self._device).requires_grad_()
self._alpha_optim = torch.optim.Adam([self._alpha], lr=self._cfg.learn.learning_rate_alpha)
self._auto_alpha = True
self._log_space = False
else:
self._alpha = torch.tensor(
[self._cfg.learn.alpha], requires_grad=False, device=self._device, dtype=torch.float32
)
self._auto_alpha = False
# Main and target models
self._target_model = copy.deepcopy(self._model)
self._target_model = model_wrap(
self._target_model,
wrapper_name='target',
update_type='momentum',
update_kwargs={'theta': self._cfg.learn.target_theta}
)
self._learn_model = model_wrap(self._model, wrapper_name='base')
self._learn_model.reset()
self._target_model.reset()
        # switches for monitoring gradient cosine similarity and policy entropy
self._monitor_cos = True
self._monitor_entropy = True
def _forward_learn(self, data: dict) -> Dict[str, Any]:
loss_dict = {}
if self._monitor_cos:
agent_data = default_preprocess_learn(
data[0:len(data) // 2],
use_priority=self._priority,
use_priority_IS_weight=self._cfg.priority_IS_weight,
ignore_done=self._cfg.learn.ignore_done,
use_nstep=False
)
expert_data = default_preprocess_learn(
data[len(data) // 2:],
use_priority=self._priority,
use_priority_IS_weight=self._cfg.priority_IS_weight,
ignore_done=self._cfg.learn.ignore_done,
use_nstep=False
)
if self._cuda:
agent_data = to_device(agent_data, self._device)
expert_data = to_device(expert_data, self._device)
data = default_preprocess_learn(
data,
use_priority=self._priority,
use_priority_IS_weight=self._cfg.priority_IS_weight,
ignore_done=self._cfg.learn.ignore_done,
use_nstep=False
)
if self._cuda:
data = to_device(data, self._device)
self._learn_model.train()
self._target_model.train()
obs = data['obs']
next_obs = data['next_obs']
reward = data['reward']
done = data['done']
# 1. predict q value
q_value = self._learn_model.forward(data, mode='compute_critic')['q_value']
# 2. predict target value
with torch.no_grad():
(mu, sigma) = self._learn_model.forward(next_obs, mode='compute_actor')['logit']
dist = Independent(Normal(mu, sigma), 1)
pred = dist.rsample()
next_action = torch.tanh(pred)
y = 1 - next_action.pow(2) + 1e-6
            # keep the dimension for loss computation (usually for envs with a 1-dim action space, e.g. Pendulum)
next_log_prob = dist.log_prob(pred).unsqueeze(-1)
next_log_prob = next_log_prob - torch.log(y).sum(-1, keepdim=True)
next_data = {'obs': next_obs, 'action': next_action}
target_q_value = self._target_model.forward(next_data, mode='compute_critic')['q_value']
# the value of a policy according to the maximum entropy objective
if self._twin_critic:
# find min one as target q value
target_q_value = torch.min(target_q_value[0],
target_q_value[1]) - self._alpha * next_log_prob.squeeze(-1)
else:
target_q_value = target_q_value - self._alpha * next_log_prob.squeeze(-1)
# 3. compute q loss
if self._twin_critic:
q_data0 = v_1step_td_data(q_value[0], target_q_value, reward, done, data['weight'])
loss_dict['critic_loss'], td_error_per_sample0 = v_1step_td_error(q_data0, self._gamma)
q_data1 = v_1step_td_data(q_value[1], target_q_value, reward, done, data['weight'])
loss_dict['twin_critic_loss'], td_error_per_sample1 = v_1step_td_error(q_data1, self._gamma)
td_error_per_sample = (td_error_per_sample0 + td_error_per_sample1) / 2
else:
q_data = v_1step_td_data(q_value, target_q_value, reward, done, data['weight'])
loss_dict['critic_loss'], td_error_per_sample = v_1step_td_error(q_data, self._gamma)
# 4. update q network
self._optimizer_q.zero_grad()
if self._twin_critic:
(loss_dict['critic_loss'] + loss_dict['twin_critic_loss']).backward()
else:
loss_dict['critic_loss'].backward()
self._optimizer_q.step()
# 5. evaluate to get action distribution
if self._monitor_cos:
# agent
(mu, sigma) = self._learn_model.forward(agent_data['obs'], mode='compute_actor')['logit']
dist = Independent(Normal(mu, sigma), 1)
pred = dist.rsample()
action = torch.tanh(pred)
y = 1 - action.pow(2) + 1e-6
            # keep the dimension for loss computation (usually for envs with a 1-dim action space, e.g. Pendulum)
agent_log_prob = dist.log_prob(pred).unsqueeze(-1)
agent_log_prob = agent_log_prob - torch.log(y).sum(-1, keepdim=True)
eval_data = {'obs': agent_data['obs'], 'action': action}
agent_new_q_value = self._learn_model.forward(eval_data, mode='compute_critic')['q_value']
if self._twin_critic:
agent_new_q_value = torch.min(agent_new_q_value[0], agent_new_q_value[1])
# expert
(mu, sigma) = self._learn_model.forward(expert_data['obs'], mode='compute_actor')['logit']
dist = Independent(Normal(mu, sigma), 1)
pred = dist.rsample()
action = torch.tanh(pred)
y = 1 - action.pow(2) + 1e-6
            # keep the dimension for loss computation (usually for envs with a 1-dim action space, e.g. Pendulum)
expert_log_prob = dist.log_prob(pred).unsqueeze(-1)
expert_log_prob = expert_log_prob - torch.log(y).sum(-1, keepdim=True)
eval_data = {'obs': expert_data['obs'], 'action': action}
expert_new_q_value = self._learn_model.forward(eval_data, mode='compute_critic')['q_value']
if self._twin_critic:
expert_new_q_value = torch.min(expert_new_q_value[0], expert_new_q_value[1])
(mu, sigma) = self._learn_model.forward(data['obs'], mode='compute_actor')['logit']
dist = Independent(Normal(mu, sigma), 1)
        # monitor the entropy of the policy
if self._monitor_entropy:
dist_entropy = dist.entropy()
entropy = dist_entropy.mean()
pred = dist.rsample()
action = torch.tanh(pred)
y = 1 - action.pow(2) + 1e-6
        # keep the dimension for loss computation (usually for envs with a 1-dim action space, e.g. Pendulum)
log_prob = dist.log_prob(pred).unsqueeze(-1)
log_prob = log_prob - torch.log(y).sum(-1, keepdim=True)
eval_data = {'obs': obs, 'action': action}
new_q_value = self._learn_model.forward(eval_data, mode='compute_critic')['q_value']
if self._twin_critic:
new_q_value = torch.min(new_q_value[0], new_q_value[1])
# 6. compute policy loss
policy_loss = (self._alpha * log_prob - new_q_value.unsqueeze(-1)).mean()
loss_dict['policy_loss'] = policy_loss
# 7. update policy network
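        # As a diagnostic, compare the policy-gradient directions induced by agent data
        # and expert data via the cosine similarity of the last actor layer's weight
        # gradients (reduced to their means here).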
if self._monitor_cos:
agent_policy_loss = (self._alpha * agent_log_prob - agent_new_q_value.unsqueeze(-1)).mean()
expert_policy_loss = (self._alpha * expert_log_prob - expert_new_q_value.unsqueeze(-1)).mean()
loss_dict['agent_policy_loss'] = agent_policy_loss
loss_dict['expert_policy_loss'] = expert_policy_loss
self._optimizer_policy.zero_grad()
loss_dict['agent_policy_loss'].backward()
agent_grad = (list(list(self._learn_model.actor.children())[-1].children())[-1].weight.grad).mean()
self._optimizer_policy.zero_grad()
loss_dict['expert_policy_loss'].backward()
expert_grad = (list(list(self._learn_model.actor.children())[-1].children())[-1].weight.grad).mean()
cos = nn.CosineSimilarity(dim=0)
cos_similarity = cos(agent_grad, expert_grad)
self._optimizer_policy.zero_grad()
loss_dict['policy_loss'].backward()
self._optimizer_policy.step()
# 8. compute alpha loss
if self._auto_alpha:
if self._log_space:
log_prob = log_prob + self._target_entropy
loss_dict['alpha_loss'] = -(self._log_alpha * log_prob.detach()).mean()
self._alpha_optim.zero_grad()
loss_dict['alpha_loss'].backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
else:
log_prob = log_prob + self._target_entropy
loss_dict['alpha_loss'] = -(self._alpha * log_prob.detach()).mean()
self._alpha_optim.zero_grad()
loss_dict['alpha_loss'].backward()
self._alpha_optim.step()
self._alpha = max(0, self._alpha)
loss_dict['total_loss'] = sum(loss_dict.values())
# target update
self._target_model.update(self._learn_model.state_dict())
var_monitor = {
'cur_lr_q': self._optimizer_q.defaults['lr'],
'cur_lr_p': self._optimizer_policy.defaults['lr'],
'priority': td_error_per_sample.abs().tolist(),
'td_error': td_error_per_sample.detach().mean().item(),
'agent_td_error': td_error_per_sample.detach().chunk(2, dim=0)[0].mean().item(),
'expert_td_error': td_error_per_sample.detach().chunk(2, dim=0)[1].mean().item(),
'alpha': self._alpha.item(),
'target_q_value': target_q_value.detach().mean().item(),
'mu': mu.detach().mean().item(),
'sigma': sigma.detach().mean().item(),
'q_value0': new_q_value[0].detach().mean().item(),
'q_value1': new_q_value[1].detach().mean().item(),
**loss_dict,
}
if self._monitor_cos:
var_monitor['cos_similarity'] = cos_similarity.item()
if self._monitor_entropy:
var_monitor['entropy'] = entropy.item()
return var_monitor
def _monitor_vars_learn(self) -> List[str]:
twin_critic = ['twin_critic_loss'] if self._twin_critic else []
alpha_loss = ['alpha_loss'] if self._auto_alpha else []
cos_similarity = ['cos_similarity'] if self._monitor_cos else []
entropy = ['entropy'] if self._monitor_entropy else []
return [
            'value_loss',
'policy_loss',
'critic_loss',
'cur_lr_q',
'cur_lr_p',
'target_q_value',
'alpha',
'td_error',
'agent_td_error',
'expert_td_error',
'mu',
'sigma',
'q_value0',
'q_value1',
] + twin_critic + alpha_loss + cos_similarity + entropy | PypiClean |
/DevTool-0.0.2-py3-none-any.whl/devtool/utils/common.py | import datetime
import os
import re
import time
from devtool.utils.devtool_exceptions import MemoryOutOfThresException
is_psutil_installed = False
is_pynvml_installed = False
try:
import psutil
is_psutil_installed = True
except ImportError:
pass
try:
import pynvml
is_pynvml_installed = True
except ImportError:
pass
__DEFAULT_SIZE__ = 1024 * 1024
memorys = []
memory_percents = []
cpu_percents = []
useds = []
def match_datetime(text):
    '''Extract all date+time occurrences in a text via regular expression.
    :param text: text to search
    >>> match_datetime('The date is 2020-05-20 13:14:15.477062.')
['2020-05-20 13:14:15']
'''
pattern = r'(\d{4}-\d{1,2}-\d{1,2}\s\d{1,2}:\d{1,2}:\d{1,2})'
pattern = re.compile(pattern)
result = pattern.findall(text)
return result
def validate_datetime(text):
    '''Validate a date+time string against '%Y-%m-%d %H:%M:%S'.
    :param text: text to validate
>>> validate_datetime('2020-05-20 13:14:15')
True
>>> validate_datetime('2020-05-32 13:14:15')
False
'''
try:
if text != datetime.datetime.strptime(
text, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S'):
raise ValueError
return True
except ValueError:
return False
def validate_date(text):
    '''Validate a date string against '%Y-%m-%d'.
    :param text: text to validate
>>> validate_date('2020-05-20')
True
>>> validate_date('2020-05-32')
False
'''
try:
if text != datetime.datetime.strptime(text,
'%Y-%m-%d').strftime('%Y-%m-%d'):
raise ValueError
return True
except ValueError:
return False
def showPsInfo_before():
if not is_psutil_installed:
        print('Module "psutil" is not installed. Please install it first.')
return
currentPid = os.getpid()
pids = psutil.pids() # for multi processes maybe
return currentPid, pids
def situation(beforePids: tuple,
gpu=False,
return_dict=dict(),
mThres=float('inf')):
global memorys, memory_percents, cpu_percents, useds
currentPid = beforePids[0]
# print(currentPid)
# print(os.getpid())
p = psutil.Process(currentPid)
usedMemory = int(psutil.virtual_memory()[0] * p.memory_percent() /
(100 * __DEFAULT_SIZE__))
    print("memory : {}, memory_percent : {}, cpu_percent : {}".format(
str(usedMemory) + "MB",
str(round(p.memory_percent(), 3)) + "%", p.cpu_percent()))
memorys.append(usedMemory)
memory_percents.append(round(p.memory_percent(), 3))
cpu_percents.append(p.cpu_percent())
return_dict['memorys'] = memorys
return_dict['memory_percents'] = memory_percents
return_dict['cpu_percents'] = cpu_percents
if usedMemory > mThres:
print(
"This function out of memory with threshold {} MB, but got {} MB during runtime."
.format(mThres, usedMemory))
mp = psutil.Process(os.getpid())
p.kill()
mp.kill()
if gpu:
if not is_pynvml_installed:
            print('Module "pynvml" is not installed. Please install it first.')
else:
try:
pynvml.nvmlInit()
driver = pynvml.nvmlSystemGetDriverVersion()
gpunum = pynvml.nvmlDeviceGetCount()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
device = pynvml.nvmlDeviceGetName(handle)
info = pynvml.nvmlDeviceGetMemoryInfo(handle)
total = int(info.total / __DEFAULT_SIZE__)
used = int(info.used / __DEFAULT_SIZE__)
pynvml.nvmlShutdown()
print(
"driver:{}, gpunum:{}, device:{}, total:{}MB, used:{}MB .".
format(driver.decode(), gpunum, device.decode(), total,
used))
useds.append(used)
                return_dict['useds'] = useds  # GPU used-memory tracking is not fully working yet
except:
print('"pynvml" not working')
time.sleep(1)
def showPsInfo_after(beforePids: tuple,
psname='python',
gpu=False,
repeat=True,
return_dict=dict(),
mThres=float('inf')):
"""mThres : memory thres
"""
if not is_psutil_installed:
        print('Module "psutil" is not installed. Please install it first.')
return
if not repeat:
situation(beforePids, gpu, return_dict, mThres)
else:
        while True:
situation(beforePids, gpu, return_dict, mThres)
def plotBeautify(code: str, defaultLength: int = 22):
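    """Center-pad ``code`` with spaces up to ``defaultLength`` characters for aligned
    printing; strings already at least that long are returned unchanged."""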
assert type(
code) is str, "param 'code' must be a str, while got a {}".format(
type(code))
if len(code) >= defaultLength:
return code
else:
s = len(str(code))
        if s % 2 == 0:
return int(defaultLength / 2 - 0.5 * s - 1) * ' ' + str(
code) + int(defaultLength / 2 - 0.5 * s) * ' '
else:
return int(defaultLength / 2 - 0.5 * s) * ' ' + str(
code) + int(defaultLength / 2 - 0.5 * s) * ' ' | PypiClean |
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/gis/gdal/geomtype.py | from django.contrib.gis.gdal.error import OGRException
from django.utils import six
#### OGRGeomType ####
class OGRGeomType(object):
    "Encapsulates OGR Geometry Types."
wkb25bit = -2147483648
    # Dictionary of acceptable OGRwkbGeometryType values and their string names.
_types = {0 : 'Unknown',
1 : 'Point',
2 : 'LineString',
3 : 'Polygon',
4 : 'MultiPoint',
5 : 'MultiLineString',
6 : 'MultiPolygon',
7 : 'GeometryCollection',
100 : 'None',
101 : 'LinearRing',
1 + wkb25bit: 'Point25D',
2 + wkb25bit: 'LineString25D',
3 + wkb25bit: 'Polygon25D',
4 + wkb25bit: 'MultiPoint25D',
5 + wkb25bit : 'MultiLineString25D',
6 + wkb25bit : 'MultiPolygon25D',
7 + wkb25bit : 'GeometryCollection25D',
}
# Reverse type dictionary, keyed by lower-case of the name.
_str_types = dict([(v.lower(), k) for k, v in _types.items()])
def __init__(self, type_input):
"Figures out the correct OGR Type based upon the input."
if isinstance(type_input, OGRGeomType):
num = type_input.num
elif isinstance(type_input, six.string_types):
type_input = type_input.lower()
            if type_input == 'geometry':
                type_input = 'unknown'
num = self._str_types.get(type_input, None)
if num is None:
raise OGRException('Invalid OGR String Type "%s"' % type_input)
elif isinstance(type_input, int):
            if type_input not in self._types:
raise OGRException('Invalid OGR Integer Type: %d' % type_input)
num = type_input
else:
raise TypeError('Invalid OGR input type given.')
# Setting the OGR geometry type number.
self.num = num
def __str__(self):
"Returns the value of the name property."
return self.name
def __eq__(self, other):
"""
Does an equivalence test on the OGR type with the given
other OGRGeomType, the short-hand string, or the integer.
"""
if isinstance(other, OGRGeomType):
return self.num == other.num
elif isinstance(other, six.string_types):
return self.name.lower() == other.lower()
elif isinstance(other, int):
return self.num == other
else:
return False
def __ne__(self, other):
return not (self == other)
@property
def name(self):
"Returns a short-hand string form of the OGR Geometry type."
return self._types[self.num]
@property
def django(self):
"Returns the Django GeometryField for this OGR Type."
s = self.name.replace('25D', '')
if s in ('LinearRing', 'None'):
return None
elif s == 'Unknown':
s = 'Geometry'
return s + 'Field' | PypiClean |
/CDS-1.0.1.tar.gz/CDS-1.0.1/cds/modules/records/serializers/vtt.py | from __future__ import absolute_import, print_function
from datetime import datetime
from flask import current_app, render_template, url_for
from invenio_rest.errors import FieldError, RESTValidationError
from ...deposit.api import Video
from ..api import CDSVideosFilesIterator
class VTTSerializer(object):
    """VTT serializer for records."""
@staticmethod
def serialize(pid, record, links_factory=None):
"""Serialize a single record and persistent identifier.
:param pid: Persistent identifier instance.
:param record: Record instance.
:param links_factory: Factory function for record links.
"""
if record['$schema'] != Video.get_record_schema():
raise RESTValidationError(errors=[FieldError(
str(record.id), 'Unsupported format')])
return VTT(record=record).format()
class VTT(object):
    """VTT formatter."""
    def __init__(self, record):
        """Initialize the VTT formatter with the given record."""
self.record = record
self.data = ''
def format(self):
thumbnail_data = self._format_frames(self.record)
return render_template('cds_records/thumbnails.vtt',
frames=thumbnail_data)
@staticmethod
def _format_frames(record):
"""Select frames and format the start/end times."""
master_file = CDSVideosFilesIterator.get_master_video_file(record)
frames = [{
'time': float(f['tags']['timestamp']),
'bid': f['bucket_id'],
'key': f['key']
} for f in CDSVideosFilesIterator.get_video_frames(master_file)]
last_time = float(master_file['tags']['duration'])
poster_size = current_app.config['VIDEO_POSTER_SIZE']
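        # Pair each frame with its successor's timestamp (zip-with-next); the last
        # frame's end time falls back to the full video duration.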
frames_tail = frames[1:] + [{'time': last_time}]
return [{
'start_time': VTT.time_format(f['time'] if i > 0 else 0.0),
'end_time': VTT.time_format(next_f['time']),
'file_name': VTT.resize_link(f, poster_size),
} for i, (f, next_f) in enumerate(zip(frames, frames_tail))]
@staticmethod
def resize_link(frame, size):
return url_for('iiifimageapi', version='v2',
uuid='{0}:{1}'.format(frame['bid'], frame['key']),
region='full', size='{0[0]},{0[1]}'.format(size),
rotation='0', quality='default',
image_format='png', _external=True)
@staticmethod
def time_format(seconds):
"""Helper function to convert seconds to vtt time format."""
d = datetime.utcfromtimestamp(seconds)
s = d.strftime('%M:%S.%f')
return s[:-3] | PypiClean |
/Djblets-3.3.tar.gz/Djblets-3.3/docs/releasenotes/0.7.30.rst | ============================
Djblets 0.7.30 Release Notes
============================
**Release date**: June 6, 2014
Security
========
* Fixed an XSS issue in the gravatars code.
Users could construct a name that would allow for injecting
JavaScript in the page. That name is now properly escaped.
This is :cve:`2014-3995`.
* Fixed an XSS issue in :py:func:`json_dumps`.
JSON payloads constructed based on user input and then injected into
a page could result in custom JavaScript being injected into the
page. Additional escaping is now performed to ensure this does not
happen.
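  For instance, a user-controlled value containing a sequence such as
  ``</script><script>`` could previously break out of an inline script
  block once the serialized JSON was embedded in a page.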
This is :cve:`2014-3994` (discovered by "uchida", :bug:`3406`).
Contributors
============
* Christian Hammond
* David Trowbridge
* Uchida
| PypiClean |
/EggLib-3.3.0.tar.gz/EggLib-3.3.0/README.rst | .. image:: ./doc/logo/banner.png
:target: https://egglib.org
:alt: EggLib Logo
|PythonVersions| |PypiPackage|
.. contents:: Table of Contents
:depth: 3
About
=====
EggLib is a Python library, largely implemented in C++, for evolutionary
genetics and genomics. Main features are sequence data management,
sequence polymorphism analysis, and coalescent simulations. EggLib is a
flexible Python module with a performant underlying C++ library and
allows fast and intuitive development of Python programs and scripts.
**EggLib home page:** `<https://www.egglib.org>`_
Installation
============
EggLib is available on pip. For more information on installing EggLib or
downloading source code please refer to the installation section of the
documentation: `<https://egglib.org/install.html>`_.
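For instance, a typical installation from PyPI is::
    pip install EggLib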
Cloning
=======
You can clone the whole package using::
git clone https://gitlab.com/demita/egglib.git
For example, this lets you access to the current version on development::
cd egglib
git checkout dev
Building local documentation
============================
To generate the documentation locally, you should clone the repository,
install EggLib and the python-sphinx package, and run this::
sphinx-build doc/ ../doc
The first argument is the location of the ``doc`` directory within the
EggLib package. The second argument is the destination of the generated
documentation. ``../doc`` is just an example.
Citation
========
Siol M., T. Coudoux, S. Ravel and S. De Mita. 2022. EggLib 3: A python package for population genetics and genomics.
*Mol Ecol. Res.* **22**:3176-3187. `<https://doi.org/10.1111/1755-0998.13672>`_
License
=======
EggLib is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your
option) any later version.
EggLib is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
`<https://www.egglib.org/licence.html>`_
.. |PythonVersions| image:: https://img.shields.io/badge/python-3.6+-blue.svg
:target: https://www.python.org/downloads
:alt: Python 3.6+
.. |PypiPackage| image:: https://badge.fury.io/py/EggLib.svg
:target: https://pypi.org/project/EggLib
:alt: PyPi package
| PypiClean |
/OctoBot-Trading-2.4.23.tar.gz/OctoBot-Trading-2.4.23/octobot_trading/exchange_data/ticker/ticker_manager.py | import math
import octobot_commons.logging as logging
import octobot_trading.enums as enums
import octobot_trading.util as util
class TickerManager(util.Initializable):
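    """Keeps the most recent ticker and mini-ticker payloads for a symbol,
    with every numeric field reset to NaN (timestamps to 0) until updates arrive."""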
def __init__(self):
super().__init__()
self.logger = logging.get_logger(self.__class__.__name__)
self.ticker = {}
self.mini_ticker = {}
self.reset_ticker()
self.reset_mini_ticker()
async def initialize_impl(self):
self.reset_ticker()
self.reset_mini_ticker()
def reset_mini_ticker(self):
self.mini_ticker = {
enums.ExchangeConstantsMiniTickerColumns.HIGH_PRICE.value: math.nan,
enums.ExchangeConstantsMiniTickerColumns.LOW_PRICE.value: math.nan,
enums.ExchangeConstantsMiniTickerColumns.OPEN_PRICE.value: math.nan,
enums.ExchangeConstantsMiniTickerColumns.CLOSE_PRICE.value: math.nan,
enums.ExchangeConstantsMiniTickerColumns.VOLUME.value: math.nan,
enums.ExchangeConstantsMiniTickerColumns.TIMESTAMP.value: 0
}
def reset_ticker(self):
self.ticker = {
enums.ExchangeConstantsTickersColumns.ASK.value: math.nan,
enums.ExchangeConstantsTickersColumns.ASK_VOLUME.value: math.nan,
enums.ExchangeConstantsTickersColumns.BID.value: math.nan,
enums.ExchangeConstantsTickersColumns.BID_VOLUME.value: math.nan,
enums.ExchangeConstantsTickersColumns.OPEN.value: math.nan,
enums.ExchangeConstantsTickersColumns.LOW.value: math.nan,
enums.ExchangeConstantsTickersColumns.HIGH.value: math.nan,
enums.ExchangeConstantsTickersColumns.CLOSE.value: math.nan,
enums.ExchangeConstantsTickersColumns.LAST.value: math.nan,
enums.ExchangeConstantsTickersColumns.AVERAGE.value: math.nan,
enums.ExchangeConstantsTickersColumns.SYMBOL.value: math.nan,
enums.ExchangeConstantsTickersColumns.QUOTE_VOLUME.value: math.nan,
enums.ExchangeConstantsTickersColumns.TIMESTAMP.value: 0,
enums.ExchangeConstantsTickersColumns.VWAP.value: math.nan
}
def ticker_update(self, ticker):
self.ticker.update(ticker)
def mini_ticker_update(self, mini_ticker):
self.mini_ticker.update(mini_ticker) | PypiClean |
/ConfigWrapper-0.4.5.tar.gz/ConfigWrapper-0.4.5/src/configwrapper/options/numeric.py | from decimal import Decimal, InvalidOperation
from configwrapper import ValidationError
from configwrapper.section import ConfigOption
__author__ = 'Lai Tash ([email protected])'
class IntOption(ConfigOption):
def __init__(self, allow=None, *args, **kwargs):
self.allow = allow or (lambda i: True)
super(IntOption, self).__init__(*args, **kwargs)
def _validate_value(self, value):
if not isinstance(value, int):
raise ValidationError('Integer value expected')
if not self.allow(value):
raise ValidationError('Value is not allowed')
def _validate_serialized(self, string_):
if not string_.isdigit():
raise ValidationError('Integer value expected')
def serialize(self, value, instance):
return str(value)
def deserialize(self, string_, instance):
return int(string_)
class DecimalOption(IntOption):
def _validate_value(self, value):
if not isinstance(value, (Decimal, int, long)):
raise ValidationError('Decimal value expected')
if not self.allow(value):
raise ValidationError('Value is not allowed')
def _validate_serialized(self, string_):
try:
Decimal(string_)
except InvalidOperation:
raise ValidationError('Decimal value expected')
def serialize(self, value, instance):
return str(value)
def deserialize(self, string_, instance):
return Decimal(string_)
class FloatOption(IntOption):
def _validate_value(self, value):
if not isinstance(value, (float, Decimal, int, long)):
raise ValidationError('Numeric value expected')
if not self.allow(value):
raise ValidationError('Value is not allowed')
def _validate_serialized(self, string_):
try:
Decimal(string_)
except InvalidOperation:
raise ValidationError('Decimal value expected')
def serialize(self, value, instance):
return str(value)
def deserialize(self, string_, instance):
return float(string_) | PypiClean |
/IES-SCAN-API-1.1.12.zip/IES-SCAN-API-1.1.12/README.txt | This is Python version 3.4.3
============================
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
2012, 2013, 2014, 2015 Python Software Foundation. All rights reserved.
Python 3.x is a new version of the language, which is incompatible with the 2.x
line of releases. The language is mostly the same, but many details, especially
how built-in objects like dictionaries and strings work, have changed
considerably, and a lot of deprecated features have finally been removed.
Build Instructions
------------------
On Unix, Linux, BSD, OSX, and Cygwin:
./configure
make
make test
sudo make install
This will install Python as python3.
You can pass many options to the configure script; run "./configure --help" to
find out more. On OSX and Cygwin, the executable is called python.exe;
elsewhere it's just python.
On Mac OS X, if you have configured Python with --enable-framework, you should
use "make frameworkinstall" to do the installation. Note that this installs the
Python executable in a place that is not normally on your PATH, you may want to
set up a symlink in /usr/local/bin.
On Windows, see PCbuild/readme.txt.
If you wish, you can create a subdirectory and invoke configure from there. For
example:
mkdir debug
cd debug
../configure --with-pydebug
make
make test
(This will fail if you *also* built at the top-level directory. You should do a
"make clean" at the toplevel first.)
What's New
----------
We try to have a comprehensive overview of the changes in the "What's New in
Python 3.4" document, found at
http://docs.python.org/3.4/whatsnew/3.4.html
For a more detailed change log, read Misc/NEWS (though this file, too, is
incomplete, and also doesn't list anything merged in from the 2.7 release under
development).
If you want to install multiple versions of Python see the section below
entitled "Installing multiple versions".
Documentation
-------------
Documentation for Python 3.4 is online, updated daily:
http://docs.python.org/3.4/
It can also be downloaded in many formats for faster access. The documentation
is downloadable in HTML, PDF, and reStructuredText formats; the latter version
is primarily for documentation authors, translators, and people with special
formatting requirements.
If you would like to contribute to the development of Python, relevant
documentation is available at:
http://docs.python.org/devguide/
For information about building Python's documentation, refer to Doc/README.txt.
Converting From Python 2.x to 3.x
---------------------------------
Python starting with 2.6 contains features to help locating code that needs to
be changed, such as optional warnings when deprecated features are used, and
backported versions of certain key Python 3.x features.
A source-to-source translation tool, "2to3", can take care of the mundane task
of converting large amounts of source code. It is not a complete solution but
is complemented by the deprecation warnings in 2.6. See
http://docs.python.org/3.4/library/2to3.html for more information.
Testing
-------
To test the interpreter, type "make test" in the top-level directory. The test
set produces some output. You can generally ignore the messages about skipped
tests due to optional features which can't be imported. If a message is printed
about a failed test or a traceback or core dump is produced, something is wrong.
By default, tests are prevented from overusing resources like disk space and
memory. To enable these tests, run "make testall".
IMPORTANT: If the tests fail and you decide to mail a bug report, *don't*
include the output of "make test". It is useless. Run the failing test
manually, as follows:
./python -m test -v test_whatever
(substituting the top of the source tree for '.' if you built in a different
directory). This runs the test in verbose mode.
Installing multiple versions
----------------------------
On Unix and Mac systems if you intend to install multiple versions of Python
using the same installation prefix (--prefix argument to the configure script)
you must take care that your primary python executable is not overwritten by the
installation of a different version. All files and directories installed using
"make altinstall" contain the major and minor version and can thus live
side-by-side. "make install" also creates ${prefix}/bin/python3 which refers to
${prefix}/bin/pythonX.Y. If you intend to install multiple versions using the
same prefix you must decide which version (if any) is your "primary" version.
Install that version using "make install". Install all other versions using
"make altinstall".
For example, if you want to install Python 2.6, 2.7 and 3.4 with 2.7 being the
primary version, you would execute "make install" in your 2.7 build directory
and "make altinstall" in the others.
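As a rough sketch (build directory names are illustrative):
    cd 2.7 && make install
    cd ../2.6 && make altinstall
    cd ../3.4 && make altinstall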
Issue Tracker and Mailing List
------------------------------
We're soliciting bug reports about all aspects of the language. Fixes are also
welcome, preferably in unified diff format. Please use the issue tracker:
http://bugs.python.org/
If you're not sure whether you're dealing with a bug or a feature, use the
mailing list:
[email protected]
To subscribe to the list, use the mailman form:
http://mail.python.org/mailman/listinfo/python-dev/
Proposals for enhancement
-------------------------
If you have a proposal to change Python, you may want to send an email to the
comp.lang.python or python-ideas mailing lists for initial feedback. A Python
Enhancement Proposal (PEP) may be submitted if your idea gains ground. All
current PEPs, as well as guidelines for submitting a new PEP, are listed at
http://www.python.org/dev/peps/.
Release Schedule
----------------
See PEP 429 for release details: http://www.python.org/dev/peps/pep-0429/
Copyright and License Information
---------------------------------
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
2012, 2013, 2014, 2015 Python Software Foundation. All rights reserved.
Copyright (c) 2000 BeOpen.com. All rights reserved.
Copyright (c) 1995-2001 Corporation for National Research Initiatives. All
rights reserved.
Copyright (c) 1991-1995 Stichting Mathematisch Centrum. All rights reserved.
See the file "LICENSE" for information on the history of this software, terms &
conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
This Python distribution contains *no* GNU General Public License (GPL) code, so
it may be used in proprietary projects. There are interfaces to some GNU code
but these are entirely optional.
All trademarks referenced herein are property of their respective holders.
| PypiClean |
/Kr0nOs-3.4.1.tar.gz/Kr0nOs-3.4.1/kronbot/launcher.py |
import argparse
import asyncio
import getpass
import os
import platform
import subprocess
import sys
import aiohttp
import pkg_resources
from kronbot import MIN_PYTHON_VERSION
from kronbot.core import VersionInfo, __version__
from kronbot.core import version_info as kron_version_info
from kronbot.core.cli import confirm
from kronbot.setup import (
basic_setup,
create_backup,
load_existing_config,
remove_instance,
remove_instance_interaction,
)
if sys.platform == "linux":
import distro # pylint: disable=import-error
INTERACTIVE_MODE = not len(sys.argv) > 1 # CLI flags = non-interactive
INTRO = "==========================\nKron Discord Bot - Launcher\n==========================\n"
IS_WINDOWS = os.name == "nt"
IS_MAC = sys.platform == "darwin"
PYTHON_OK = sys.version_info >= MIN_PYTHON_VERSION or os.getenv("READTHEDOCS", False)
def is_venv():
"""Return True if the process is in a venv or in a virtualenv."""
# credit to @calebj
return hasattr(sys, "real_prefix") or (
hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix
)
def parse_cli_args():
parser = argparse.ArgumentParser(
description="Kron - Discord Bot's launcher (V3)", allow_abbrev=False
)
instances = load_existing_config()
parser.add_argument(
"instancename",
metavar="instancename",
type=str,
nargs="?",
help="The instance to run",
choices=list(instances.keys()),
)
parser.add_argument("--start", "-s", help="Starts Kron", action="store_true")
parser.add_argument(
"--auto-restart", help="Autorestarts Kron in case of issues", action="store_true"
)
return parser.parse_known_args()
def run_kron(selected_instance, autorestart: bool = False, cliflags=None):
interpreter = sys.executable
while True:
print("Starting {}...".format(selected_instance))
cmd_list = [interpreter, "-m", "kronbot", selected_instance]
if cliflags:
cmd_list += cliflags
status = subprocess.call(cmd_list)
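        # Exit status 26 is treated as a restart request; any other status, or
        # autorestart being disabled, ends the loop.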
if (not autorestart) or (autorestart and status != 26):
break
def instance_menu():
instances = load_existing_config()
if not instances:
print("No instances found!")
return None
counter = 0
print("Kron instance menu\n")
name_num_map = {}
for name in list(instances.keys()):
print("{}. {}\n".format(counter + 1, name))
name_num_map[str(counter + 1)] = name
counter += 1
while True:
selection = user_choice()
try:
selection = int(selection)
except ValueError:
print("Invalid input! Please enter a number corresponding to an instance.")
else:
if selection not in list(range(1, counter + 1)):
print("Invalid selection! Please try again")
else:
return name_num_map[str(selection)]
def clear_screen():
if IS_WINDOWS:
os.system("cls")
else:
os.system("clear")
def wait():
if INTERACTIVE_MODE:
input("Press enter to continue.")
def user_choice():
return input("> ").lower().strip()
def main_menu(flags_to_pass):
if IS_WINDOWS:
os.system("TITLE Kron - Discord Bot V3 Launcher")
clear_screen()
while True:
print(INTRO)
print("\033[4mCurrent version:\033[0m {}".format(__version__))
print("WARNING: The launcher is scheduled for removal at a later date.")
print("")
print("1. Run Kron w/ autorestart in case of issues")
print("2. Run Kron")
print("0. Exit")
choice = user_choice()
if choice == "1":
instance = instance_menu()
if instance:
run_kron(instance, autorestart=True, cliflags=flags_to_pass)
wait()
elif choice == "2":
instance = instance_menu()
if instance:
run_kron(instance, autorestart=False, cliflags=flags_to_pass)
wait()
elif choice == "0":
break
clear_screen()
def main():
args, flags_to_pass = parse_cli_args()
if not PYTHON_OK:
print(
"Python {req_ver} is required to run Kron, but you have {sys_ver}!".format(
req_ver=".".join(map(str, MIN_PYTHON_VERSION)), sys_ver=sys.version
)
) # Don't make an f-string, these may not exist on the python version being rejected!
sys.exit(1)
if INTERACTIVE_MODE:
main_menu(flags_to_pass)
elif args.start:
print("WARNING: The launcher is scheduled for removal at a later date.")
print("Starting Kron...")
run_kron(args.instancename, autorestart=args.auto_restart, cliflags=flags_to_pass)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Exiting...") | PypiClean |
/NeuroUnits-0.1.2.tar.gz/NeuroUnits-0.1.2/src/neurounits/visitors/common/plot_networkx.py | import networkx as nx
import matplotlib.pyplot as plt
from neurounits.visitors import ASTActionerDefault
from neurounits.visitors import SingleVisitPredicate
from neurounits.visitors import ASTVisitorBase
from neurounits.ast_builder.eqnsetbuilder_symbol_proxy import SymbolProxy
from neurounits.ast import OnEventStateAssignment
import neurounits.ast as ast
import itertools
import collections
class DefaultNodeColors(ASTVisitorBase):
def VisitLibrary(self, o, **kwargs):
return 'green'
def visit(self, o):
if isinstance(o, SymbolProxy):
return 'red'
if isinstance(o, OnEventStateAssignment):
return 'orange'
if isinstance(o, ast.NineMLComponent):
return 'yellow'
if isinstance(o, ast.OnEventTransition):
return 'pink'
if isinstance(o, ast.OnConditionTriggerTransition):
return 'cyan'
if isinstance(o, ast.CompoundPortConnector):
return 'red'
return 'blue'
try:
return ASTVisitorBase.visit(self, o)
except NotImplementedError:
return 'red'
class ActionerPlotNetworkX(object):
def __init__(self, o, labels = None, colors=None):
graph = nx.DiGraph()
connections = ActionerGetConnections(o).get_connections()
for node in connections.keys():
graph.add_node(node, color='green', label='"%s"' % repr(node) )
for (node, connections) in connections.items():
for c in connections:
graph.add_edge(node, c, color='blue')
if isinstance(colors, dict):
color_lut = colors
colors = [ color_lut.get(node,'white') for node in graph]
        if colors is None:
nc = DefaultNodeColors()
colors = [nc.visit(v) for v in graph]
elif isinstance( colors, ASTVisitorBase):
colors = [colors.visit(v) for v in graph]
if isinstance(labels, dict):
for node in graph.nodes_iter():
if not node in labels:
labels[node] = repr(node)
        if labels is None:
            labels = dict((s, repr(s)) for s in graph.nodes_iter())
graph_nodes = set( graph.nodes_iter() )
        labels = dict([(k, v) for (k, v) in labels.items() if k in graph_nodes])
f = plt.figure()
        nx.draw_graphviz(graph, font_size=10, iteration=200, node_color=colors, scale=1, labels=labels)
ax = plt.gca()
ax.text(0.5, 0.5, 'Hello')
plt.show()
class ActionerGetConnections(ASTActionerDefault):
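    """Single-pass AST walker that records, for each visited node, the child nodes
    it references; the resulting adjacency map is what ActionerPlotNetworkX draws."""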
def __init__(self, o):
ASTActionerDefault.__init__(self, action_predicates=[SingleVisitPredicate()])
self.connections = collections.defaultdict(list)
self.visit(o)
def get_connections(self):
return self.connections
def ActionAnalogReducePort(self, o, **kwargs):
self.connections[o].extend(o.rhses)
def ActionLibraryManager(self, o, **kwargs):
self.connections[o].extend(o.libraries)
self.connections[o].extend(o.components)
self.connections[o].extend(o.interfaces)
def ActionLibrary(self, o, **kwargs):
self.connections[o].extend(o.functiondefs)
self.connections[o].extend(o.symbolicconstants)
def ActionNineMLComponent(self, o, **kwargs):
self.connections[o].extend(o.assignments)
self.connections[o].extend(o.timederivatives)
self.connections[o].extend(o.functiondefs)
self.connections[o].extend(o.symbolicconstants)
self.connections[o].extend(o.transitions)
self.connections[o].extend(list(o._event_port_connections) )
self.connections[o].extend(list(o._interface_connectors) )
def ActionRegime(self, o, **kwargs):
pass
def ActionRTGraph(self, o, **kwargs):
pass
def ActionOutEventPortParameter(self, o, **kwargs):
pass
def ActionEmitEventParameter(self, o, **kwargs):
self.connections[o].extend([o.rhs, o.port_parameter_obj])
def ActionOutEventPort(self, o, **kwargs):
self.connections[o].extend( list(o.parameters))
def ActionOnEvent(self, o, **kwargs):
self.connections[o].extend(o.parameters.values())
self.connections[o].extend(o.actions)
def ActionOnEventStateAssignment(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs)
def ActionIfThenElse(self, o, **kwargs):
self.connections[o].append(o.predicate)
self.connections[o].append(o.if_true_ast)
self.connections[o].append(o.if_false_ast)
def ActionInEquality(self, o, **kwargs):
self.connections[o].append(o.lesser_than)
self.connections[o].append(o.greater_than)
def ActionBoolAnd(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs)
def ActionBoolOr(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs)
def ActionBoolNot(self, o, **kwargs):
self.connections[o].append(o.lhs)
def ActionFunctionDefUser(self, o, **kwargs):
self.connections[o].extend(o.parameters.values())
self.connections[o].append(o.rhs)
def ActionFunctionDefBuiltIn(self, o, **kwargs):
self.connections[o].extend(o.parameters.values())
def ActionFunctionDefParameter(self, o, **kwargs):
pass
def ActionStateVariable(self, o, **kwargs):
pass
def ActionSymbolicConstant(self, o, **kwargs):
pass
def ActionParameter(self, o, **kwargs):
pass
def ActionConstant(self, o, **kwargs):
pass
def ActionConstantZero(self, o, **kwargs):
pass
def ActionAssignedVariable(self, o, **kwargs):
pass
def ActionSuppliedValue(self, o, **kwargs):
pass
def ActionTimeDerivativeByRegime(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs_map)
def ActionRegimeDispatchMap(self, o, **kwargs):
self.connections[o].extend(o.rhs_map.values())
def ActionEqnAssignmentByRegime(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs_map)
def ActionAddOp(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs)
def ActionSubOp(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs)
def ActionMulOp(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs)
def ActionDivOp(self, o, **kwargs):
self.connections[o].append(o.lhs)
self.connections[o].append(o.rhs)
def ActionExpOp(self, o, **kwargs):
self.connections[o].append(o.lhs)
def ActionFunctionDefUserInstantiation(self, o, **kwargs):
self.connections[o].extend(o.parameters.values())
self.connections[o].append(o.function_def)
def ActionFunctionDefBuiltInInstantiation(self, o, **kwargs):
self.connections[o].extend(o.parameters.values())
self.connections[o].append(o.function_def)
def ActionFunctionDefInstantiationParameter(self, o, **kwargs):
self.connections[o].append(o.rhs_ast)
def ActionOnConditionTriggerTransition(self, o, **kwargs):
self.connections[o].append(o.trigger)
self.connections[o].extend(o.actions)
def ActionOnTransitionEvent(self, o, **kwargs):
self.connections[o].extend(list(o.parameters))
self.connections[o].extend(o.actions)
def ActionOnEventDefParameter(self, o, **kwargs):
pass
def ActionEmitEvent(self, o, **kwargs):
self.connections[o].extend(list(o.parameters))
def ActionInEventPortParameter(self, o, **kwargs):
pass
def ActionInEventPort (self, o, **kwargs):
self.connections[o].extend(list(o.parameters))
def ActionEventPortConnection(self, o, **kwargs):
self.connections[o].extend([o.src_port, o.dst_port])
def ActionInterface(self, o, **kwargs):
self.connections[o].extend(list(o.connections))
def ActionCompoundPortConnectorWireMapping(self, o, **kwargs):
self.connections[o].extend([o.interface_port, o.component_port])
def ActionCompoundPortConnector(self, o, **kwargs):
self.connections[o].extend([o.interface_def] + list(o.wire_mappings))
def ActionRandomVariable(self, o, **kwargs):
self.connections[o].extend( o.parameters)
def ActionRandomVariableParameter(self, o, **kwargs):
        self.connections[o].append(o.rhs_ast)
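
# Sketch: inspect the adjacency gathered by ActionerGetConnections without
# plotting anything (assuming `component` is any neurounits AST object):
#
#     conns = ActionerGetConnections(component).get_connections()
#     for node, children in conns.items():
#         print(repr(node), '->', [repr(c) for c in children])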
/ONE-api-2.2.2.tar.gz/ONE-api-2.2.2/README.md

# Open Neurophysiology Environment
[](https://coveralls.io/github/int-brain-lab/ONE?branch=main)

The Open Neurophysiology Environment is a scheme for sharing neurophysiology data in a standardized manner. It is a Python API for searching and loading ONE-standardized data, stored either on a user’s local machine or on a remote server.
Please [click here](https://int-brain-lab.github.io/ONE/) for the main documentation page. For a quick primer on the file naming convention we use, [click here](https://github.com/int-brain-lab/ONE/blob/main/docs/Open_Neurophysiology_Environment_Filename_Convention.pdf).
**NB**: The API and backend database are still under active development, for the best experience please regularly update the package by running `pip install -U ONE-api`.
## Requirements
ONE runs on Python 3.8 or later. It is tested on the latest Ubuntu and Windows images with Python 3.8 and 3.11.
## Installing
Installing the package via pip typically takes a few seconds. To install, run
```
pip install ONE-api
```
## Set up
For using ONE with a local cache directory:
```python
from one.api import One
one = One(cache_dir='/home/user/downloads/ONE/behavior_paper')
```
To use the default setup settings that connect you to the [IBL public database](https://openalyx.internationalbrainlab.org):
```python
from one.api import ONE
ONE.setup(silent=True) # Will use default information
one = ONE(password='international')
```
For setting up ONE for a given database e.g. internal IBL Alyx:
```python
from one.api import ONE
one = ONE(base_url='https://alyx.internationalbrainlab.org')
```
Once you've setup the API for the first time, subsequent calls will use the same parameters:
```python
from one.api import ONE
one = ONE()
```
To set up ONE for another database and make it the default:
```python
from one.api import ONE
ONE.setup(base_url='https://test.alyx.internationalbrainlab.org', make_default=True)
one = ONE() # Connected to https://test.alyx.internationalbrainlab.org
```
## Using ONE
To search for sessions:
```python
from one.api import ONE
one = ONE()
print(one.search_terms()) # A list of search keyword arguments
# Search session with wheel timestamps from January 2021 onward
eids = one.search(date_range=['2021-01-01',], dataset='wheel.timestamps')
['d3372b15-f696-4279-9be5-98f15783b5bb']
# Search for project sessions with two probes
eids = one.search(data=['probe00', 'probe01'], project='brainwide')
```
To load data:
```python
from one.api import ONE
one = ONE()
# Load an ALF object
eid = 'a7540211-2c60-40b7-88c6-b081b2213b21'
wheel = one.load_object(eid, 'wheel')
# Load a specific dataset
eid = 'a7540211-2c60-40b7-88c6-b081b2213b21'
ts = one.load_dataset(eid, 'wheel.timestamps', collection='alf')
# Download, but not load, a dataset
filename = one.load_dataset(eid, 'wheel.timestamps', download_only=True)
```
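
Before loading, you can list which datasets exist for a session. A quick sketch, reusing the public session from the example above:
```python
from one.api import ONE
one = ONE()

eid = 'a7540211-2c60-40b7-88c6-b081b2213b21'
# List the datasets recorded for this session
datasets = one.list_datasets(eid)
print(datasets[:5])
```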
To [share data](https://int-brain-lab.github.io/ONE/notebooks/data_sharing.html):
```python
from one.api import One
one = One.setup() # Enter the location of the ALF datasets when prompted
```
Further examples and tutorials can be found in the [documentation](https://int-brain-lab.github.io/ONE/).
| PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/doc/latex_doc.py

import re
from os import getenv
from mathics import settings
from mathics.core.evaluation import Message, Print
from mathics.doc.common_doc import (
CONSOLE_RE,
DL_ITEM_RE,
DL_RE,
END_LINE_SENTINAL,
HYPERTEXT_RE,
IMG_PNG_RE,
IMG_RE,
LATEX_RE,
LIST_ITEM_RE,
LIST_RE,
MATHICS_RE,
PYTHON_RE,
QUOTATIONS_RE,
REF_RE,
SPECIAL_COMMANDS,
SUBSECTION_END_RE,
SUBSECTION_RE,
TESTCASE_OUT_RE,
DocChapter,
DocPart,
DocSection,
DocTest,
DocTests,
DocText,
Documentation,
XMLDoc,
_replace_all,
gather_tests,
get_results_by_test,
post_sub,
pre_sub,
sorted_chapters,
)
from mathics.doc.utils import slugify
# We keep track of the number of \begin{asy}'s we see so that
# we can associate Asymptote file numbers with where they appear
# in the document.
next_asy_number = 1
ITALIC_RE = re.compile(r"(?s)<(?P<tag>i)>(?P<content>.*?)</(?P=tag)>")
LATEX_ARRAY_RE = re.compile(
r"(?s)\\begin\{testresult\}\\begin\{array\}\{l\}(.*?)"
r"\\end\{array\}\\end\{testresult\}"
)
LATEX_CHAR_RE = re.compile(r"(?<!\\)(\^)")
LATEX_CONSOLE_RE = re.compile(r"\\console\{(.*?)\}")
LATEX_INLINE_END_RE = re.compile(r"(?s)(?P<all>\\lstinline'[^']*?'\}?[.,;:])")
LATEX_TEXT_RE = re.compile(
r"(?s)\\text\{([^{}]*?(?:[^{}]*?\{[^{}]*?(?:[^{}]*?\{[^{}]*?\}[^{}]*?)*?"
r"[^{}]*?\}[^{}]*?)*?[^{}]*?)\}"
)
LATEX_TESTOUT_RE = re.compile(
r"(?s)\\begin\{(?P<tag>testmessage|testprint|testresult)\}"
r"(?P<content>.*?)\\end\{(?P=tag)\}"
)
LATEX_TESTOUT_DELIM_RE = re.compile(r", ")
# The goal of the following pattern is to enclose the numbers that appear in
# expressions produced by tests between ```\allowbreak{}```. The pattern
# matches negative numbers, or positive numbers preceded by a space character.
# To avoid applying the replacement when the number is part of a LaTeX
# parameter (for instance ```\includegraphics[width=5cm]{...}```), the space
# before the number must be avoided. For example,
# ```\includegraphics[width= 5cm]{...}``` would be rewritten as
# \includegraphics[width=\allowbreak{}5\allowbreak{}cm]{...}, which is not a
# valid LaTeX command.
NUMBER_RE = re.compile(r"([ -])(\d*(?<!\.)\.\d+|\d+\.(?!\.)\d*|\d+)")
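# Illustrative example: via repl_number() in post_process_latex() below, a
# matched number such as "12345.6789" is split into three-digit groups joined
# by discretionary breaks and wrapped in \allowbreak{} guards, so long numbers
# can wrap across lines inside test results.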
OUTSIDE_ASY_RE = re.compile(r"(?s)((?:^|\\end\{asy\}).*?(?:$|\\begin\{asy\}))")
def escape_latex_code(text) -> str:
"""Escape verbatim Mathics input"""
text = escape_latex_output(text)
escape_char = get_latex_escape_char(text)
return "\\lstinline%s%s%s" % (escape_char, text, escape_char)
def escape_latex(text):
"""Escape documentation text"""
def repl_python(match):
return (
r"""\begin{lstlisting}[style=python]
%s
\end{lstlisting}"""
% match.group(1).strip()
)
text, post_substitutions = pre_sub(PYTHON_RE, text, repl_python)
text = _replace_all(
text,
[
("\\", "\\\\"),
("{", "\\{"),
("}", "\\}"),
("~", "\\~{ }"),
("&", "\\&"),
("%", "\\%"),
("#", "\\#"),
],
)
def repl(match):
text = match.group(1)
if text:
text = _replace_all(text, [("\\'", "'"), ("^", "\\^")])
escape_char = get_latex_escape_char(text)
text = LATEX_RE.sub(
lambda m: "%s%s\\codevar{\\textit{%s}}%s\\lstinline%s"
% (escape_char, m.group(1), m.group(2), m.group(3), escape_char),
text,
)
if text.startswith(" "):
text = r"\ " + text[1:]
if text.endswith(" "):
text = text[:-1] + r"\ "
return "\\code{\\lstinline%s%s%s}" % (escape_char, text, escape_char)
else:
            # treat double '' literally
return "''"
text = MATHICS_RE.sub(repl, text)
text = LATEX_RE.sub(
lambda m: "%s\\textit{%s}%s" % (m.group(1), m.group(2), m.group(3)), text
)
text = text.replace("\\\\'", "'")
def repl_dl(match):
text = match.group(1)
text = DL_ITEM_RE.sub(
lambda m: "\\%(tag)s{%(content)s}\n" % m.groupdict(), text
)
return "\\begin{definitions}%s\\end{definitions}" % text
text = DL_RE.sub(repl_dl, text)
def repl_list(match):
tag = match.group("tag")
content = match.group("content")
content = LIST_ITEM_RE.sub(lambda m: "\\item %s\n" % m.group(1), content)
env = "itemize" if tag == "ul" else "enumerate"
return "\\begin{%s}%s\\end{%s}" % (env, content, env)
text = LIST_RE.sub(repl_list, text)
# FIXME: get this from MathicsScanner
text = _replace_all(
text,
[
("$", r"\$"),
("\00f1", r"\~n"),
("\u00e7", r"\c{c}"),
("\u00e9", r"\'e"),
("\u00ea", r"\^e"),
("\u03b3", r"$\gamma$"),
("\u03b8", r"$\theta$"),
("\u03bc", r"$\mu$"),
("\u03c0", r"$\pi$"),
("\u03d5", r"$\phi$"),
("\u2107", r"$\mathrm{e}$"),
("\u222b", r"\int"),
("\u2243", r"$\simeq$"),
("\u2026", r"$\dots$"),
("\u2260", r"$\ne$"),
("\u2264", r"$\le$"),
("\u2265", r"$\ge$"),
("\u22bb", r"$\oplus$"), # The WL veebar-looking symbol isn't in AMSLaTeX
("\u22bc", r"$\barwedge$"),
("\u22bd", r"$\veebar$"),
("\u21d2", r"$\Rightarrow$"),
("\uf74c", r"d"),
],
)
def repl_char(match):
char = match.group(1)
return {
"^": "$^\\wedge$",
}[char]
text = LATEX_CHAR_RE.sub(repl_char, text)
def repl_img(match):
src = match.group("src")
title = match.group("title")
label = match.group("label")
return r"""\begin{figure*}[htp]
\centering
\includegraphics[width=\textwidth]{images/%(src)s}
\caption{%(title)s}
\label{%(label)s}
\end{figure*}""" % {
"src": src,
"title": title,
"label": label,
}
text = IMG_RE.sub(repl_img, text)
def repl_imgpng(match):
src = match.group("src")
return r"\includegraphics[scale=1.0]{images/%(src)s}" % {"src": src}
text = IMG_PNG_RE.sub(repl_imgpng, text)
def repl_ref(match):
return r"figure \ref{%s}" % match.group("label")
text = REF_RE.sub(repl_ref, text)
def repl_quotation(match):
return r"``%s''" % match.group(1)
def repl_hypertext(match) -> str:
tag = match.group("tag")
content = match.group("content")
        #
        # Sometimes the URL does not fit in 80 characters. Then, to keep
        # flake8 from complaining, and to have a nice, readable ASCII
        # representation, we would like to split the URL across several
        # lines, with indentation spaces.
        #
        # The following line removes those extra characters, which would
        # otherwise spoil the URL, producing a single-line, space-free
        # string.
        #
content = content.replace(" ", "").replace("\n", "")
if tag == "em":
return r"\emph{%s}" % content
elif tag == "url":
text = match.group("text")
if text is None:
return "\\url{%s}" % content
else:
# If we have "/doc" as the beginning the URL link
# then is is a link to a section
# in this manual, so use "\ref" rather than "\href'.
if content.find("/doc/") == 0:
slug = "/".join(content.split("/")[2:]).rstrip("/")
return "%s of section~\\ref{%s}" % (text, latex_label_safe(slug))
else:
return "\\href{%s}{%s}" % (content, text)
return "\\href{%s}{%s}" % (content, text)
text = QUOTATIONS_RE.sub(repl_quotation, text)
text = HYPERTEXT_RE.sub(repl_hypertext, text)
def repl_console(match):
tag = match.group("tag")
content = match.group("content")
content = content.strip()
content = content.replace(r"\$", "$")
if tag == "con":
return "\\console{%s}" % content
else:
return "\\begin{lstlisting}\n%s\n\\end{lstlisting}" % content
text = CONSOLE_RE.sub(repl_console, text)
def repl_italic(match):
content = match.group("content")
return "\\emph{%s}" % content
text = ITALIC_RE.sub(repl_italic, text)
# def repl_asy(match):
# """
# Ensure \begin{asy} and \end{asy} are on their own line,
# but there shall be no extra empty lines
# """
# #tag = match.group(1)
# #return '\n%s\n' % tag
# #print "replace"
# return '\\end{asy}\n\\begin{asy}'
# text = LATEX_BETWEEN_ASY_RE.sub(repl_asy, text)
def repl_subsection(match):
return "\n\\subsection*{%s}\n" % match.group(1)
text = SUBSECTION_RE.sub(repl_subsection, text)
text = SUBSECTION_END_RE.sub("", text)
for key, (xml, tex) in SPECIAL_COMMANDS.items():
# "\" has been escaped already => 2 \
text = text.replace("\\\\" + key, tex)
text = post_sub(text, post_substitutions)
return text
def escape_latex_output(text) -> str:
"""Escape Mathics output"""
text = _replace_all(
text,
[
("\\", "\\\\"),
("{", "\\{"),
("}", "\\}"),
("~", "\\~"),
("&", "\\&"),
("%", "\\%"),
("$", r"\$"),
("_", "\\_"),
],
)
return text
def get_latex_escape_char(text):
for escape_char in ("'", "~", "@"):
if escape_char not in text:
return escape_char
raise ValueError
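
# For instance, get_latex_escape_char("it's") returns '~': the first
# candidate in ("'", "~", "@") that does not occur in the text.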
def latex_label_safe(s: str) -> str:
s = s.replace("\\$", "dollar-")
s = s.replace("$", "dollar-")
return s
def post_process_latex(result):
"""
Some post-processing hacks of generated LaTeX code to handle linebreaks
"""
WORD_SPLIT_RE = re.compile(r"(\s+|\\newline\s*)")
def wrap_word(word):
if word.strip() == r"\newline":
return word
return r"\text{%s}" % word
def repl_text(match):
text = match.group(1)
if not text:
return r"\text{}"
words = WORD_SPLIT_RE.split(text)
assert len(words) >= 1
if len(words) > 1:
text = ""
index = 0
while index < len(words) - 1:
text += "%s%s\\allowbreak{}" % (
wrap_word(words[index]),
wrap_word(words[index + 1]),
)
index += 2
text += wrap_word(words[-1])
else:
text = r"\text{%s}" % words[0]
if not text:
return r"\text{}"
text = text.replace("><", r">}\allowbreak\text{<")
return text
def repl_out_delim(match):
return ",\\allowbreak{} "
def repl_number(match):
guard = r"\allowbreak{}"
inter_groups_pre = r"\,\discretionary{\~{}}{\~{}}{}"
inter_groups_post = r"\discretionary{\~{}}{\~{}}{}"
number = match.group(1) + match.group(2)
parts = number.split(".")
if len(number) <= 3:
return number
assert 1 <= len(parts) <= 2
pre_dec = parts[0]
groups = []
while pre_dec:
groups.append(pre_dec[-3:])
pre_dec = pre_dec[:-3]
pre_dec = inter_groups_pre.join(reversed(groups))
if len(parts) == 2:
post_dec = parts[1]
groups = []
while post_dec:
groups.append(post_dec[:3])
post_dec = post_dec[3:]
post_dec = inter_groups_post.join(groups)
result = pre_dec + "." + post_dec
else:
result = pre_dec
return guard + result + guard
def repl_array(match):
content = match.group(1)
lines = content.split("\\\\")
content = "".join(
r"\begin{dmath*}%s\end{dmath*}" % line for line in lines if line.strip()
)
return r"\begin{testresultlist}%s\end{testresultlist}" % content
def repl_out(match):
tag = match.group("tag")
content = match.group("content")
content = LATEX_TESTOUT_DELIM_RE.sub(repl_out_delim, content)
content = NUMBER_RE.sub(repl_number, content)
content = content.replace(r"\left[", r"\left[\allowbreak{}")
return "\\begin{%s}%s\\end{%s}" % (tag, content, tag)
def repl_inline_end(match):
"""Prevent linebreaks between inline code and sentence delimeters"""
code = match.group("all")
if code[-2] == "}":
code = code[:-2] + code[-1] + code[-2]
return r"\mbox{%s}" % code
def repl_console(match):
code = match.group(1)
code = code.replace("/", r"/\allowbreak{}")
return r"\console{%s}" % code
def repl_nonasy(match):
result = match.group(1)
result = LATEX_TEXT_RE.sub(repl_text, result)
result = LATEX_TESTOUT_RE.sub(repl_out, result)
result = LATEX_ARRAY_RE.sub(repl_array, result)
result = LATEX_INLINE_END_RE.sub(repl_inline_end, result)
result = LATEX_CONSOLE_RE.sub(repl_console, result)
return result
return OUTSIDE_ASY_RE.sub(repl_nonasy, result)
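
# Sketch of the intended call flow (names from this module; `doc_data` is
# normally loaded from the doctest LaTeX pickle configured in settings):
#
#     documentation = LaTeXMathicsDocumentation()
#     latex_str = documentation.latex(doc_data)  # applies post_process_latex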
def strip_system_prefix(name):
if name.startswith("System`"):
stripped_name = name[len("System`") :]
# don't return Private`sym for System`Private`sym
if "`" not in stripped_name:
return stripped_name
return name
class LaTeXDocTest(DocTest):
"""
DocTest formatting rules:
    * `>>` Marks a test case; it will also appear as part of
       the documentation.
    * `#>` Marks a private test, one that does not appear as part of
       the documentation.
    * `X>` Shows the example in the docs, but disables testing the example.
    * `S>` Shows the example in the docs, but disables testing if environment
       variable SANDBOX is set.
    * `=` Compares the result text.
    * `:` Compares an (error) message.
    * `|` Prints output.
"""
def __init__(self, index, testcase, key_prefix=None):
def strip_sentinal(line):
"""Remove END_LINE_SENTINAL from the end of a line if it appears.
Some editors like to strip blanks at the end of a line.
Since the line ends in END_LINE_SENTINAL which isn't blank,
any blanks that appear before will be preserved.
            Some tests require some lines to be blank or empty because
            Mathics output can be that way.
"""
if line.endswith(END_LINE_SENTINAL):
line = line[: -len(END_LINE_SENTINAL)]
# Also remove any remaining trailing blanks since that
# seems *also* what we want to do.
return line.strip()
self.index = index
self.result = None
self.outs = []
# Private test cases are executed, but NOT shown as part of the docs
self.private = testcase[0] == "#"
# Ignored test cases are NOT executed, but shown as part of the docs
# Sandboxed test cases are NOT executed if environment SANDBOX is set
if testcase[0] == "X" or (testcase[0] == "S" and getenv("SANDBOX", False)):
self.ignore = True
# substitute '>' again so we get the correct formatting
testcase[0] = ">"
else:
self.ignore = False
self.test = strip_sentinal(testcase[1])
self.key = None
if key_prefix:
self.key = tuple(key_prefix + (index,))
outs = testcase[2].splitlines()
for line in outs:
line = strip_sentinal(line)
if line:
if line.startswith("."):
text = line[1:]
if text.startswith(" "):
text = text[1:]
text = "\n" + text
if self.result is not None:
self.result += text
elif self.outs:
self.outs[-1].text += text
continue
match = TESTCASE_OUT_RE.match(line)
if not match:
continue
symbol, text = match.group(1), match.group(2)
text = text.strip()
if symbol == "=":
self.result = text
elif symbol == ":":
out = Message("", "", text)
self.outs.append(out)
elif symbol == "|":
out = Print(text)
self.outs.append(out)
def __str__(self):
return self.test
def latex(self, doc_data: dict) -> str:
"""
Produces the LaTeX-formatted fragment that corresponds the
test sequence and results for a single Builtin that has been run.
The key for doc_data is the part/chapter/section{/subsection} test number
and the value contains Result object data turned into a dictionary.
In particular, each test in the test sequence includes the, input test,
the result produced and any additional error output.
The LaTeX-formatted string fragment is returned.
"""
if self.key is None:
return ""
output_for_key = doc_data.get(self.key, None)
if output_for_key is None:
output_for_key = get_results_by_test(self.test, self.key, doc_data)
text = f"%% Test {'/'.join((str(x) for x in self.key))}\n"
text += "\\begin{testcase}\n"
text += "\\test{%s}\n" % escape_latex_code(self.test)
results = output_for_key.get("results", [])
for result in results:
for out in result["out"]:
kind = "message" if out["message"] else "print"
text += "\\begin{test%s}%s\\end{test%s}" % (
kind,
escape_latex_output(out["text"]),
kind,
)
test_text = result["result"]
if test_text: # is not None and result['result'].strip():
asy_count = test_text.count("\\begin{asy}")
                if asy_count > 0:
global next_asy_number
text += f"%% mathics-{next_asy_number}.asy\n"
next_asy_number += asy_count
text += "\\begin{testresult}%s\\end{testresult}" % result["result"]
text += "\\end{testcase}"
return text
class LaTeXDocumentation(Documentation):
def __str__(self):
return "\n\n\n".join(str(part) for part in self.parts)
def get_section(self, part_slug, chapter_slug, section_slug):
part = self.parts_by_slug.get(part_slug)
if part:
chapter = part.chapters_by_slug.get(chapter_slug)
if chapter:
return chapter.sections_by_slug.get(section_slug)
return None
def latex(
self,
doc_data: dict,
quiet=False,
filter_parts=None,
filter_chapters=None,
filter_sections=None,
) -> str:
"""Render self as a LaTeX string and return that.
`output` is not used here but passed along to the bottom-most
level in getting expected test results.
"""
parts = []
appendix = False
for part in self.parts:
if filter_parts:
if part.title not in filter_parts:
continue
text = part.latex(
doc_data,
quiet,
filter_chapters=filter_chapters,
filter_sections=filter_sections,
)
if part.is_appendix and not appendix:
appendix = True
text = "\n\\appendix\n" + text
parts.append(text)
result = "\n\n".join(parts)
result = post_process_latex(result)
return result
class LaTeXDoc(XMLDoc):
"""A class to hold our internal XML-like format data.
The `latex()` method can turn this into LaTeX.
Mathics core also uses this in getting usage strings (`??`).
"""
def __init__(self, doc, title, section):
self.title = title
if section:
chapter = section.chapter
part = chapter.part
# Note: we elide section.title
key_prefix = (part.title, chapter.title, title)
else:
key_prefix = None
self.rawdoc = doc
self.items = gather_tests(
self.rawdoc, LaTeXDocTests, LaTeXDocTest, LaTeXDocText, key_prefix
)
return
def latex(self, doc_data: dict):
if len(self.items) == 0:
if hasattr(self, "rawdoc") and len(self.rawdoc) != 0:
# We have text but no tests
return escape_latex(self.rawdoc)
return "\n".join(
item.latex(doc_data) for item in self.items if not item.is_private()
)
class LaTeXMathicsDocumentation(Documentation):
def __init__(self, want_sorting=False):
self.doc_chapter_fn = LaTeXDocChapter
self.doc_dir = settings.DOC_DIR
self.doc_fn = LaTeXDoc
self.doc_data_file = settings.get_doctest_latex_data_path(
should_be_readable=True
)
self.doc_guide_section_fn = LaTeXDocGuideSection
self.doc_part_fn = LaTeXDocPart
self.doc_section_fn = LaTeXDocSection
self.doc_subsection_fn = LaTeXDocSubsection
self.doctest_latex_pcl_path = settings.DOCTEST_LATEX_DATA_PCL
self.parts = []
self.parts_by_slug = {}
self.title = "Overview"
self.gather_doctest_data()
def latex(
self,
doc_data: dict,
quiet=False,
filter_parts=None,
filter_chapters=None,
filter_sections=None,
) -> str:
"""Render self as a LaTeX string and return that.
`output` is not used here but passed along to the bottom-most
level in getting expected test results.
"""
parts = []
appendix = False
for part in self.parts:
if filter_parts:
if part.title not in filter_parts:
continue
text = part.latex(
doc_data,
quiet,
filter_chapters=filter_chapters,
filter_sections=filter_sections,
)
if part.is_appendix and not appendix:
appendix = True
text = "\n\\appendix\n" + text
parts.append(text)
result = "\n\n".join(parts)
result = post_process_latex(result)
return result
class LaTeXDocPart(DocPart):
def latex(
self, doc_data: dict, quiet=False, filter_chapters=None, filter_sections=None
) -> str:
"""Render this Part object as LaTeX string and return that.
`output` is not used here but passed along to the bottom-most
level in getting expected test results.
"""
if self.is_reference:
chapter_fn = sorted_chapters
else:
chapter_fn = lambda x: x
result = "\n\n\\part{%s}\n\n" % escape_latex(self.title) + (
"\n\n".join(
chapter.latex(doc_data, quiet, filter_sections=filter_sections)
for chapter in chapter_fn(self.chapters)
if not filter_chapters or chapter.title in filter_chapters
)
)
if self.is_reference:
result = "\n\n\\referencestart" + result
return result
class LaTeXDocChapter(DocChapter):
def latex(self, doc_data: dict, quiet=False, filter_sections=None) -> str:
"""Render this Chapter object as LaTeX string and return that.
`output` is not used here but passed along to the bottom-most
level in getting expected test results.
"""
if not quiet:
print(f"Formatting Chapter {self.title}")
intro = self.doc.latex(doc_data).strip()
if intro:
short = "short" if len(intro) < 300 else ""
intro = "\\begin{chapterintro%s}\n%s\n\n\\end{chapterintro%s}" % (
short,
intro,
short,
)
chapter_sections = [
("\n\n\\chapter{%(title)s}\n\\chapterstart\n\n%(intro)s")
% {"title": escape_latex(self.title), "intro": intro},
"\\chaptersections\n",
"\n\n".join(
section.latex(doc_data, quiet)
for section in sorted(self.sections)
if not filter_sections or section.title in filter_sections
),
"\n\\chapterend\n",
]
return "".join(chapter_sections)
class LaTeXDocSection(DocSection):
def __init__(
self,
chapter,
title: str,
text: str,
operator,
installed=True,
in_guide=False,
summary_text="",
):
self.chapter = chapter
self.in_guide = in_guide
self.installed = installed
self.operator = operator
self.slug = slugify(title)
self.subsections = []
self.subsections_by_slug = {}
self.summary_text = summary_text
self.title = title
if text.count("<dl>") != text.count("</dl>"):
raise ValueError(
"Missing opening or closing <dl> tag in "
"{} documentation".format(title)
)
# Needs to come after self.chapter is initialized since
# XMLDoc uses self.chapter.
self.doc = LaTeXDoc(text, title, self)
chapter.sections_by_slug[self.slug] = self
def latex(self, doc_data: dict, quiet=False) -> str:
"""Render this Section object as LaTeX string and return that.
`output` is not used here but passed along to the bottom-most
level in getting expected test results.
"""
if not quiet:
# The leading spaces help show chapter level.
print(f" Formatting Section {self.title}")
title = escape_latex(self.title)
if self.operator:
title += " (\\code{%s})" % escape_latex_code(self.operator)
index = (
r"\index{%s}" % escape_latex(self.title)
if self.chapter.part.is_reference
else ""
)
content = self.doc.latex(doc_data)
sections = "\n\n".join(section.latex(doc_data) for section in self.subsections)
slug = f"{self.chapter.part.slug}/{self.chapter.slug}/{self.slug}"
section_string = (
"\n\n\\section*{%s}{%s}\n" % (title, index)
+ "\n\\label{%s}" % latex_label_safe(slug)
+ "\n\\sectionstart\n\n"
+ f"{content}"
+ ("\\addcontentsline{toc}{section}{%s}" % title)
+ sections
+ "\\sectionend"
)
return section_string
class LaTeXDocGuideSection(DocSection):
"""An object for a Documented Guide Section.
A Guide Section is part of a Chapter. "Colors" or "Special Functions"
are examples of Guide Sections, and each contains a number of Sections.
like NamedColors or Orthogonal Polynomials.
"""
    def __init__(
        self, chapter, title: str, text: str, submodule, installed: bool = True
    ):
self.chapter = chapter
self.doc = LaTeXDoc(text, title, None)
self.in_guide = False
self.installed = installed
self.section = submodule
self.slug = slugify(title)
self.subsections = []
self.subsections_by_slug = {}
self.title = title
# FIXME: Sections never are operators. Subsections can have
# operators though. Fix up the view and searching code not to
# look for the operator field of a section.
self.operator = False
if text.count("<dl>") != text.count("</dl>"):
raise ValueError(
"Missing opening or closing <dl> tag in "
"{} documentation".format(title)
)
# print("YYY Adding section", title)
chapter.sections_by_slug[self.slug] = self
def get_tests(self):
# FIXME: The below is a little weird for Guide Sections.
# Figure out how to make this clearer.
# A guide section's subsection are Sections without the Guide.
# it is *their* subsections where we generally find tests.
for section in self.subsections:
if not section.installed:
continue
for subsection in section.subsections:
# FIXME we are omitting the section title here...
if not subsection.installed:
continue
for doctests in subsection.items:
yield doctests.get_tests()
def latex(self, doc_data: dict, quiet=False):
"""Render this Guide Section object as LaTeX string and return that.
`output` is not used here but passed along to the bottom-most
level in getting expected test results.
"""
if not quiet:
# The leading spaces help show chapter level.
print(f" Formatting Guide Section {self.title}")
intro = self.doc.latex(doc_data).strip()
if intro:
short = "short" if len(intro) < 300 else ""
intro = "\\begin{guidesectionintro%s}\n%s\n\n\\end{guidesectionintro%s}" % (
short,
intro,
short,
)
guide_sections = [
(
"\n\n\\section{%(title)s}\n\\sectionstart\n\n%(intro)s"
"\\addcontentsline{toc}{section}{%(title)s}"
)
% {"title": escape_latex(self.title), "intro": intro},
"\n\n".join(section.latex(doc_data) for section in self.subsections),
]
return "".join(guide_sections)
class LaTeXDocSubsection:
"""An object for a Documented Subsection.
A Subsection is part of a Section.
"""
def __init__(
self,
chapter,
section,
title,
text,
operator=None,
installed=True,
in_guide=False,
summary_text="",
):
"""
Information that goes into a subsection object. This can be a written text, or
text extracted from the docstring of a builtin module or class.
About some of the parameters...
Some subsections are contained in a grouping module and need special work to
get the grouping module name correct.
For example the Chapter "Colors" is a module so the docstring text for it is in
mathics/builtin/colors/__init__.py . In mathics/builtin/colors/named-colors.py we have
the "section" name for the class Read (the subsection) inside it.
"""
self.doc = LaTeXDoc(text, title, section)
self.chapter = chapter
self.in_guide = in_guide
self.installed = installed
self.operator = operator
self.section = section
self.slug = slugify(title)
self.subsections = []
self.title = title
if in_guide:
            # Tests haven't been picked out from the doc string yet.
# Gather them here.
self.items = gather_tests(text, LaTeXDocTests, LaTeXDocTest, LaTeXDocText)
else:
self.items = []
if text.count("<dl>") != text.count("</dl>"):
raise ValueError(
"Missing opening or closing <dl> tag in "
"{} documentation".format(title)
)
self.section.subsections_by_slug[self.slug] = self
def latex(self, doc_data: dict, quiet=False, chapters=None):
"""Render this Subsection object as LaTeX string and return that.
`output` is not used here but passed along to the bottom-most
level in getting expected test results.
"""
if not quiet:
# The leading spaces help show chapter, and section nesting level.
print(f" Formatting Subsection Section {self.title}")
title = escape_latex(self.title)
if self.operator:
title += " (\\code{%s})" % escape_latex_code(self.operator)
index = (
r"\index{%s}" % escape_latex(self.title)
if self.chapter.part.is_reference
else ""
)
content = self.doc.latex(doc_data)
slug = f"{self.chapter.part.slug}/{self.chapter.slug}/{self.section.slug}/{self.slug}"
section_string = (
"\n\n\\subsection*{%(title)s}%(index)s\n"
+ "\n\\label{%s}" % latex_label_safe(slug)
+ "\n\\subsectionstart\n\n%(content)s"
"\\addcontentsline{toc}{subsection}{%(title)s}"
"%(sections)s"
"\\subsectionend"
) % {
"title": title,
"index": index,
"content": content,
"sections": "\n\n".join(
section.latex(doc_data, quiet) for section in self.subsections
),
}
return section_string
class LaTeXDocTests(DocTests):
def latex(self, doc_data: dict):
if len(self.tests) == 0:
return "\n"
testLatexStrings = [
test.latex(doc_data) for test in self.tests if not test.private
]
testLatexStrings = [t for t in testLatexStrings if len(t) > 1]
if len(testLatexStrings) == 0:
return "\n"
return "\\begin{tests}%%\n%s%%\n\\end{tests}" % ("%\n".join(testLatexStrings))
class LaTeXDocText(DocText):
def latex(self, doc_data):
        return escape_latex(self.text)