code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
import pexpect
import getpass
import sys
#
# Pattern constants handed to pexpect.expect(); these are regular expressions.
#
TERMINAL_PROMPT = 'Terminal type?'
TERMINAL_TYPE = 'vt100'
COMMAND_PROMPT = '[$#\>] ' ### This is way too simple for industrial use :-) ...
# This is the prompt we get if SSH does not have
# the remote host's public key stored in the cache.
SSH_NEWKEY = 'Are you sure you want to continue connecting (yes/no)?'
PASSWORD_PROMPT_MYSQL = 'Enter password: '
class remoteCmd:
def __init__(self, host, user, password=None):
self.host = host
self.user = user
self.password = password
self.debug = 1
#
# Login via SSH
#
cmd = '/usr/bin/ssh -l %s %s' % (user, host)
if self.debug:
print cmd
self.child = pexpect.spawn(cmd)
#i = self.child.expect([pexpect.TIMEOUT, SSH_NEWKEY, 'password: '])
##i = self.child.expect([TIMEOUT, SSH_NEWKEY, 'password: '])
#if i == 0: # Timeout
# print 'ERROR!'
# print 'SSH could not login. Here is what SSH said:'
# print self.child.before, self.child.after
# sys.exit (1)
#if i == 1: # SSH does not have the public key. Just accept it.
# self.child.sendline ('yes')
# self.child.expect ('password: ')
#self.child.sendline(password)
# Now we are either at the command prompt or
# the login process is asking for our terminal type.
i = self.child.expect ([COMMAND_PROMPT, TERMINAL_PROMPT])
if i == 1:
self.child.sendline (TERMINAL_TYPE)
self.child.expect (COMMAND_PROMPT)
def rexec(self, cmd, only_output=1):
#
# Now we should be at the command prompt and ready to run some commands.
#
self.child.sendline (cmd)
self.child.expect (COMMAND_PROMPT)
#output = self.child.before.split('\n')
return self.child.before
if only_output:
return output[1]
else:
return output
def close(self):
# Now exit the remote host.
self.child.sendline ('exit')
self.child.expect(pexpect.EOF)
class remoteCopy:
def __init__(self):
self.srchost = ''
self.srcuser = ''
self.srcdir = ''
self.dsthost = ''
self.dstuser = ''
self.dstdir = ''
self.password = ''
def copy(self):
source = ''
dest = ''
if self.srchost and self.srcuser:
source = '%s@%s:' % (self.srcuser, self.srchost)
if self.dsthost and self.dstuser:
dest = '%s@%s:' % (self.dstuser, self.dsthost)
self.cmd = '/usr/bin/scp %s%s %s%s'%(source, self.srcdir, dest, self.dstdir)
self.child = pexpect.spawn(self.cmd)
i = self.child.expect([pexpect.TIMEOUT, SSH_NEWKEY, 'Password: '])
if i == 0: # Timeout
print 'ERROR!'
print 'SSH could not login. Here is what SSH said:'
print self.child.before, self.child.after
sys.exit (1)
if i == 1: # SSH does not have the public key. Just accept it.
self.child.sendline ('yes')
self.child.expect ('Password: ')
self.child.sendline(self.password)
self.child.sendline ('\r\n')
print self.child.before, self.child.after | Adytum-PyMonitor | /Adytum-PyMonitor-1.0.5.tar.bz2/Adytum-PyMonitor-1.0.5/lib/net/ssh.py | ssh.py |
from __future__ import nested_scopes
import os, os.path, shutil, glob, re, sys, getopt, stat
try:
import win32file
except:
win32file = None
class Cookie:
    """Bag of command-line options plus the current sink/target roots."""

    def __init__(self):
        # Roots of the tree being copied from (sink) and to (target).
        self.sink_root = ""
        self.target_root = ""
        # Behaviour flags, all off by default.
        self.quiet = 0
        self.recursive = 0
        self.relative = 0
        self.dry_run = 0
        self.time = 0
        self.update = 0
        self.cvs_ignore = 0
        self.ignore_time = 0
        self.delete = 0
        self.delete_excluded = 0
        self.size_only = 0
        self.existing = 0
        # Slack, in seconds, when comparing file timestamps.
        self.modify_window = 2
        # List of (sign, regexp) include/exclude filters.
        self.filters = []
        # File names compare case-insensitively on Windows.
        self.case_sensitivity = 0
        if os.name == "nt":
            self.case_sensitivity = re.I
def visit(cookie, dirname, names):
    """Copy files names from sink_root + (dirname - sink_root) to target_root + (dirname - sink_root).

    Used both directly and as the callback for os.path.walk().  `names`
    is the list of entries in `dirname`; entries excluded by the filters
    are deleted from it in place so the walk does not descend into them.
    """
    # Strip the sink root (and its trailing separator, when the root has a
    # final component) to get this directory's path relative to the root.
    if os.path.split(cookie.sink_root)[1]: # Should be tested with (C:\Cvs -> C:\)! (C:\Archives\MyDatas\UltraEdit -> C:\Archives\MyDatas) (Cvs -> "")! (Archives\MyDatas\UltraEdit -> Archives\MyDatas) (\Cvs -> \)! (\Archives\MyDatas\UltraEdit -> Archives\MyDatas)
        dirname = dirname[len(cookie.sink_root) + 1:]
    else:
        dirname = dirname[len(cookie.sink_root):]
    target_dir = os.path.join(cookie.target_root, dirname)
    if not os.path.isdir(target_dir):
        makeDir(cookie, target_dir)
    sink_dir = os.path.join(cookie.sink_root, dirname)
    if cookie.delete and os.path.isdir(target_dir):
        # Delete files and folders in target not present in sink.
        for name in os.listdir(target_dir):
            if not name in names:
                target = os.path.join(target_dir, name)
                if os.path.isfile(target):
                    removeFile(cookie, target)
                elif os.path.isdir(target):
                    removeDir(cookie, target)
                else:
                    # Neither file nor directory (e.g. broken link): ignore.
                    pass
    # Build the filter list: per-directory .cvsignore first, then the
    # command-line include/exclude patterns.
    filters = []
    if cookie.cvs_ignore:
        ignore = os.path.join(sink_dir, ".cvsignore")
        if os.path.isfile(ignore):
            filters = convertPatterns(ignore, "-")
    filters += cookie.filters
    if filters:
        name_index = 0
        while name_index < len(names):
            name = names[name_index]
            path = os.path.join(dirname, name)
            path = convertPath(path)
            # Directory patterns are matched with a trailing '/'.
            if os.path.isdir(os.path.join(sink_dir, name)):
                path += "/"
            for filter in filters:
                if re.search(filter[1], path, cookie.case_sensitivity):
                    if filter[0] == '-':
                        sink = os.path.join(sink_dir, name)
                        if cookie.delete_excluded:
                            if os.path.isfile(sink):
                                removeFile(cookie, sink)
                            elif os.path.isdir(sink):
                                removeDir(cookie, sink)
                            else:
                                # Fix: the original raised a bare string here,
                                # inconsistent with the Exception instances
                                # raised below (and illegal in modern Python).
                                raise Exception("sink: %s not file not dir" % sink)
                        del(names[name_index])
                        name_index -= 1
                    elif filter[0] == '+':
                        break
            name_index += 1
    for name in names:
        # Copy files and folders from sink to target.
        sink = os.path.join(sink_dir, name)
        target = os.path.join(target_dir, name)
        if os.path.exists(target):
            # Target already exists:
            if os.path.isfile(sink):
                if os.path.isfile(target):
                    # file -> file: copy only when out of date.
                    if shouldUpdate(cookie, sink, target):
                        updateFile(cookie, sink, target)
                elif os.path.isdir(target):
                    # file -> folder: replace the folder with the file.
                    removeDir(cookie, target)
                    copyFile(cookie, sink, target)
                else:
                    raise Exception("file-???")
            elif os.path.isdir(sink):
                if os.path.isfile(target):
                    # folder -> file: replace the file with a folder.
                    removeFile(cookie, target)
                    makeDir(cookie, target)
            else:
                raise Exception("???-*")
        elif not cookie.existing:
            # Target does not exist yet:
            if os.path.isfile(sink):
                # file
                copyFile(cookie, sink, target)
            elif os.path.isdir(sink):
                # folder
                makeDir(cookie, target)
            else:
                raise Exception("sink: %s not file not dir" % sink)
def log(cookie, message):
if not cookie.quiet:
try:
print message
except UnicodeEncodeError:
print message.encode("utf8")
def logError(message):
    """Write `message` to stderr, tolerating consoles that cannot encode it."""
    stream = sys.stderr
    try:
        stream.write(message + "\n")
    except UnicodeEncodeError:
        # Fall back to raw UTF-8 bytes (no trailing newline, as originally).
        stream.write(message.encode("utf8"))
def shouldUpdate(cookie, sink, target):
    """Decide whether `sink` should overwrite `target`.

    Truthy when the files differ according to the options stored in the
    cookie (size, modification time within modify_window, etc.).
    """
    sink_stat = os.stat(sink)
    target_stat = os.stat(target)
    if cookie.update:
        # --update: only replace targets strictly older than the sink.
        return target_stat.st_mtime < sink_stat.st_mtime - cookie.modify_window
    if cookie.ignore_time:
        return 1
    if target_stat.st_size != sink_stat.st_size:
        return 1
    if cookie.size_only:
        return 0
    # Same size: compare timestamps, allowing some slack.
    return abs(target_stat.st_mtime - sink_stat.st_mtime) > cookie.modify_window
def copyFile(cookie, sink, target):
    """Copy `sink` to `target` (and its timestamps when --time is on)."""
    if not cookie.dry_run:
        try:
            shutil.copyfile(sink, target)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt.
            logError("Fail to copy %s\n" % sink)
        if cookie.time:
            try:
                s = os.stat(sink)
                os.utime(target, (s.st_atime, s.st_mtime))
            except Exception:
                logError("Fail to copy timestamp of %s\n" % sink)
    log(cookie, "copy: %s to: %s" % (sink, target))
def updateFile(cookie, sink, target):
    """Overwrite `target` with `sink`, working around read-only targets."""
    if not cookie.dry_run:
        # Read-only, hidden and system files can not be overridden, so drop
        # those attribute bits first and restore them afterwards (Windows),
        # or make the file user-writable (elsewhere).
        if win32file:
            filemode = win32file.GetFileAttributesW(target)
            win32file.SetFileAttributesW(target, filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
        else:
            os.chmod(target, stat.S_IWUSR)
        try:
            shutil.copyfile(sink, target)
        except Exception:
            # Fix: was a bare `except:` (swallowed SystemExit/KeyboardInterrupt).
            logError("Fail to override %s\n" % sink)
        if cookie.time:
            try:
                s = os.stat(sink)
                os.utime(target, (s.st_atime, s.st_mtime))
            except Exception:
                # The utime api of the 2.3 version of python is not unicode compliant.
                logError("Fail to copy timestamp of %s\n" % sink)
        if win32file:
            # Restore the original attribute bits.
            win32file.SetFileAttributesW(target, filemode)
    log(cookie, "update: %s to: %s" % (sink, target))
def removeFile(cookie, target):
    """Delete `target`, making it writable first (read-only files resist deletion)."""
    if not cookie.dry_run:
        os.chmod(target, stat.S_IWUSR)
        os.remove(target)
    log(cookie, "remove: %s" % target)
def makeDir(cookie, target):
    """Create `target` together with any missing parent directories."""
    if not cookie.dry_run:
        os.makedirs(target)
    log(cookie, "make dir: %s" % target)
def removeDir(cookie, target):
    """Recursively delete `target`, ignoring errors (e.g. read-only entries)."""
    if not cookie.dry_run:
        shutil.rmtree(target, True)
    log(cookie, "remove dir: %s" % target)
def convertPath(path):
    """Return `path` in unix form, prefixed with '/' for whole-name matching."""
    # Rewrite the platform separator (windows, mac) to '/'.
    sep = os.path.normpath("/")
    if sep != "/":
        path = re.sub(re.escape(sep), "/", path)
    # The leading '/' lets patterns anchor on complete file/folder names.
    return "/" + path
def convertPattern(pattern, sign):
    """Convert a rsync pattern that match against a path to a filter that match against a converted path.

    Returns a (sign, regexp) tuple where sign is '+' (include) or '-'
    (exclude).  A leading "+ " or "- " inside the pattern overrides `sign`.
    """
    # Check for include vs exclude patterns.
    if pattern[:2] == "+ ":
        pattern = pattern[2:]
        sign = "+"
    elif pattern[:2] == "- ":
        pattern = pattern[2:]
        sign = "-"
    # Express windows, mac patterns in unix patterns (rsync.py extension).
    separator = os.path.normpath("/")
    if separator != "/":
        pattern = re.sub(re.escape(separator), "/", pattern)
    # Remember the original to detect an embedded '/' (trailing '/' ignored).
    temp = pattern
    if pattern[0] == "/":
        pattern = pattern[1:]
    if temp[-1] == "/":
        temp = temp[:-1]
    # Convert pattern rules: ** * to regexp rules.
    pattern = re.escape(pattern)
    pattern = pattern.replace("\\*\\*", ".*")
    pattern = pattern.replace("\\*", "[^/]*")
    # Fix: the original then replaced "\\*" with ".*" a second time; that
    # line was dead code -- the replace above already consumed every
    # escaped '*'.
    if "/" in temp:
        # If pattern contains '/' it should match from the start.
        pattern = "^\\/" + pattern
    else:
        # Else the pattern should match the all file or folder name.
        pattern = "\\/" + pattern
    if pattern[-2:] != "\\/" and pattern[-2:] != ".*":
        # File patterns should match also folders.
        pattern += "\\/?"
    # Pattern should match till the end.
    pattern += "$"
    return (sign, pattern)
def convertPatterns(path, sign):
    """Read the file at `path` and return a list of (sign, regexp) filters."""
    filters = []
    f = open(path, "r")
    for line in f:
        # Strip a single trailing newline, if present.
        if line[-1] == "\n":
            line = line[:-1]
        # Skip blank lines and '#' comments.
        if re.match("[\t ]*$", line):
            continue
        if line[0] == "#":
            continue
        filters.append(convertPattern(line, sign))
    f.close()
    return filters
def printUsage():
    """Print the help string that should printed by rsync.py -h"""
    # NOTE: the option list below mirrors the flags parsed in main().
    print "usage: rsync.py [options] source target"
    print """
 -q, --quiet decrease verbosity
 -r, --recursive recurse into directories
 -R, --relative use relative path names
 -u, --update update only (don't overwrite newer files)
 -t, --times preserve times
 -n, --dry-run show what would have been transferred
 --existing only update files that already exist
 --delete delete files that don't exist on the sending side
 --delete-excluded also delete excluded files on the receiving side
 -I, --ignore-times don't exclude files that match length and time
 --size-only only use file size when determining if a file should
 be transferred
 --modify-window=NUM timestamp window (seconds) for file match (default=2)
 -C, --cvs-exclude auto ignore files in the same way CVS does
 --exclude=PATTERN exclude files matching PATTERN
 --exclude-from=FILE exclude patterns listed in FILE
 --include=PATTERN don't exclude files matching PATTERN
 --include-from=FILE don't exclude patterns listed in FILE
 --version print version number
 -h, --help show this help screen

See http://www.vdesmedt.com/~vds2212/rsync.html for informations and updates.
Send an email to [email protected] for comments and bug reports."""
def printVersion():
    """Print the rsync.py release number."""
    print "rsync.py version 1.0.6"
def main(argv):
    """Command-line entry point: parse options, then sync each source glob.

    Returns a process exit code (0 on success, 1 on usage error).
    """
    cookie = Cookie()
    # Parse flags into the cookie.
    # NOTE(review): the first two tests use `if` rather than `elif`;
    # harmless, but it looks like an oversight.
    opts, args = getopt.getopt(argv[1:], "qrRntuCIh", ["quiet", "recursive", "relative", "dry-run", "time", "update", "cvs-ignore", "ignore-times", "help", "delete", "delete-excluded", "existing", "size-only", "modify-window=", "exclude=", "exclude-from=", "include=", "include-from=", "version"])
    for o, v in opts:
        if o in ["-q", "--quiet"]:
            cookie.quiet = 1
        if o in ["-r", "--recursive"]:
            cookie.recursive = 1
        if o in ["-R", "--relative"]:
            cookie.relative = 1
        elif o in ["-n", "--dry-run"]:
            cookie.dry_run = 1
        elif o in ["-t", "--time"]:
            cookie.time = 1
        elif o in ["-u", "--update"]:
            cookie.update = 1
        elif o in ["-C", "--cvs-ignore"]:
            cookie.cvs_ignore = 1
        elif o in ["-I", "--ignore-time"]:
            cookie.ignore_time = 1
        elif o == "--delete":
            cookie.delete = 1
        elif o == "--delete-excluded":
            # --delete-excluded implies --delete.
            cookie.delete = 1
            cookie.delete_excluded = 1
        elif o == "--size-only":
            cookie.size_only = 1
        elif o == "--modify-window":
            cookie.modify_window = int(v)
        elif o == "--existing":
            cookie.existing = 1
        elif o == "--exclude":
            cookie.filters += [convertPattern(v, "-")]
        elif o == "--exclude-from":
            cookie.filters += convertPatterns(v, "-")
        elif o == "--include":
            cookie.filters += [convertPattern(v, "+")]
        elif o == "--include-from":
            cookie.filters += convertPatterns(v, "+")
        elif o == "--version":
            printVersion()
            return 0
        elif o in ["-h", "--help"]:
            printUsage()
            return 0
    if len(args) <= 1:
        printUsage()
        return 1
    #print cookie.filters
    target_root = args[1]
    if os.path.supports_unicode_filenames:
        target_root = unicode(target_root, sys.getfilesystemencoding())
    cookie.target_root = target_root
    # Expand the source argument; nothing matched means nothing to do.
    sinks = glob.glob(args[0])
    if not sinks:
        return 0
    # Group the globbed sources by their parent directory ("family").
    sink_families = {}
    for sink in sinks:
        if os.path.supports_unicode_filenames:
            sink = unicode(sink, sys.getfilesystemencoding())
        sink_name = ""
        sink_root = sink
        # Walk upwards until a non-empty final component is found
        # (this absorbs any trailing path separators).
        while not sink_name:
            sink_root, sink_name = os.path.split(sink_root)
        if not sink_families.has_key(sink_root):
            sink_families[sink_root] = []
        sink_families[sink_root] += [sink_name]
    for sink_root in sink_families.keys():
        # With --relative, paths are kept as given instead of being
        # rebased onto the family root.
        if cookie.relative:
            cookie.sink_root = ""
        else:
            cookie.sink_root = sink_root
        # Plain files in this family are visited directly...
        files = filter(lambda x: os.path.isfile(os.path.join(sink_root, x)), sink_families[sink_root])
        if files:
            visit(cookie, sink_root, files)
        # ...and each folder is either visited once or walked recursively.
        folders = filter(lambda x: os.path.isdir(os.path.join(sink_root, x)), sink_families[sink_root])
        for folder in folders:
            folder_path = os.path.join(sink_root, folder)
            if not cookie.recursive:
                visit(cookie, folder_path, os.listdir(folder_path))
            else:
                os.path.walk(folder_path, visit, cookie)
    return 0
# Fix: the original last line had dataset-extraction residue
# ("| Adytum-PyMonitor | ...") fused onto it; stripped here.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
import re
# Indexes of the comma-separated fields on the ping "packet loss" line.
XMIT = 0
XCVD = 1
LOSS = 2
TIME = 3
# Line offsets within the ping output: the first line names the host/IP,
# the second-to-last holds the loss summary, the last the min/avg/max line.
IPHOST_LINE = 0
LOSS_LINE = -2
AVE_LINE = -1
# Sample outputs used by the OutputParser doctests: Mac OS ping, Linux
# ping, a loss-free Linux run, and a run with 100% loss (no rtt line).
MAC_TESTDATA = ''' PING google.com (216.239.57.99): 56 data bytes
64 bytes from 216.239.57.99: icmp_seq=0 ttl=240 time=111.108 ms
64 bytes from 216.239.57.99: icmp_seq=1 ttl=240 time=104.348 ms
64 bytes from 216.239.57.99: icmp_seq=2 ttl=240 time=99.88 ms
64 bytes from 216.239.57.99: icmp_seq=3 ttl=240 time=103.395 ms
--- google.com ping statistics ---
4 packets transmitted, 3 packets received, 25% packet loss
round-trip min/avg/max = 99.88/104.682/111.108 ms
'''
LNX_TESTDATA = ''' PING google.com (216.239.57.99) 56(84) bytes of data.
64 bytes from 216.239.57.99: icmp_seq=1 ttl=241 time=88.7 ms
64 bytes from 216.239.57.99: icmp_seq=2 ttl=241 time=89.1 ms
64 bytes from 216.239.57.99: icmp_seq=3 ttl=241 time=89.2 ms
64 bytes from 216.239.57.99: icmp_seq=4 ttl=241 time=88.9 ms
--- google.com ping statistics ---
4 packets transmitted, 2 received, 50% packet loss, time 9300ms
rtt min/avg/max/mdev = 88.745/89.046/89.265/0.290 ms
'''
FLL_TESTDATA = ''' PING google.com (216.239.57.99) 56(84) bytes of data.
64 bytes from 216.239.57.99: icmp_seq=1 ttl=241 time=88.7 ms
64 bytes from 216.239.57.99: icmp_seq=2 ttl=241 time=89.1 ms
64 bytes from 216.239.57.99: icmp_seq=3 ttl=241 time=89.2 ms
64 bytes from 216.239.57.99: icmp_seq=4 ttl=241 time=88.9 ms
--- google.com ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 9300ms
rtt min/avg/max/mdev = 88.745/89.046/89.265/0.290 ms
'''
BAD_TESTDATA = ''' PING www.divorce-md.com (67.94.174.12): 56 data bytes
--- www.divorce-md.com ping statistics ---
3 packets transmitted, 0 packets received, 100% packet loss
'''
class OutputParser(object):
    '''
    Parse the text output of the unix `ping` command; both the Mac OS
    and the Linux output formats are handled.

    # test instantiation
    >>> mac = OutputParser(MAC_TESTDATA)
    >>> lnx = OutputParser(LNX_TESTDATA)
    >>> fll = OutputParser(FLL_TESTDATA)
    >>> bad = OutputParser(BAD_TESTDATA)

    # test hostnames
    >>> mac.getHostname()
    'google.com'
    >>> lnx.getHostname()
    'google.com'
    >>> fll.getHostname()
    'google.com'
    >>> bad.getHostname()
    'www.divorce-md.com'

    # test lines
    >>> type(mac.pingLines())
    <type 'list'>
    >>> type(lnx.pingLines())
    <type 'list'>
    >>> type(fll.pingLines())
    <type 'list'>
    >>> type(bad.pingLines())
    <type 'list'>

    # test loss line
    >>> mac.getPingLossLine()
    '4 packets transmitted, 3 packets received, 25% packet loss'
    >>> lnx.getPingLossLine()
    '4 packets transmitted, 2 received, 50% packet loss, time 9300ms'
    >>> fll.getPingLossLine()
    '4 packets transmitted, 4 received, 0% packet loss, time 9300ms'
    >>> bad.getPingLossLine()

    # test peak line
    >>> mac.getPingPeakLine()
    'round-trip min/avg/max = 99.88/104.682/111.108 ms'
    >>> lnx.getPingPeakLine()
    'rtt min/avg/max/mdev = 88.745/89.046/89.265/0.290 ms'
    >>> fll.getPingPeakLine()
    'rtt min/avg/max/mdev = 88.745/89.046/89.265/0.290 ms'
    >>> bad.getPingPeakLine()

    # test packet counts
    >>> mac.getPacketsXmit()
    '4 packets transmitted'
    >>> lnx.getPacketsXmit()
    '4 packets transmitted'
    >>> bad.getPacketsXmit()
    >>> mac.getPacketsXcvd()
    '3 packets received'
    >>> lnx.getPacketsXcvd()
    '2 received'
    >>> bad.getPacketsXcvd()

    # test packet loss
    >>> mac.getPingLoss()
    25
    >>> lnx.getPingLoss()
    50
    >>> fll.getPingLoss()
    0
    >>> bad.getPingLoss()
    100

    # test packet gain
    >>> mac.getPingGain()
    75
    >>> lnx.getPingGain()
    50
    >>> fll.getPingGain()
    100
    >>> bad.getPingGain()
    0

    # test min/max/ave data
    >>> res = mac.getPeakData().items()
    >>> res.sort()
    >>> res
    [('avg', '104.682'), ('max', '111.108'), ('min', '99.88')]
    >>> res = lnx.getPeakData().items()
    >>> res.sort()
    >>> res
    [('avg', '89.046'), ('max', '89.265'), ('mdev', '0.290'), ('min', '88.745')]
    >>> res = bad.getPeakData().items()
    >>> res.sort()
    >>> res
    []
    '''
    def __init__(self, ping_data):
        # Raw ping output, kept as a single string.
        self.data = ping_data
    def getHostname(self):
        """Return the hostname from the first output line, or None."""
        try:
            line = self.pingLines()[IPHOST_LINE]
            return line.split(' ')[1]
        except:
            # Malformed or empty output: fall through and return None.
            pass
    def getHostIp(self):
        """Not implemented yet."""
        pass
    def pingLines(self):
        """Return the output as a list of lines."""
        return self.data.splitlines()
    def getPingLossLine(self):
        """Return the '% packet loss' summary line, or None if absent."""
        try:
            line = self.pingLines()[LOSS_LINE]
            if re.match('.*%.*', line):
                return line
        except:
            pass
    def getPingPeakLine(self):
        """Return the min/avg/max round-trip summary line, or None."""
        try:
            line = self.pingLines()[AVE_LINE]
            if re.match('.*avg.*', line):
                return line
        except:
            pass
    def getPacketsXmit(self):
        """Return the 'N packets transmitted' fragment, or None."""
        line = self.getPingLossLine()
        if line:
            xmit = line.split(',')[XMIT].strip()
            if xmit:
                return xmit
    def getPacketsXcvd(self):
        """Return the 'N (packets) received' fragment, or None."""
        line = self.getPingLossLine()
        if line:
            xcvd = line.split(',')[XCVD].strip()
            if xcvd:
                return xcvd
    def getPingLoss(self):
        '''
        this returns a percentage, the percentage packet loss
        from the ping command run against the host
        '''
        line = self.getPingLossLine()
        if line:
            parts = line.split(',')
            loss = int(parts[LOSS].split('%')[0].strip())
            if loss or loss == 0:
                return loss
        # No parsable loss line at all: assume everything was lost.
        return 100
    def getPingGain(self):
        '''
        get the ping counts for each host, where the counts show how
        good the network connection is to the host by subtracting the
        percent loss from 100%.
        '''
        return 100 - self.getPingLoss()
    def getPeakData(self):
        """Return {'min':..., 'avg':..., 'max':...} (plus 'mdev' on Linux),
        or an empty dict when no summary line is present."""
        line = self.getPingPeakLine()
        if line:
            parts = self.getPingPeakLine().split(' ')
            # The '/'-joined words left of '=' name the fields; the values
            # sit in the same order right of it.
            EQUALS = parts.index('=')
            WORDS = EQUALS - 1
            NUMS = EQUALS + 1
            return dict(zip(parts[WORDS].split('/'), parts[NUMS].split('/')))
        else:
            return {}
def _test():
    """Run this module's doctests and return the doctest results."""
    import doctest
    import ping
    return doctest.testmod(ping)
# Fix: the original last line had dataset-extraction residue
# ("| Adytum-PyMonitor | ...") fused onto it; stripped here.
if __name__ == '__main__':
    _test()
import json , requests
def A_Gmail(email):
    """Check whether a Gmail address is available for registration.

    Posts to Google's Android setup availability endpoint; returns a dict
    whose 'Status' is 'Available' or 'UnAvailable'.
    """
    endpoint = 'https://android.clients.google.com/setup/checkavail'
    request_headers = {
        'Content-Length':'98',
        'Content-Type':'text/plain; charset=UTF-8',
        'Host':'android.clients.google.com',
        'Connection':'Keep-Alive',
        'user-agent':'GoogleLoginService/1.3(m0 JSS15J)',
    }
    payload = json.dumps({
        'username':f'{email}',
        'version':'3',
        'firstName':'Aegos',
        'lastName':'Codeing'
    })
    reply = requests.post(endpoint, headers=request_headers, data=payload)
    if reply.json()['status'] == 'SUCCESS':
        return {'Status':'Available','AEGOS':'@G_4_2'}
    return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Yahoo(email):
    """Check whether a Yahoo user id (the part before '@') is available.

    Posts the signup validation form with hard-coded crumb/cookie values.
    NOTE(review): returns None when the service answer matches neither
    known response (e.g. once the hard-coded crumbs expire) -- confirm
    callers handle that.
    """
    user_id = email.split('@')[0]
    endpoint = "https://login.yahoo.com/account/module/create?validateField=userId"
    request_headers = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'content-length': '7979',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'cookie': 'A1=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A3=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A1S=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w&j=WORLD; cmp=t=1682254514&j=0&u=1---; B=9qgodcpi4aalb&b=3&s=7t; GUC=AQEBAQFkRnxkT0IiCATl; AS=v=1&s=yWa5asCx&d=A64467c3b|dcjw_0n.2SoXBbaywfJ6pOKLuxGKrtyyLsUqPKnDloZ4PzLBcZineGWbyj4SSiaHVn.6gkyCaIlqSJGryRwnshefN43hbdPocziZnuN6cUMiC9Ls7jght5ak90PZbx8rt9nghZTUPpDYSsMNpii5aA9xWBEhMq__TTmv.rfLHzlCE8rgi5dk5PJouLBujcieRBtI7i.7PwU1jFkaeDhxE4dRMjpAQrjJKc6XqfbTBc5K9QaF6r1YVIVWHEpNrUzbZ_7sSzQ5QFoQNwVBgRzaFtm48hiQlg6S.xsMMdDWkw5xtlG7GZUC.V2jgWNgLScSwqCU_3ntveI_BrcuBy_XAXWQsUzNv3grKBv3qzhOMH3pl8DgTDV3wOo.GqdTtcsaaUn7O0i1hSoA0_EqNIXvRBBdePtBAjPWFZt6sK1Dy8S.kVvW9rIWxonS8GYw6jAw3FrkvM_xk8gxU4oKX1pk3h4m0iJVDQhlr0OOLGW7vBxnzYqidDFi01xQe608kLkJO9qx2X1Xv6XORvYJTNAOVfOMWV83D75M_7L4FOjog8f8F5EkOTU7LymG8GTXY2g4K1xBfGHyzAOPDv9NMjc0I_7wLdATcbn2axvwj5I2xiSqrxK8DYnqTVGqEt.tusj07ij4sobwY0FePNGjLOHICdau9tCajCSqBxtly23flz3iYPQ22Va6uuSaQ.c9mtXsBd0NTlWvlOc6zRdQK.uYkiCYg719UyeIFzDDWeFvQCbuBrstwX.zAkYz2YPaTs8ZGpogdgQ5OhaduuhR5jzvz2mmHXGh5fJ1kxfeClXFWbvCdu3T77mmXHxLGQpr3UZKnmiPO7VjxJoEd9SjYA_NFz9HPbvimmWgmv0DIXvdNvHKCQMYEUROQlk5XIH7oiQ1BtywZNvoWv1D7Q--~A',
        'origin': 'https://login.yahoo.com',
        'referer': 'https://login.yahoo.com/account/create?.lang=en-US&src=homepage&activity=ybar-signin&pspid=2023538075&.done=https%3A%2F%2Fwww.yahoo.com%2F&specId=yidregsimplified&done=https%3A%2F%2Fwww.yahoo.com%2F',
        'sec-ch-ua': '"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.48',
        'x-requested-with': 'XMLHttpRequest',
    }
    form_data = {
        'specId': 'yidregsimplified',
        'cacheStored': '',
        'crumb': 'hrxAgkAZ5jX',
        'acrumb': 'yWa5asCx',
        'sessionIndex': '',
        'done': 'https://www.yahoo.com/',
        'googleIdToken': '',
        'authCode': '',
        'attrSetIndex': '0',
        'multiDomain': '',
        'tos0': 'oath_freereg|xa|en-JO',
        'firstName': 'Aegos',
        'lastName': 'coding',
        'userid-domain': 'yahoo',
        'userId': f'{user_id}',
        'password': 'szdxfefdgfh',
        'birthYear': '1998',
        'signup': '',
    }
    body = requests.post(endpoint, headers=request_headers, data=form_data).text
    if '{"errors":[{"name":"userId","error":"IDENTIFIER_EXISTS"}]}' in body:
        return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
    if '{"errors":[]}' in body:
        return {'Status':'Available','AEGOS':'@G_4_2'}
def A_Hotmail(email):
    """Check availability of a Hotmail/Outlook address.

    Queries the Office identity-provider endpoint; a 'Neither' answer
    means the address is tied to no provider, i.e. it is available.
    """
    endpoint = f'https://odc.officeapps.live.com/odc/emailhrd/getidp?hm=0&emailAddress={email}&_=1604288577990'
    request_headers = {
        'content-type': 'application/x-www-form-urlencoded',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
    }
    body = requests.post(endpoint, headers=request_headers).text
    if 'Neither' in body:
        return {'Status':'Available','AEGOS':'@G_4_2'}
    return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Aol(email):
    """Check whether an AOL user id (the part before '@') is available.

    Posts the signup validation form with hard-coded crumb/cookie values.
    NOTE(review): returns None when the service answer matches neither
    known response (e.g. once the hard-coded crumbs expire) -- confirm
    callers handle that.
    """
    user_id = email.split('@')[0]
    endpoint = "https://login.aol.com/account/module/create?validateField=yid"
    request_headers = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'content-length': '18430',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'cookie': 'A1=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; A3=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; cmp=t=1665079824&j=0&u=1---; rxx=5dmbu5em0gs.2w52y1t9&v=1; AS=v=1&s=mE9oz2RU&d=A6340990f|BfPo7D7.2Soeua6Q5.JcZFuTeKDZd.VEwARWGa18pr8Nw39Pbg3lrVe2yFRyh3RRePi__A4A5bs6jgblICTjtwR23Xn2FaKNd3g4n2Nyoe0HUPOPhxc2_MkgSPb3Uv64NNH6b4oIbh0d6GPjVX.u1iE75NeNGVgDykpoV.GJb.ZOyA1hi3D079flz5FnGN3UPl4Jos.LGJjKE5jeRFZVRbTJyV_q0zmHwp0WmwaGpmtr2bKK2pVY_9dMpw5J1u9Wx0e_QeNBnAgpvDP_E02PBbuxEQQXAX0GF8IM_gu2g5D1CEPA15ailOgAaPTMDY7plQgXdP3cYarpT20WB0vRVdZXqvfsh7E.m8mX5QyFisDObrlDfLbh6nPbmjU_8BIyAHLvCBoCmF0u4BhXftXCqUgW5SadK6EzXKbn394dWjCdO0YJRStGJo_POkob5FNOWud6u3MY1IZS2ov3OD9LIoJy7w.mSCLZ.M84QgA0UgsGTrDOgTQJWeetwKIYy1RbR8lxFZr0IDwTLBAGflJkaNvnQqWxWbEjftCTvXH2CPXFaCRUnSObHQ2cP1Mb8kro2zkXtaUGmW_cD9oHxidsx6vaOfx4f_fSysGP5Aaa2z6NndXHWh_ium8B45ejj4MFh3F7my8_04UX4WjjiZIqGG0fXcLQxFrB1GY6Vnqo47oSmh4yBcZPV7eQ0CKATeJLshzj2SovAZcIdV1ptsKk9P.LVCZl6MeDskIxd5L6iixeCU6PMq84tz7Gmg6S~A; A1S=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U&j=WORLD',
        'origin': 'https://login.aol.com',
        'referer': 'https://login.aol.com/account/create?intl=uk&lang=en-gb&specId=yidReg&done=https%3A%2F%2Fwww.aol.com',
        'sec-ch-ua': '"Chromium";v="106", "Google Chrome";v="106", "Not;A=Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }
    form_data = {
        'specId': 'yidreg',
        'cacheStored': '',
        'crumb': 'ks78hCqM4K.',
        'acrumb': 'mE9oz2RU',
        'done': 'https://www.aol.com',
        'googleIdToken': '',
        'authCode': '',
        'attrSetIndex': '0',
        'tos0': 'oath_freereg|uk|en-GB',
        'firstName': 'Aegos',
        'lastName': 'Coodeing',
        'yid': user_id,
        'password': '1#$aegos$#1wjdytesre',
        'shortCountryCode': 'IQ',
        'phone': '7716555876',
        'mm': '11',
        'dd': '1',
        'yyyy': '1998',
        'freeformGender': '',
        'signup': '',
    }
    body = requests.post(endpoint, headers=request_headers, data=form_data).text
    if ('{"errors":[{"name":"yid","error":"IDENTIFIER_EXISTS"}]}') in body:
        return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
    if ('{"errors":[]}') in body:
        return {'Status':'Available','AEGOS':'@G_4_2'}
def A_MailRu(email):
    """Check whether a mail.ru account already exists for `email`.

    Posts to mail.ru's user/exists API; returns a dict whose 'Status' is
    'Available' when no account exists, 'UnAvailable' otherwise.
    """
    url = 'https://account.mail.ru/api/v1/user/exists'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
    }
    data = {
        'email': str(email)
    }
    response = requests.post(url, headers=headers, data=data).text
    # Fix: the original last line had dataset-extraction residue
    # ("| Aegos-Check | ...") fused onto it; stripped here.
    if 'exists":false' in response:
        return {'Status':'Available','AEGOS':'@G_4_2'}
    else:
        return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
import json , requests
def A_Gmail(email):
    """Check whether a Gmail address is available for registration.

    Posts to Google's Android setup availability endpoint; returns a dict
    whose 'Status' is 'Available' or 'UnAvailable'.
    """
    url = 'https://android.clients.google.com/setup/checkavail'
    headers = {
        'Content-Length':'98',
        'Content-Type':'text/plain; charset=UTF-8',
        'Host':'android.clients.google.com',
        'Connection':'Keep-Alive',
        'user-agent':'GoogleLoginService/1.3(m0 JSS15J)',
    }
    data = json.dumps({
        'username':f'{email}',
        'version':'3',
        'firstName':'Aegos',
        'lastName':'Codeing'
    })
    response = requests.post(url,headers=headers,data=data)
    # 'SUCCESS' means the name passed Google's availability check.
    if response.json()['status'] == 'SUCCESS':
        return {'Status':'Available','AEGOS':'@G_4_2'}
    else:
        return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Yahoo(email):
    """Check whether a Yahoo user id (the part before '@') is available.

    Posts the signup validation form with hard-coded crumb/cookie values.
    NOTE(review): returns None when the service answer matches neither
    known response (e.g. once the hard-coded crumbs expire).
    """
    email2 = email.split('@')[0]
    url2 = "https://login.yahoo.com/account/module/create?validateField=userId"
    headers2 = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'content-length': '7979',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'cookie': 'A1=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A3=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A1S=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w&j=WORLD; cmp=t=1682254514&j=0&u=1---; B=9qgodcpi4aalb&b=3&s=7t; GUC=AQEBAQFkRnxkT0IiCATl; AS=v=1&s=yWa5asCx&d=A64467c3b|dcjw_0n.2SoXBbaywfJ6pOKLuxGKrtyyLsUqPKnDloZ4PzLBcZineGWbyj4SSiaHVn.6gkyCaIlqSJGryRwnshefN43hbdPocziZnuN6cUMiC9Ls7jght5ak90PZbx8rt9nghZTUPpDYSsMNpii5aA9xWBEhMq__TTmv.rfLHzlCE8rgi5dk5PJouLBujcieRBtI7i.7PwU1jFkaeDhxE4dRMjpAQrjJKc6XqfbTBc5K9QaF6r1YVIVWHEpNrUzbZ_7sSzQ5QFoQNwVBgRzaFtm48hiQlg6S.xsMMdDWkw5xtlG7GZUC.V2jgWNgLScSwqCU_3ntveI_BrcuBy_XAXWQsUzNv3grKBv3qzhOMH3pl8DgTDV3wOo.GqdTtcsaaUn7O0i1hSoA0_EqNIXvRBBdePtBAjPWFZt6sK1Dy8S.kVvW9rIWxonS8GYw6jAw3FrkvM_xk8gxU4oKX1pk3h4m0iJVDQhlr0OOLGW7vBxnzYqidDFi01xQe608kLkJO9qx2X1Xv6XORvYJTNAOVfOMWV83D75M_7L4FOjog8f8F5EkOTU7LymG8GTXY2g4K1xBfGHyzAOPDv9NMjc0I_7wLdATcbn2axvwj5I2xiSqrxK8DYnqTVGqEt.tusj07ij4sobwY0FePNGjLOHICdau9tCajCSqBxtly23flz3iYPQ22Va6uuSaQ.c9mtXsBd0NTlWvlOc6zRdQK.uYkiCYg719UyeIFzDDWeFvQCbuBrstwX.zAkYz2YPaTs8ZGpogdgQ5OhaduuhR5jzvz2mmHXGh5fJ1kxfeClXFWbvCdu3T77mmXHxLGQpr3UZKnmiPO7VjxJoEd9SjYA_NFz9HPbvimmWgmv0DIXvdNvHKCQMYEUROQlk5XIH7oiQ1BtywZNvoWv1D7Q--~A',
        'origin': 'https://login.yahoo.com',
        'referer': 'https://login.yahoo.com/account/create?.lang=en-US&src=homepage&activity=ybar-signin&pspid=2023538075&.done=https%3A%2F%2Fwww.yahoo.com%2F&specId=yidregsimplified&done=https%3A%2F%2Fwww.yahoo.com%2F',
        'sec-ch-ua': '"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.48',
        'x-requested-with': 'XMLHttpRequest',
    }
    data2 = {
        'specId': 'yidregsimplified',
        'cacheStored': '',
        'crumb': 'hrxAgkAZ5jX',
        'acrumb': 'yWa5asCx',
        'sessionIndex': '',
        'done': 'https://www.yahoo.com/',
        'googleIdToken': '',
        'authCode': '',
        'attrSetIndex': '0',
        'multiDomain': '',
        'tos0': 'oath_freereg|xa|en-JO',
        'firstName': 'Aegos',
        'lastName': 'coding',
        'userid-domain': 'yahoo',
        'userId': f'{email2}',
        'password': 'szdxfefdgfh',
        'birthYear': '1998',
        'signup': '',
    }
    response2 = requests.post(url2,headers=headers2,data=data2).text
    # IDENTIFIER_EXISTS means the id is taken; an empty error list means free.
    if '{"errors":[{"name":"userId","error":"IDENTIFIER_EXISTS"}]}' in response2:
        return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
    elif '{"errors":[]}' in response2:
        return {'Status':'Available','AEGOS':'@G_4_2'}
def A_Hotmail(email):
    """Check availability of a Hotmail/Outlook address.

    Queries the Office identity-provider endpoint; a 'Neither' answer
    means the address is tied to no provider, i.e. it is available.
    """
    url3 = f'https://odc.officeapps.live.com/odc/emailhrd/getidp?hm=0&emailAddress={email}&_=1604288577990'
    headers3 = {
        'content-type': 'application/x-www-form-urlencoded',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
    }
    response3 = requests.post(url3, headers=headers3).text
    if 'Neither' in response3:
        return {'Status':'Available','AEGOS':'@G_4_2'}
    else:
        return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Aol(email):
email3 = email.split('@')[0]
url4 = "https://login.aol.com/account/module/create?validateField=yid"
headers4 = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '18430',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': 'A1=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; A3=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; cmp=t=1665079824&j=0&u=1---; rxx=5dmbu5em0gs.2w52y1t9&v=1; AS=v=1&s=mE9oz2RU&d=A6340990f|BfPo7D7.2Soeua6Q5.JcZFuTeKDZd.VEwARWGa18pr8Nw39Pbg3lrVe2yFRyh3RRePi__A4A5bs6jgblICTjtwR23Xn2FaKNd3g4n2Nyoe0HUPOPhxc2_MkgSPb3Uv64NNH6b4oIbh0d6GPjVX.u1iE75NeNGVgDykpoV.GJb.ZOyA1hi3D079flz5FnGN3UPl4Jos.LGJjKE5jeRFZVRbTJyV_q0zmHwp0WmwaGpmtr2bKK2pVY_9dMpw5J1u9Wx0e_QeNBnAgpvDP_E02PBbuxEQQXAX0GF8IM_gu2g5D1CEPA15ailOgAaPTMDY7plQgXdP3cYarpT20WB0vRVdZXqvfsh7E.m8mX5QyFisDObrlDfLbh6nPbmjU_8BIyAHLvCBoCmF0u4BhXftXCqUgW5SadK6EzXKbn394dWjCdO0YJRStGJo_POkob5FNOWud6u3MY1IZS2ov3OD9LIoJy7w.mSCLZ.M84QgA0UgsGTrDOgTQJWeetwKIYy1RbR8lxFZr0IDwTLBAGflJkaNvnQqWxWbEjftCTvXH2CPXFaCRUnSObHQ2cP1Mb8kro2zkXtaUGmW_cD9oHxidsx6vaOfx4f_fSysGP5Aaa2z6NndXHWh_ium8B45ejj4MFh3F7my8_04UX4WjjiZIqGG0fXcLQxFrB1GY6Vnqo47oSmh4yBcZPV7eQ0CKATeJLshzj2SovAZcIdV1ptsKk9P.LVCZl6MeDskIxd5L6iixeCU6PMq84tz7Gmg6S~A; A1S=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U&j=WORLD',
'origin': 'https://login.aol.com',
'referer': 'https://login.aol.com/account/create?intl=uk&lang=en-gb&specId=yidReg&done=https%3A%2F%2Fwww.aol.com',
'sec-ch-ua': '"Chromium";v="106", "Google Chrome";v="106", "Not;A=Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
data4 = {
'specId': 'yidreg',
'cacheStored': '',
'crumb': 'ks78hCqM4K.',
'acrumb': 'mE9oz2RU',
'done': 'https://www.aol.com',
'googleIdToken': '',
'authCode': '',
'attrSetIndex': '0',
'tos0': 'oath_freereg|uk|en-GB',
'firstName': 'Aegos',
'lastName': 'Coodeing',
'yid': email3,
'password': '1#$aegos$#1wjdytesre',
'shortCountryCode': 'IQ',
'phone': '7716555876',
'mm': '11',
'dd': '1',
'yyyy': '1998',
'freeformGender': '',
'signup': '',
}
response4 = requests.post(url4,headers=headers4,data=data4).text
if ('{"errors":[{"name":"yid","error":"IDENTIFIER_EXISTS"}]}') in response4:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
elif ('{"errors":[]}') in response4:
return {'Status':'Available','AEGOS':'@G_4_2'}
def A_MailRu(email):
url5 = 'https://account.mail.ru/api/v1/user/exists'
headers5 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
data5 = {
'email': str(email)
}
response5 = requests.post(url5,headers=headers5,data=data5).text
if 'exists":false' in response5:
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'} | AegosCode | /AegosCode-0.2.tar.gz/AegosCode-0.2/code/__init__.py | __init__.py |
import json , requests , os
def A_Gmail(email):
url = 'https://android.clients.google.com/setup/checkavail'
headers = {
'Content-Length':'98',
'Content-Type':'text/plain; charset=UTF-8',
'Host':'android.clients.google.com',
'Connection':'Keep-Alive',
'user-agent':'GoogleLoginService/1.3(m0 JSS15J)',
}
data = json.dumps({
'username':f'{email}',
'version':'3',
'firstName':'Aegos',
'lastName':'Codeing'
})
response = requests.post(url,headers=headers,data=data)
if response.json()['status'] == 'SUCCESS':
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Yahoo(email):
email2 = email.split('@')[0]
url2 = "https://login.yahoo.com/account/module/create?validateField=userId"
headers2 = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '7979',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': 'A1=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A3=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A1S=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w&j=WORLD; cmp=t=1682254514&j=0&u=1---; B=9qgodcpi4aalb&b=3&s=7t; GUC=AQEBAQFkRnxkT0IiCATl; AS=v=1&s=yWa5asCx&d=A64467c3b|dcjw_0n.2SoXBbaywfJ6pOKLuxGKrtyyLsUqPKnDloZ4PzLBcZineGWbyj4SSiaHVn.6gkyCaIlqSJGryRwnshefN43hbdPocziZnuN6cUMiC9Ls7jght5ak90PZbx8rt9nghZTUPpDYSsMNpii5aA9xWBEhMq__TTmv.rfLHzlCE8rgi5dk5PJouLBujcieRBtI7i.7PwU1jFkaeDhxE4dRMjpAQrjJKc6XqfbTBc5K9QaF6r1YVIVWHEpNrUzbZ_7sSzQ5QFoQNwVBgRzaFtm48hiQlg6S.xsMMdDWkw5xtlG7GZUC.V2jgWNgLScSwqCU_3ntveI_BrcuBy_XAXWQsUzNv3grKBv3qzhOMH3pl8DgTDV3wOo.GqdTtcsaaUn7O0i1hSoA0_EqNIXvRBBdePtBAjPWFZt6sK1Dy8S.kVvW9rIWxonS8GYw6jAw3FrkvM_xk8gxU4oKX1pk3h4m0iJVDQhlr0OOLGW7vBxnzYqidDFi01xQe608kLkJO9qx2X1Xv6XORvYJTNAOVfOMWV83D75M_7L4FOjog8f8F5EkOTU7LymG8GTXY2g4K1xBfGHyzAOPDv9NMjc0I_7wLdATcbn2axvwj5I2xiSqrxK8DYnqTVGqEt.tusj07ij4sobwY0FePNGjLOHICdau9tCajCSqBxtly23flz3iYPQ22Va6uuSaQ.c9mtXsBd0NTlWvlOc6zRdQK.uYkiCYg719UyeIFzDDWeFvQCbuBrstwX.zAkYz2YPaTs8ZGpogdgQ5OhaduuhR5jzvz2mmHXGh5fJ1kxfeClXFWbvCdu3T77mmXHxLGQpr3UZKnmiPO7VjxJoEd9SjYA_NFz9HPbvimmWgmv0DIXvdNvHKCQMYEUROQlk5XIH7oiQ1BtywZNvoWv1D7Q--~A',
'origin': 'https://login.yahoo.com',
'referer': 'https://login.yahoo.com/account/create?.lang=en-US&src=homepage&activity=ybar-signin&pspid=2023538075&.done=https%3A%2F%2Fwww.yahoo.com%2F&specId=yidregsimplified&done=https%3A%2F%2Fwww.yahoo.com%2F',
'sec-ch-ua': '"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.48',
'x-requested-with': 'XMLHttpRequest',
}
data2 = {
'specId': 'yidregsimplified',
'cacheStored': '',
'crumb': 'hrxAgkAZ5jX',
'acrumb': 'yWa5asCx',
'sessionIndex': '',
'done': 'https://www.yahoo.com/',
'googleIdToken': '',
'authCode': '',
'attrSetIndex': '0',
'multiDomain': '',
'tos0': 'oath_freereg|xa|en-JO',
'firstName': 'Aegos',
'lastName': 'coding',
'userid-domain': 'yahoo',
'userId': f'{email2}',
'password': 'szdxfefdgfh',
'birthYear': '1998',
'signup': '',
}
response2 = requests.post(url2,headers=headers2,data=data2).text
if '{"errors":[{"name":"userId","error":"IDENTIFIER_EXISTS"}]}' in response2:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
elif '{"errors":[]}' in response2:
return {'Status':'Available','AEGOS':'@G_4_2'}
def A_Hotmail(email):
url3 = f'https://odc.officeapps.live.com/odc/emailhrd/getidp?hm=0&emailAddress={email}&_=1604288577990'
headers3 = {
'content-type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
}
response3 = requests.post(url3, headers=headers3).text
if 'Neither' in response3:
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Aol(email):
email3 = email.split('@')[0]
url4 = "https://login.aol.com/account/module/create?validateField=yid"
headers4 = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '18430',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': 'A1=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; A3=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; cmp=t=1665079824&j=0&u=1---; rxx=5dmbu5em0gs.2w52y1t9&v=1; AS=v=1&s=mE9oz2RU&d=A6340990f|BfPo7D7.2Soeua6Q5.JcZFuTeKDZd.VEwARWGa18pr8Nw39Pbg3lrVe2yFRyh3RRePi__A4A5bs6jgblICTjtwR23Xn2FaKNd3g4n2Nyoe0HUPOPhxc2_MkgSPb3Uv64NNH6b4oIbh0d6GPjVX.u1iE75NeNGVgDykpoV.GJb.ZOyA1hi3D079flz5FnGN3UPl4Jos.LGJjKE5jeRFZVRbTJyV_q0zmHwp0WmwaGpmtr2bKK2pVY_9dMpw5J1u9Wx0e_QeNBnAgpvDP_E02PBbuxEQQXAX0GF8IM_gu2g5D1CEPA15ailOgAaPTMDY7plQgXdP3cYarpT20WB0vRVdZXqvfsh7E.m8mX5QyFisDObrlDfLbh6nPbmjU_8BIyAHLvCBoCmF0u4BhXftXCqUgW5SadK6EzXKbn394dWjCdO0YJRStGJo_POkob5FNOWud6u3MY1IZS2ov3OD9LIoJy7w.mSCLZ.M84QgA0UgsGTrDOgTQJWeetwKIYy1RbR8lxFZr0IDwTLBAGflJkaNvnQqWxWbEjftCTvXH2CPXFaCRUnSObHQ2cP1Mb8kro2zkXtaUGmW_cD9oHxidsx6vaOfx4f_fSysGP5Aaa2z6NndXHWh_ium8B45ejj4MFh3F7my8_04UX4WjjiZIqGG0fXcLQxFrB1GY6Vnqo47oSmh4yBcZPV7eQ0CKATeJLshzj2SovAZcIdV1ptsKk9P.LVCZl6MeDskIxd5L6iixeCU6PMq84tz7Gmg6S~A; A1S=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U&j=WORLD',
'origin': 'https://login.aol.com',
'referer': 'https://login.aol.com/account/create?intl=uk&lang=en-gb&specId=yidReg&done=https%3A%2F%2Fwww.aol.com',
'sec-ch-ua': '"Chromium";v="106", "Google Chrome";v="106", "Not;A=Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
data4 = {
'specId': 'yidreg',
'cacheStored': '',
'crumb': 'ks78hCqM4K.',
'acrumb': 'mE9oz2RU',
'done': 'https://www.aol.com',
'googleIdToken': '',
'authCode': '',
'attrSetIndex': '0',
'tos0': 'oath_freereg|uk|en-GB',
'firstName': 'Aegos',
'lastName': 'Coodeing',
'yid': email3,
'password': '1#$aegos$#1wjdytesre',
'shortCountryCode': 'IQ',
'phone': '7716555876',
'mm': '11',
'dd': '1',
'yyyy': '1998',
'freeformGender': '',
'signup': '',
}
response4 = requests.post(url4,headers=headers4,data=data4).text
if ('{"errors":[{"name":"yid","error":"IDENTIFIER_EXISTS"}]}') in response4:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
elif ('{"errors":[]}') in response4:
return {'Status':'Available','AEGOS':'@G_4_2'}
def A_MailRu(email):
url5 = 'https://account.mail.ru/api/v1/user/exists'
headers5 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
data5 = {
'email': str(email)
}
response5 = requests.post(url5,headers=headers5,data=data5).text
if 'exists":false' in response5:
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'} | AegosLib | /AegosLib-0.1.tar.gz/AegosLib-0.1/code/Aegos.py | Aegos.py |
import json , requests , os
def A_Gmail(email):
url = 'https://android.clients.google.com/setup/checkavail'
headers = {
'Content-Length':'98',
'Content-Type':'text/plain; charset=UTF-8',
'Host':'android.clients.google.com',
'Connection':'Keep-Alive',
'user-agent':'GoogleLoginService/1.3(m0 JSS15J)',
}
data = json.dumps({
'username':f'{email}',
'version':'3',
'firstName':'Aegos',
'lastName':'Codeing'
})
response = requests.post(url,headers=headers,data=data)
if response.json()['status'] == 'SUCCESS':
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Yahoo(email):
email2 = email.split('@')[0]
url2 = "https://login.yahoo.com/account/module/create?validateField=userId"
headers2 = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '7979',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': 'A1=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A3=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A1S=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w&j=WORLD; cmp=t=1682254514&j=0&u=1---; B=9qgodcpi4aalb&b=3&s=7t; GUC=AQEBAQFkRnxkT0IiCATl; AS=v=1&s=yWa5asCx&d=A64467c3b|dcjw_0n.2SoXBbaywfJ6pOKLuxGKrtyyLsUqPKnDloZ4PzLBcZineGWbyj4SSiaHVn.6gkyCaIlqSJGryRwnshefN43hbdPocziZnuN6cUMiC9Ls7jght5ak90PZbx8rt9nghZTUPpDYSsMNpii5aA9xWBEhMq__TTmv.rfLHzlCE8rgi5dk5PJouLBujcieRBtI7i.7PwU1jFkaeDhxE4dRMjpAQrjJKc6XqfbTBc5K9QaF6r1YVIVWHEpNrUzbZ_7sSzQ5QFoQNwVBgRzaFtm48hiQlg6S.xsMMdDWkw5xtlG7GZUC.V2jgWNgLScSwqCU_3ntveI_BrcuBy_XAXWQsUzNv3grKBv3qzhOMH3pl8DgTDV3wOo.GqdTtcsaaUn7O0i1hSoA0_EqNIXvRBBdePtBAjPWFZt6sK1Dy8S.kVvW9rIWxonS8GYw6jAw3FrkvM_xk8gxU4oKX1pk3h4m0iJVDQhlr0OOLGW7vBxnzYqidDFi01xQe608kLkJO9qx2X1Xv6XORvYJTNAOVfOMWV83D75M_7L4FOjog8f8F5EkOTU7LymG8GTXY2g4K1xBfGHyzAOPDv9NMjc0I_7wLdATcbn2axvwj5I2xiSqrxK8DYnqTVGqEt.tusj07ij4sobwY0FePNGjLOHICdau9tCajCSqBxtly23flz3iYPQ22Va6uuSaQ.c9mtXsBd0NTlWvlOc6zRdQK.uYkiCYg719UyeIFzDDWeFvQCbuBrstwX.zAkYz2YPaTs8ZGpogdgQ5OhaduuhR5jzvz2mmHXGh5fJ1kxfeClXFWbvCdu3T77mmXHxLGQpr3UZKnmiPO7VjxJoEd9SjYA_NFz9HPbvimmWgmv0DIXvdNvHKCQMYEUROQlk5XIH7oiQ1BtywZNvoWv1D7Q--~A',
'origin': 'https://login.yahoo.com',
'referer': 'https://login.yahoo.com/account/create?.lang=en-US&src=homepage&activity=ybar-signin&pspid=2023538075&.done=https%3A%2F%2Fwww.yahoo.com%2F&specId=yidregsimplified&done=https%3A%2F%2Fwww.yahoo.com%2F',
'sec-ch-ua': '"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.48',
'x-requested-with': 'XMLHttpRequest',
}
data2 = {
'specId': 'yidregsimplified',
'cacheStored': '',
'crumb': 'hrxAgkAZ5jX',
'acrumb': 'yWa5asCx',
'sessionIndex': '',
'done': 'https://www.yahoo.com/',
'googleIdToken': '',
'authCode': '',
'attrSetIndex': '0',
'multiDomain': '',
'tos0': 'oath_freereg|xa|en-JO',
'firstName': 'Aegos',
'lastName': 'coding',
'userid-domain': 'yahoo',
'userId': f'{email2}',
'password': 'szdxfefdgfh',
'birthYear': '1998',
'signup': '',
}
response2 = requests.post(url2,headers=headers2,data=data2).text
if '{"errors":[{"name":"userId","error":"IDENTIFIER_EXISTS"}]}' in response2:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
elif '{"errors":[]}' in response2:
return {'Status':'Available','AEGOS':'@G_4_2'}
def A_Hotmail(email):
url3 = f'https://odc.officeapps.live.com/odc/emailhrd/getidp?hm=0&emailAddress={email}&_=1604288577990'
headers3 = {
'content-type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
}
response3 = requests.post(url3, headers=headers3).text
if 'Neither' in response3:
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Aol(email):
email3 = email.split('@')[0]
url4 = "https://login.aol.com/account/module/create?validateField=yid"
headers4 = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '18430',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': 'A1=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; A3=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; cmp=t=1665079824&j=0&u=1---; rxx=5dmbu5em0gs.2w52y1t9&v=1; AS=v=1&s=mE9oz2RU&d=A6340990f|BfPo7D7.2Soeua6Q5.JcZFuTeKDZd.VEwARWGa18pr8Nw39Pbg3lrVe2yFRyh3RRePi__A4A5bs6jgblICTjtwR23Xn2FaKNd3g4n2Nyoe0HUPOPhxc2_MkgSPb3Uv64NNH6b4oIbh0d6GPjVX.u1iE75NeNGVgDykpoV.GJb.ZOyA1hi3D079flz5FnGN3UPl4Jos.LGJjKE5jeRFZVRbTJyV_q0zmHwp0WmwaGpmtr2bKK2pVY_9dMpw5J1u9Wx0e_QeNBnAgpvDP_E02PBbuxEQQXAX0GF8IM_gu2g5D1CEPA15ailOgAaPTMDY7plQgXdP3cYarpT20WB0vRVdZXqvfsh7E.m8mX5QyFisDObrlDfLbh6nPbmjU_8BIyAHLvCBoCmF0u4BhXftXCqUgW5SadK6EzXKbn394dWjCdO0YJRStGJo_POkob5FNOWud6u3MY1IZS2ov3OD9LIoJy7w.mSCLZ.M84QgA0UgsGTrDOgTQJWeetwKIYy1RbR8lxFZr0IDwTLBAGflJkaNvnQqWxWbEjftCTvXH2CPXFaCRUnSObHQ2cP1Mb8kro2zkXtaUGmW_cD9oHxidsx6vaOfx4f_fSysGP5Aaa2z6NndXHWh_ium8B45ejj4MFh3F7my8_04UX4WjjiZIqGG0fXcLQxFrB1GY6Vnqo47oSmh4yBcZPV7eQ0CKATeJLshzj2SovAZcIdV1ptsKk9P.LVCZl6MeDskIxd5L6iixeCU6PMq84tz7Gmg6S~A; A1S=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U&j=WORLD',
'origin': 'https://login.aol.com',
'referer': 'https://login.aol.com/account/create?intl=uk&lang=en-gb&specId=yidReg&done=https%3A%2F%2Fwww.aol.com',
'sec-ch-ua': '"Chromium";v="106", "Google Chrome";v="106", "Not;A=Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
data4 = {
'specId': 'yidreg',
'cacheStored': '',
'crumb': 'ks78hCqM4K.',
'acrumb': 'mE9oz2RU',
'done': 'https://www.aol.com',
'googleIdToken': '',
'authCode': '',
'attrSetIndex': '0',
'tos0': 'oath_freereg|uk|en-GB',
'firstName': 'Aegos',
'lastName': 'Coodeing',
'yid': email3,
'password': '1#$aegos$#1wjdytesre',
'shortCountryCode': 'IQ',
'phone': '7716555876',
'mm': '11',
'dd': '1',
'yyyy': '1998',
'freeformGender': '',
'signup': '',
}
response4 = requests.post(url4,headers=headers4,data=data4).text
if ('{"errors":[{"name":"yid","error":"IDENTIFIER_EXISTS"}]}') in response4:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
elif ('{"errors":[]}') in response4:
return {'Status':'Available','AEGOS':'@G_4_2'}
def A_MailRu(email):
url5 = 'https://account.mail.ru/api/v1/user/exists'
headers5 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
data5 = {
'email': str(email)
}
response5 = requests.post(url5,headers=headers5,data=data5).text
if 'exists":false' in response5:
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'} | AegosLib2 | /AegosLib2-0.1.tar.gz/AegosLib2-0.1/code/Aegos.py | Aegos.py |
import json , requests
def A_Gmail(email):
url = 'https://android.clients.google.com/setup/checkavail'
headers = {
'Content-Length':'98',
'Content-Type':'text/plain; charset=UTF-8',
'Host':'android.clients.google.com',
'Connection':'Keep-Alive',
'user-agent':'GoogleLoginService/1.3(m0 JSS15J)',
}
data = json.dumps({
'username':f'{email}',
'version':'3',
'firstName':'Aegos',
'lastName':'Codeing'
})
response = requests.post(url,headers=headers,data=data)
if response.json()['status'] == 'SUCCESS':
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Yahoo(email):
email2 = email.split('@')[0]
url2 = "https://login.yahoo.com/account/module/create?validateField=userId"
headers2 = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '7979',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': 'A1=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A3=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w; A1S=d=AQABBKsqRWQCEP0UsV5c9lOx8e5im2YNQ50FEgEBAQF8RmRPZAAAAAAA_eMAAA&S=AQAAApW5iPsgjBo-EVpzITncq1w&j=WORLD; cmp=t=1682254514&j=0&u=1---; B=9qgodcpi4aalb&b=3&s=7t; GUC=AQEBAQFkRnxkT0IiCATl; AS=v=1&s=yWa5asCx&d=A64467c3b|dcjw_0n.2SoXBbaywfJ6pOKLuxGKrtyyLsUqPKnDloZ4PzLBcZineGWbyj4SSiaHVn.6gkyCaIlqSJGryRwnshefN43hbdPocziZnuN6cUMiC9Ls7jght5ak90PZbx8rt9nghZTUPpDYSsMNpii5aA9xWBEhMq__TTmv.rfLHzlCE8rgi5dk5PJouLBujcieRBtI7i.7PwU1jFkaeDhxE4dRMjpAQrjJKc6XqfbTBc5K9QaF6r1YVIVWHEpNrUzbZ_7sSzQ5QFoQNwVBgRzaFtm48hiQlg6S.xsMMdDWkw5xtlG7GZUC.V2jgWNgLScSwqCU_3ntveI_BrcuBy_XAXWQsUzNv3grKBv3qzhOMH3pl8DgTDV3wOo.GqdTtcsaaUn7O0i1hSoA0_EqNIXvRBBdePtBAjPWFZt6sK1Dy8S.kVvW9rIWxonS8GYw6jAw3FrkvM_xk8gxU4oKX1pk3h4m0iJVDQhlr0OOLGW7vBxnzYqidDFi01xQe608kLkJO9qx2X1Xv6XORvYJTNAOVfOMWV83D75M_7L4FOjog8f8F5EkOTU7LymG8GTXY2g4K1xBfGHyzAOPDv9NMjc0I_7wLdATcbn2axvwj5I2xiSqrxK8DYnqTVGqEt.tusj07ij4sobwY0FePNGjLOHICdau9tCajCSqBxtly23flz3iYPQ22Va6uuSaQ.c9mtXsBd0NTlWvlOc6zRdQK.uYkiCYg719UyeIFzDDWeFvQCbuBrstwX.zAkYz2YPaTs8ZGpogdgQ5OhaduuhR5jzvz2mmHXGh5fJ1kxfeClXFWbvCdu3T77mmXHxLGQpr3UZKnmiPO7VjxJoEd9SjYA_NFz9HPbvimmWgmv0DIXvdNvHKCQMYEUROQlk5XIH7oiQ1BtywZNvoWv1D7Q--~A',
'origin': 'https://login.yahoo.com',
'referer': 'https://login.yahoo.com/account/create?.lang=en-US&src=homepage&activity=ybar-signin&pspid=2023538075&.done=https%3A%2F%2Fwww.yahoo.com%2F&specId=yidregsimplified&done=https%3A%2F%2Fwww.yahoo.com%2F',
'sec-ch-ua': '"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.48',
'x-requested-with': 'XMLHttpRequest',
}
data2 = {
'specId': 'yidregsimplified',
'cacheStored': '',
'crumb': 'hrxAgkAZ5jX',
'acrumb': 'yWa5asCx',
'sessionIndex': '',
'done': 'https://www.yahoo.com/',
'googleIdToken': '',
'authCode': '',
'attrSetIndex': '0',
'multiDomain': '',
'tos0': 'oath_freereg|xa|en-JO',
'firstName': 'Aegos',
'lastName': 'coding',
'userid-domain': 'yahoo',
'userId': f'{email2}',
'password': 'szdxfefdgfh',
'birthYear': '1998',
'signup': '',
}
response2 = requests.post(url2,headers=headers2,data=data2).text
if '{"errors":[{"name":"userId","error":"IDENTIFIER_EXISTS"}]}' in response2:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
elif '{"errors":[]}' in response2:
return {'Status':'Available','AEGOS':'@G_4_2'}
def A_Hotmail(email):
url3 = f'https://odc.officeapps.live.com/odc/emailhrd/getidp?hm=0&emailAddress={email}&_=1604288577990'
headers3 = {
'content-type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
}
response3 = requests.post(url3, headers=headers3).text
if 'Neither' in response3:
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
def A_Aol(email):
email3 = email.split('@')[0]
url4 = "https://login.aol.com/account/module/create?validateField=yid"
headers4 = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'content-length': '18430',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': 'A1=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; A3=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U; cmp=t=1665079824&j=0&u=1---; rxx=5dmbu5em0gs.2w52y1t9&v=1; AS=v=1&s=mE9oz2RU&d=A6340990f|BfPo7D7.2Soeua6Q5.JcZFuTeKDZd.VEwARWGa18pr8Nw39Pbg3lrVe2yFRyh3RRePi__A4A5bs6jgblICTjtwR23Xn2FaKNd3g4n2Nyoe0HUPOPhxc2_MkgSPb3Uv64NNH6b4oIbh0d6GPjVX.u1iE75NeNGVgDykpoV.GJb.ZOyA1hi3D079flz5FnGN3UPl4Jos.LGJjKE5jeRFZVRbTJyV_q0zmHwp0WmwaGpmtr2bKK2pVY_9dMpw5J1u9Wx0e_QeNBnAgpvDP_E02PBbuxEQQXAX0GF8IM_gu2g5D1CEPA15ailOgAaPTMDY7plQgXdP3cYarpT20WB0vRVdZXqvfsh7E.m8mX5QyFisDObrlDfLbh6nPbmjU_8BIyAHLvCBoCmF0u4BhXftXCqUgW5SadK6EzXKbn394dWjCdO0YJRStGJo_POkob5FNOWud6u3MY1IZS2ov3OD9LIoJy7w.mSCLZ.M84QgA0UgsGTrDOgTQJWeetwKIYy1RbR8lxFZr0IDwTLBAGflJkaNvnQqWxWbEjftCTvXH2CPXFaCRUnSObHQ2cP1Mb8kro2zkXtaUGmW_cD9oHxidsx6vaOfx4f_fSysGP5Aaa2z6NndXHWh_ium8B45ejj4MFh3F7my8_04UX4WjjiZIqGG0fXcLQxFrB1GY6Vnqo47oSmh4yBcZPV7eQ0CKATeJLshzj2SovAZcIdV1ptsKk9P.LVCZl6MeDskIxd5L6iixeCU6PMq84tz7Gmg6S~A; A1S=d=AQABBAcaP2MCEDS0lcVAC7jDxca1x2QPSMAFEgEBAQFrQGNIYwAAAAAA_eMAAA&S=AQAAAk66bvBpHLzQZ0n3bQV7x6U&j=WORLD',
'origin': 'https://login.aol.com',
'referer': 'https://login.aol.com/account/create?intl=uk&lang=en-gb&specId=yidReg&done=https%3A%2F%2Fwww.aol.com',
'sec-ch-ua': '"Chromium";v="106", "Google Chrome";v="106", "Not;A=Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
data4 = {
'specId': 'yidreg',
'cacheStored': '',
'crumb': 'ks78hCqM4K.',
'acrumb': 'mE9oz2RU',
'done': 'https://www.aol.com',
'googleIdToken': '',
'authCode': '',
'attrSetIndex': '0',
'tos0': 'oath_freereg|uk|en-GB',
'firstName': 'Aegos',
'lastName': 'Coodeing',
'yid': email3,
'password': '1#$aegos$#1wjdytesre',
'shortCountryCode': 'IQ',
'phone': '7716555876',
'mm': '11',
'dd': '1',
'yyyy': '1998',
'freeformGender': '',
'signup': '',
}
response4 = requests.post(url4,headers=headers4,data=data4).text
if ('{"errors":[{"name":"yid","error":"IDENTIFIER_EXISTS"}]}') in response4:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'}
elif ('{"errors":[]}') in response4:
return {'Status':'Available','AEGOS':'@G_4_2'}
def A_MailRu(email):
url5 = 'https://account.mail.ru/api/v1/user/exists'
headers5 = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
data5 = {
'email': str(email)
}
response5 = requests.post(url5,headers=headers5,data=data5).text
if 'exists":false' in response5:
return {'Status':'Available','AEGOS':'@G_4_2'}
else:
return {'Status':'UnAvailable','AEGOS':'@G_4_2'} | AegosV1 | /AegosV1-0.3.tar.gz/AegosV1-0.3/code/__init__.py | __init__.py |
AeoLiS
======
AeoLiS is a process-based model for simulating aeolian sediment
transport in situations where supply-limiting factors are important,
like in coastal environments. Supply-limitations currently supported
are soil moisture contents, sediment sorting and armouring, bed slope
effects, air humidity and roughness elements.
The maintenance and development is done by the AEOLIS developer team:
Current members are:
`Bart van Westen <[email protected]>`_ at Deltares,
`Nick Cohn <[email protected]>`_ at U.S. Army Engineer Research and Development Center (ERDC),
`Sierd de Vries <[email protected]>`_ (founder) at Delft University of Technology,
`Christa van IJzendoorn <[email protected]>`_ at Delft University of Technology,
`Caroline Hallin <[email protected]>`_ at Delft University of Technology,
`Glenn Strypsteen <[email protected]>`_ at Katholieke Universiteit Leuven and
`Janelle Skaden <[email protected]>`_ at U.S. Army Engineer Research and Development Center (ERDC).
Previous members are:
`Bas Hoonhout <[email protected]>`_ (founder), Tom Pak, Pieter Rauwoens and Lisa Meijer | AeoLiS | /AeoLiS-2.1.1.tar.gz/AeoLiS-2.1.1/README.rst | README.rst |
from __future__ import absolute_import, division
import logging
import numpy as np
import aeolis.gridparams
from matplotlib import pyplot as plt
# package modules
from aeolis.utils import *
#import matplotlib.pyplot as plt
# initialize logger
logger = logging.getLogger(__name__)
def initialize(s, p):
    '''Initialize bathymetry and bed composition

    Initializes bathymetry, bed layer thickness and bed composition,
    and reads optional masks and a spatially varying velocity
    threshold.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    Raises
    ------
    ValueError
        If ``grain_dist`` is still a string (unresolved file path) or
        does not provide a distribution row for every bed layer.

    '''

    # get model dimensions
    nl = p['nlayers']
    nf = p['nfractions']

    # initialize bathymetry; zb0 keeps the initial bed for later reference
    s['zb'][:,:] = p['bed_file']
    s['zb0'][:,:] = p['bed_file']
    s['zne'][:,:] = p['ne_file']

    # initialize thickness of erodable or dry top layer
    s['zdry'][:,:] = 0.05

    # initialize bed layers
    s['thlyr'][:,:,:] = p['layer_thickness']

    # initialize bed composition
    if isinstance(p['grain_dist'], str):
        logger.log_and_raise('Grain size file not recognized as array, check file path and whether all values have been filled in.', exc=ValueError)
    # NOTE: the dtype test is parenthesized on purpose; without parentheses
    # the trailing "or dtype == 'int'" bypassed the bedcomp_file and ndim
    # guards due to operator precedence. Both float and int dtypes are
    # accepted so a grain_dist entered as 1 instead of 1.0 does not fail.
    if p['bedcomp_file'] is None and p['grain_dist'].ndim == 1 \
            and (p['grain_dist'].dtype == 'float64' or p['grain_dist'].dtype == 'int'):
        # single grain size distribution applied uniformly to all layers
        gs = makeiterable(p['grain_dist'])
        gs = gs / np.sum(gs)
        for i in range(nl):
            for j in range(nf):
                s['mass'][:,:,i,j] = p['rhog'] * (1. - p['porosity']) \
                                     * s['thlyr'][:,:,i] * gs[j]
    elif p['bedcomp_file'] is None and p['grain_dist'].ndim > 1:
        # simple layered cases: txt file with one distribution row per layer
        # and one column per fraction
        if nl != p['grain_dist'].shape[0]:
            logger.log_and_raise('Grain size distribution not assigned for each layer, not enough rows for the number of layers', exc=ValueError)
        for i in range(nl):
            gs = makeiterable(p['grain_dist'][i,:])
            gs = gs / np.sum(gs)
            for j in range(nf):
                s['mass'][:,:,i,j] = p['rhog'] * (1. - p['porosity']) \
                                     * s['thlyr'][:,:,i] * gs[j]
    else:
        # full bed composition provided as file
        s['mass'][:,:,:,:] = p['bedcomp_file'].reshape(s['mass'].shape)

    # initialize masks; missing masks default to the scalar 1 (no masking)
    for k, v in p.items():
        if k.endswith('_mask'):
            if v is None:
                s[k] = 1.
            else:
                s[k] = v.reshape(s['zb'].shape)

    # initialize spatially varying velocity threshold, repeated per fraction
    if p['threshold_file'] is not None:
        s['uth'] = p['threshold_file'][:,:,np.newaxis].repeat(nf, axis=-1)

    return s
def mixtoplayer(s, p):
    '''Mix grain size distribution in top layers of the bed.

    Simulates mixing of the top layers of the bed by wave action. The
    wave action is represented by a local wave height; the mixing depth
    (depth of disturbance) is the fraction ``facDOD`` of that wave
    height. The mixing depth determines how many bed layers take part
    in the mixing. The grain size distribution in these layers is then
    blended towards the average distribution over these layers, weighed
    by the ratio of total layer thickness and mixing depth.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    '''

    if p['process_mixtoplayer']:

        # get model dimensions
        nl = p['nlayers']
        nf = p['nfractions']

        # compute depth of disturbance for each cell
        DOD = p['facDOD'] * s['Hsmix']

        # compute ratio of total layer thickness and depth of disturbance,
        # capped at 1; cells without disturbance keep f = 1
        ix = DOD > 0.
        f = np.ones(DOD.shape)
        f[ix] = np.minimum(1., s['thlyr'].sum(axis=2)[ix] / DOD[ix])

        # repeat for each layer
        DOD = DOD[:,:,np.newaxis].repeat(nl, axis=2)
        f = f[:,:,np.newaxis].repeat(nl, axis=2)

        # determine which layers are above the depth of disturbance and
        # repeat for each fraction
        ix = (s['thlyr'].cumsum(axis=2) <= DOD) & (DOD > 0.)
        ix = ix[:,:,:,np.newaxis].repeat(nf, axis=3)
        f = f[:,:,:,np.newaxis].repeat(nf, axis=3)

        # average mass over disturbed layers
        if np.any(ix):
            ix[:,:,0,:] = True # at least mix the top layer

            mass = s['mass'].copy()
            mass[~ix] = np.nan  # exclude undisturbed layers from the average

            mass1 = np.nanmean(mass, axis=2, keepdims=True).repeat(nl, axis=2)
            # blend between fully mixed and original composition
            mass = mass1 * f + mass * (1. - f)
            s['mass'][ix] = mass[ix]

    return s
def wet_bed_reset(s, p):
    '''Relax the bed level of wet cells back towards the initial bed.

    Where the water level exceeds the bed level by more than a small
    margin (1 cm), the bed elevation ``zb`` is nudged towards its
    initial value ``zb0`` using a relaxation factor given by the ratio
    of the time step ``dt_opt`` and the reset time scale ``Tbedreset``.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    '''

    if not p['process_wet_bed_reset']:
        return s

    # relaxation factor for the current time step
    relax = p['dt_opt'] / p['Tbedreset']

    # cells considered wet: water level above bed level plus 1 cm margin
    wet = s['zs'] > (s['zb'] + 0.01)
    s['zb'][wet] += (s['zb0'][wet] - s['zb'][wet]) * relax

    return s
def update(s, p):
    '''Update bathymetry and bed composition

    Update bed composition by moving sediment fractions between bed
    layers. The total mass in a single bed layer does not change as
    sediment removed from a layer is repleted with sediment from
    underlying layers. Similarly, excess sediment added in a layer is
    moved to underlying layers in order to keep the layer mass
    constant. The lowest bed layer exchanges sediment with an infinite
    sediment source that follows the original grain size distribution
    as defined in the model configuration file by ``grain_size`` and
    ``grain_dist``. The bathymetry is updated following the
    cumulative erosion/deposition over the fractions if ``bedupdate``
    is ``True``.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''
    nx = p['nx']
    ny = p['ny']
    nl = p['nlayers']
    nf = p['nfractions']
    # determine net erosion; flatten spatial dims to one cell axis
    pickup = s['pickup'].reshape((-1,nf))
    # determine total mass that should be exchanged between layers;
    # dm > 0 means net deposition in the cell, dm < 0 net erosion
    # (dm is the negated total pickup, broadcast over all fractions)
    dm = -np.sum(pickup, axis=-1, keepdims=True).repeat(nf, axis=-1)
    # get erosion and deposition cells
    ix_ero = dm[:,0] < 0.
    ix_dep = dm[:,0] > 0.
    # reshape mass matrix to (cells, layers, fractions)
    m = s['mass'].reshape((-1,nl,nf))
    # negative mass may occur in case of deposition due to numerics,
    # which should be prevented
    m, dm, pickup = prevent_negative_mass(m, dm, pickup)
    # determine weighing factors (fraction distribution per layer)
    d = normalize(m, axis=2)
    # move mass among layers; pickup is removed from the top layer
    m[:,0,:] -= pickup
    for i in range(1,nl):
        # erosion: replete layer i-1 from layer i, weighted by the
        # fraction distribution of the lower layer i
        m[ix_ero,i-1,:] -= dm[ix_ero,:] * d[ix_ero,i,:]
        m[ix_ero,i, :] += dm[ix_ero,:] * d[ix_ero,i,:]
        # deposition: push excess mass from layer i-1 down to layer i,
        # weighted by the fraction distribution of the upper layer i-1
        m[ix_dep,i-1,:] -= dm[ix_dep,:] * d[ix_dep,i-1,:]
        m[ix_dep,i, :] += dm[ix_dep,:] * d[ix_dep,i-1,:]
    # deposition: excess mass in the lowest layer drains to the
    # infinite sediment source below the bed
    m[ix_dep,-1,:] -= dm[ix_dep,:] * d[ix_dep,-1,:]
    if p['grain_dist'].ndim == 2:
        # depth-varying grain size distribution: replete the lowest
        # layer following the deepest specified distribution
        m[ix_ero,-1,:] -= dm[ix_ero,:] * normalize(p['grain_dist'][-1,:])[np.newaxis,:].repeat(np.sum(ix_ero), axis=0)
    elif type(p['bedcomp_file']) == np.ndarray:
        # bed composition from file: replete following the lowest
        # layer of the initial composition per cell
        gs = p['bedcomp_file'].reshape((-1,nl,nf))
        m[ix_ero,-1,:] -= dm[ix_ero,:] * normalize(gs[ix_ero,-1, :], axis=1)
    else:
        # uniform distribution from the model configuration
        m[ix_ero,-1,:] -= dm[ix_ero,:] * normalize(p['grain_dist'])[np.newaxis,:].repeat(np.sum(ix_ero), axis=0)
    # remove tiny negatives
    m = prevent_tiny_negatives(m, p['max_error'])
    # warn if not all negatives are gone
    if m.min() < 0:
        logger.warning(format_log('Negative mass',
                                  nrcells=np.sum(np.any(m<0., axis=-1)),
                                  minvalue=m.min(),
                                  minwind=s['uw'].min(),
                                  time=p['_time']))
    # reshape mass matrix back to (ny+1, nx+1, nl, nf)
    s['mass'] = m.reshape((ny+1,nx+1,nl,nf))
    # update bathy: convert exchanged mass to bed level change via
    # grain density and porosity
    if p['process_bedupdate']:
        dz = dm[:, 0].reshape((ny + 1, nx + 1)) / (p['rhog'] * (1. - p['porosity']))
        # s['dzb'] = dm[:, 0].reshape((ny + 1, nx + 1))
        s['dzb'] = dz.copy()
        # redistribute sediment from inactive zone to marine interaction zone
        s['zb'] += dz
        if p['process_tide']:
            s['zs'] += dz #??? NOTE(review): water level follows bed change here — confirm intent
    return s
def prevent_negative_mass(m, dm, pickup):
    '''Handle situations in which negative mass may occur due to numerics

    Negative mass may occur by moving sediment to lower layers down to
    accommodate deposition of sediments. In particular two cases are
    important:

    #. A net deposition cell has some erosional fractions.
       In this case the top layer mass is reduced according to the
       existing sediment distribution in the layer to accommodate
       deposition of fresh sediment. If the erosional fraction is
       subtracted afterwards, negative values may occur. Therefore the
       erosional fractions are subtracted from the top layer
       beforehand in this function. An equal mass of deposition
       fractions is added to the top layer in order to keep the total
       layer mass constant. Subsequently, the distribution of the
       sediment to be moved to lower layers is determined and the
       remaining deposits are accommodated.

    #. Deposition is larger than the total mass in a layer.
       In this case a non-uniform distribution in the bed may also
       lead to negative values as the abundant fractions are reduced
       disproportionally as sediment is moved to lower layers to
       accommodate the deposits. This function fills the top layers
       entirely with fresh deposits and moves the existing sediment
       down such that the remaining deposits have a total mass less
       than the total bed layer mass. Only the remaining deposits are
       fed to the routine that moves sediment through the layers.

    Parameters
    ----------
    m : np.ndarray
        Sediment mass in bed (nx*ny, nl, nf)
    dm : np.ndarray
        Total sediment mass exchanged between layers (nx*ny, nf)
    pickup : np.ndarray
        Sediment pickup (nx*ny, nf)

    Returns
    -------
    np.ndarray
        Sediment mass in bed (nx*ny, nl, nf)
    np.ndarray
        Total sediment mass exchanged between layers (nx*ny, nf)
    np.ndarray
        Sediment pickup (nx*ny, nf)

    Note
    ----
    The input arrays are modified in place and also returned. The
    situations handled in this function can also be prevented by
    reducing the time step, increasing the layer mass or increasing
    the adaptation time scale.
    '''
    nl = m.shape[1]
    nf = m.shape[2]
    ###
    ### case #1: deposition cells with some erosional fractions
    ###
    ix_dep = dm[:,0] > 0.
    # determine erosion and deposition fractions per cell
    # (split pickup by sign: positive = erosion, negative = deposition)
    ero = np.maximum(0., pickup)
    dep = -np.minimum(0., pickup)
    # determine gross erosion
    erog = np.sum(ero, axis=1, keepdims=True).repeat(nf, axis=1)
    # determine net deposition cells with some erosional fractions
    ix = ix_dep & (erog[:,0] > 0)
    # remove erosional fractions from pickup and remove an equal mass
    # of accretive fractions from the pickup, adapt sediment exchange
    # mass and bed composition accordingly
    if np.any(ix):
        d = normalize(dep, axis=1)
        # redistribute the gross erosion over the depositing fractions
        ddep = erog[ix,:] * d[ix,:]
        pickup[ix,:] = -dep[ix,:] + ddep
        dm[ix,:] = -np.sum(pickup[ix,:], axis=-1, keepdims=True).repeat(nf, axis=-1)
        m[ix,0,:] -= ero[ix,:] - ddep # FIXME: do not use deposition in normalization
    ###
    ### case #2: deposition cells with deposition larger than the mass present in the top layer
    ###
    mx = m[:,0,:].sum(axis=-1, keepdims=True)
    # determine deposition in terms of layer mass (round down);
    # n counts how many whole top-layer masses are being deposited
    n = dm[:,:1] // mx
    # determine if deposition is larger than a single layer mass
    if np.any(n > 0):
        # determine distribution of deposition
        d = normalize(pickup, axis=1)
        # walk through layers from top to bottom
        for i in range(nl):
            ix = (n > i).flatten()
            if not np.any(ix):
                break
            # move all sediment below current layer down one layer
            m[ix,(i+1):,:] = m[ix,i:-1,:]
            # fill current layer with deposited sediment
            m[ix,i,:] = mx[ix,:].repeat(nf, axis=1) * d[ix,:]
            # remove deposited sediment from pickup
            pickup[ix,:] -= m[ix,i,:]
    # discard any remaining deposits at locations where all layers
    # are filled with fresh deposits
    ix = (dm[:,:1] > mx).flatten()
    if np.any(ix):
        pickup[ix,:] = 0.
        # recompute sediment exchange mass
        dm[ix,:] = -np.sum(pickup[ix,:], axis=-1, keepdims=True).repeat(nf, axis=-1)
    return m, dm, pickup
def average_change(l, s, p):
    '''Compute bed level change rates for the current time step

    Stores the per-step bed level change, its annualized rate, and an
    exponentially weighted running average used as input for
    vegetation growth.

    Parameters
    ----------
    l : dict
        Spatial grids of the previous time step
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''
    seconds_per_year = 3600. * 24. * 365.25
    # effective time step, including numerical acceleration
    step = p['dt_opt'] * p['accfac']
    # bed level change with previous time step [m/timestep]
    s['dzb'] = s['zb'] - l['zb']
    # extrapolate the per-step change to an annual rate [m/year]
    s['dzbyear'] = s['dzb'] * seconds_per_year / step
    # exponentially weighted running average over ``avg_time``
    weight = step / p['avg_time']
    s['dzbavg'] = weight * s['dzbyear'] + (1 - weight) * l['dzbavg']
    # vegetation growth uses the annual rate, not the running average
    s['dzbveg'] = s['dzbyear'].copy()
    return s
#: Spatial grids belonging to the initial model state, keyed by the tuple
#: of named dimensions on which each variable is defined. These variables
#: are appended to MODEL_STATE at the bottom of this module.
INITIAL_STATE = {
    ('ny', 'nx') : (
        'uw',                               # [m/s] Wind velocity
        'uws',                              # [m/s] Component of wind velocity in x-direction
        'uwn',                              # [m/s] Component of wind velocity in y-direction
        'tau',                              # [N/m^2] Wind shear stress
        'taus',                             # [N/m^2] Component of wind shear stress in x-direction
        'taun',                             # [N/m^2] Component of wind shear stress in y-direction
        'tau0',                             # [N/m^2] Wind shear stress over a flat bed
        'taus0',                            # [N/m^2] Component of wind shear stress in x-direction over a flat bed
        'taun0',                            # [N/m^2] Component of wind shear stress in y-direction over a flat bed
        'taus_u',                           # [N/m^2] Saved direction of wind shear stress in x-direction
        'taun_u',                           # [N/m^2] Saved direction of wind shear stress in y-direction
        'dtaus',                            # [-] Component of the wind shear perturbation in x-direction
        'dtaun',                            # [-] Component of the wind shear perturbation in y-direction
        'ustar',                            # [m/s] Wind shear velocity
        'ustars',                           # [m/s] Component of wind shear velocity in x-direction
        'ustarn',                           # [m/s] Component of wind shear velocity in y-direction
        'ustar0',                           # [m/s] Wind shear velocity over a flat bed
        'ustars0',                          # [m/s] Component of wind shear velocity in x-direction over a flat bed
        'ustarn0',                          # [m/s] Component of wind shear velocity in y-direction over a flat bed
        'udir',                             # [rad] Wind direction
        'zs',                               # [m] Water level above reference (or equal to zb if zb > zs)
        'SWL',                              # [m] Still water level above reference
        'Hs',                               # [m] Wave height
        'Hsmix',                            # [m] Wave height for mixing (including setup, TWL)
        'Tp',                               # [s] Wave period for wave runup calculations
        'zne',                              # [m] Non-erodible layer
    ),
}
#: Spatial grids maintained in the model state, keyed by the tuple of
#: named dimensions on which each variable is defined.
MODEL_STATE = {
    ('ny', 'nx') : (
        'x',                                # [m] Real-world x-coordinate of grid cell center
        'y',                                # [m] Real-world y-coordinate of grid cell center
        'ds',                               # [m] Real-world grid cell size in x-direction
        'dn',                               # [m] Real-world grid cell size in y-direction
        'dsdn',                             # [m^2] Real-world grid cell surface area
        'dsdni',                            # [m^-2] Inverse of real-world grid cell surface area
        # 'alfa',                           # [rad] Real-world grid cell orientation #Sierd_comm in later releases this needs a revision
        'zb',                               # [m] Bed level above reference
        'zs',                               # [m] Water level above reference
        'zne',                              # [m] Height above reference of the non-erodible layer
        'zb0',                              # [m] Initial bed level above reference
        'zdry',                             # [m] — undocumented in source; verify meaning before use
        'dzdry',                            # [m] — undocumented in source; verify meaning before use
        'dzb',                              # [m/dt] Bed level change per time step (computed after avalanching!)
        'dzbyear',                          # [m/yr] Bed level change translated to m/y
        'dzbavg',                           # [m/year] Bed level change averaged over collected time steps
        'S',                                # [-] Level of saturation
        'moist',        #NEWCH              # [-] Moisture content (volumetric)
        'moist_swr',    #NEWCH              # [-] Moisture content soil water retention relationship (volumetric)
        'h_delta',      #NEWCH              # [-] Suction at reversal between wetting/drying conditions
        'gw',           #NEWCH              # [m] Groundwater level above reference
        'gw_prev',      #NEWCH              # [m] Groundwater level above reference in previous timestep
        'wetting',      #NEWCH              # [bool] Flag indicating wetting or drying of soil profile
        'scan_w',       #NEWCH              # [bool] Flag indicating that the moisture is calculated on the wetting scanning curve
        'scan_d',       #NEWCH              # [bool] Flag indicating that the moisture is calculated on the drying scanning curve
        'scan_w_moist', #NEWCH              # [-] Moisture content (volumetric) computed on the wetting scanning curve
        'scan_d_moist', #NEWCH              # [-] Moisture content (volumetric) computed on the drying scanning curve
        'w_h',          #NEWCH              # [-] Moisture content (volumetric) computed on the main wetting curve
        'd_h',          #NEWCH              # [-] Moisture content (volumetric) computed on the main drying curve
        'w_hdelta',     #NEWCH              # [-] Moisture content (volumetric) computed on the main wetting curve for hdelta
        'd_hdelta',     #NEWCH              # [-] Moisture content (volumetric) computed on the main drying curve for hdelta
        'ustar',                            # [m/s] Shear velocity by wind
        'ustars',                           # [m/s] Component of shear velocity in x-direction by wind
        'ustarn',                           # [m/s] Component of shear velocity in y-direction by wind
        'ustar0',                           # [m/s] Initial shear velocity (without perturbation)
        'zsep',                             # [m] Z level of polynomial that defines the separation bubble
        'hsep',                             # [m] Height of separation bubble = difference between z-level of zsep and of the bed level zb
        'theta_stat',                       # [degrees] Updated, spatially varying static angle of repose
        'theta_dyn',                        # [degrees] Updated, spatially varying dynamic angle of repose
        'rhoveg',                           # [-] Vegetation cover
        'drhoveg',                          # Change in vegetation cover
        'hveg',                             # [m] height of vegetation
        'dhveg',                            # [m] Difference in vegetation height per time step
        'dzbveg',                           # [m] Bed level change used for calculation of vegetation growth
        'germinate',                        # vegetation germination
        'lateral',                          # vegetation lateral expansion
        'vegfac',                           # Vegetation factor to modify shear stress by according to Raupach 1993
        'fence_height',                     # Fence height
        'R',                                # [m] wave runup
        'eta',                              # [m] wave setup
        'sigma_s',                          # [m] swash
        'TWL',                              # [m] Total Water Level above reference (SWL + Run-up)
        'SWL',                              # [m] Still Water Level above reference
        'DSWL',                             # [m] Dynamic Still water level above reference (SWL + Set-up)
        'Rti',                              # [-] Factor taking into account sheltering by roughness elements
    ),
    ('ny','nx','nfractions') : (
        'Cu',                               # [kg/m^2] Equilibrium sediment concentration integrated over saltation height
        'Cuf',                              # [kg/m^2] Equilibrium sediment concentration integrated over saltation height, assuming the fluid shear velocity threshold
        'Cu0',                              # [kg/m^2] Flat bed equilibrium sediment concentration integrated over saltation height
        'Ct',                               # [kg/m^2] Instantaneous sediment concentration integrated over saltation height
        'q',                                # [kg/m/s] Instantaneous sediment flux
        'qs',                               # [kg/m/s] Instantaneous sediment flux in x-direction
        'qn',                               # [kg/m/s] Instantaneous sediment flux in y-direction
        'pickup',                           # [kg/m^2] Sediment entrainment
        'w',                                # [-] Weights of sediment fractions
        'w_init',                           # [-] Initial guess for ``w''
        'w_air',                            # [-] Weights of sediment fractions based on grain size distribution in the air
        'w_bed',                            # [-] Weights of sediment fractions based on grain size distribution in the bed
        'uth',                              # [m/s] Shear velocity threshold
        'uthf',                             # [m/s] Fluid shear velocity threshold
        'uth0',                             # [m/s] Shear velocity threshold based on grainsize only (aerodynamic entrainment)
        'u',                                # [m/s] Mean horizontal saltation velocity in saturated state
        'us',                               # [m/s] Component of the saltation velocity in x-direction
        'un',                               # [m/s] Component of the saltation velocity in y-direction
        'u0',                               # [m/s] — undocumented in source; presumably saltation velocity over a flat bed (verify)
        ),
    ('ny','nx','nlayers') : (
        'thlyr',                            # [m] Bed composition layer thickness
        'salt',                             # [-] Salt content
    ),
    ('ny','nx','nlayers','nfractions') : (
        'mass',                             # [kg/m^2] Sediment mass in bed
    ),
}
#: AeoLiS model default configuration
DEFAULT_CONFIG = {
    'process_wind'            : True,   # Enable the process of wind
    'process_transport'       : True,   # Enable the process of transport
    'process_bedupdate'       : True,   # Enable the process of bed updating
    'process_threshold'       : True,   # Enable the process of threshold
    'th_grainsize'            : True,   # Enable wind velocity threshold based on grainsize
    'th_bedslope'             : False,  # Enable wind velocity threshold based on bedslope
    'th_moisture'             : False,  # Enable wind velocity threshold based on moisture
    'th_drylayer'             : False,  # Enable threshold based on drying of layer
    'th_humidity'             : False,  # Enable wind velocity threshold based on humidity
    'th_salt'                 : False,  # Enable wind velocity threshold based on salt
    'th_sheltering'           : False,  # Enable wind velocity threshold based on sheltering by roughness elements
    'th_nelayer'              : False,  # Enable wind velocity threshold based on a non-erodible layer
    'process_avalanche'       : False,  # Enable the process of avalanching
    'process_shear'           : False,  # Enable the process of wind shear
    'process_tide'            : False,  # Enable the process of tides
    'process_wave'            : False,  # Enable the process of waves
    'process_runup'           : False,  # Enable the process of wave runup
    'process_moist'           : False,  # Enable the process of moist
    'process_mixtoplayer'     : False,  # Enable the process of mixing
    'process_wet_bed_reset'   : False,  # Enable the process of bed-reset in the intertidal zone
    'process_meteo'           : False,  # Enable the process of meteo
    'process_salt'            : False,  # Enable the process of salt
    'process_humidity'        : False,  # Enable the process of humidity
    'process_groundwater'     : False,  #NEWCH # Enable the process of groundwater
    'process_scanning'        : False,  #NEWCH # Enable the process of scanning curves
    'process_inertia'         : False,  # NEW
    'process_separation'      : False,  # Enable the including of separation bubble
    'process_vegetation'      : False,  # Enable the process of vegetation
    'process_fences'          : False,  # Enable the process of sand fencing
    'process_dune_erosion'    : False,  # Enable the process of wave-driven dune erosion
    'process_seepage_face'    : False,  # Enable the process of groundwater seepage (NB. only applicable to positive beach slopes)
    'visualization'           : False,  # Boolean for visualization of model interpretation before and just after initialization
    'xgrid_file'              : None,   # Filename of ASCII file with x-coordinates of grid cells
    'ygrid_file'              : None,   # Filename of ASCII file with y-coordinates of grid cells
    'bed_file'                : None,   # Filename of ASCII file with bed level heights of grid cells
    'wind_file'               : None,   # Filename of ASCII file with time series of wind velocity and direction
    'tide_file'               : None,   # Filename of ASCII file with time series of water levels
    'wave_file'               : None,   # Filename of ASCII file with time series of wave heights
    'meteo_file'              : None,   # Filename of ASCII file with time series of meteorological conditions
    'bedcomp_file'            : None,   # Filename of ASCII file with initial bed composition
    'threshold_file'          : None,   # Filename of ASCII file with shear velocity threshold
    'fence_file'              : None,   # Filename of ASCII file with sand fence location/height (above the bed)
    'ne_file'                 : None,   # Filename of ASCII file with non-erodible layer
    'veg_file'                : None,   # Filename of ASCII file with initial vegetation density
    'wave_mask'               : None,   # Filename of ASCII file with mask for wave height
    'tide_mask'               : None,   # Filename of ASCII file with mask for tidal elevation
    'runup_mask'              : None,   # Filename of ASCII file with mask for run-up
    'threshold_mask'          : None,   # Filename of ASCII file with mask for the shear velocity threshold
    'gw_mask'                 : None,   #NEWCH # Filename of ASCII file with mask for the groundwater level
    'nx'                      : 0,      # [-] Number of grid cells in x-dimension
    'ny'                      : 0,      # [-] Number of grid cells in y-dimension
    'dt'                      : 60.,    # [s] Time step size
    'dx'                      : 1.,
    'dy'                      : 1.,
    'CFL'                     : 1.,     # [-] CFL number to determine time step in explicit scheme
    'accfac'                  : 1.,     # [-] Numerical acceleration factor
    'max_bedlevel_change'     : 999.,   # [m] Maximum bedlevel change after one timestep. Next timestep dt will be modified (use 999. if not used)
    'tstart'                  : 0.,     # [s] Start time of simulation
    'tstop'                   : 3600.,  # [s] End time of simulation
    'restart'                 : None,   # [s] Interval for which to write restart files
    'dzb_interval'            : 86400,  # [s] Interval used for calculation of vegetation growth
    'output_times'            : 60.,    # [s] Output interval in seconds of simulation time
    'output_file'             : None,   # Filename of netCDF4 output file
    # NOTE: a missing comma previously concatenated 'mass' and 'pickup'
    # into a single bogus 'masspickup' entry, silently dropping both
    # variables from the default output.
    'output_vars'             : ['zb', 'zs',
                                 'Ct', 'Cu',
                                 'uw', 'udir',
                                 'uth', 'mass',
                                 'pickup', 'w'], # Names of spatial grids to be included in output
    'output_types'            : [],     # Names of statistical parameters to be included in output (avg, sum, var, min or max)
    'external_vars'           : [],     # Names of variables that are overwritten by an external (coupling) model, i.e. CoCoNuT
    'grain_size'              : [225e-6], # [m] Average grain size of each sediment fraction
    'grain_dist'              : [1.],   # [-] Initial distribution of sediment fractions
    'nlayers'                 : 3,      # [-] Number of bed layers
    'layer_thickness'         : .01,    # [m] Thickness of bed layers
    'g'                       : 9.81,   # [m/s^2] Gravitational constant
    'v'                       : 0.000015, # [m^2/s] Air viscosity
    'rhoa'                    : 1.225,  # [kg/m^3] Air density
    'rhog'                    : 2650.,  # [kg/m^3] Grain density
    'rhow'                    : 1025.,  # [kg/m^3] Water density
    'porosity'                : .4,     # [-] Sediment porosity
    'Aa'                      : .085,   # [-] Constant in formulation for wind velocity threshold based on grain size
    'z'                       : 10.,    # [m] Measurement height of wind velocity
    'h'                       : None,   # [m] Representative height of saltation layer
    'k'                       : 0.001,  # [m] Bed roughness
    'L'                       : 100.,   # [m] Typical length scale of dune feature (perturbation)
    'l'                       : 10.,    # [m] Inner layer height (perturbation)
    'c_b'                     : 0.2,    # [-] Slope at the leeside of the separation bubble # c = 0.2 according to Durán 2010 (Sauermann 2001: c = 0.25 for 14 degrees)
    'mu_b'                    : 30,     # [deg] Minimum required slope for the start of flow separation
    'buffer_width'            : 10,     # [m] Width of the bufferzone around the rotational grid for wind perturbation
    'sep_filter_iterations'   : 0,      # [-] Number of filtering iterations on the sep-bubble (0 = no filtering)
    'zsep_y_filter'           : False,  # [-] Boolean for turning on/off the filtering of the separation bubble in y-direction
    'Cb'                      : 1.5,    # [-] Constant in bagnold formulation for equilibrium sediment concentration
    'Ck'                      : 2.78,   # [-] Constant in kawamura formulation for equilibrium sediment concentration
    'Cl'                      : 6.7,    # [-] Constant in lettau formulation for equilibrium sediment concentration
    'Cdk'                     : 5.,     # [-] Constant in DK formulation for equilibrium sediment concentration
    # 'm'                     : 0.5,    # [-] Factor to account for difference between average and maximum shear stress
    # 'alpha'                 : 0.4,    # [-] Relation of vertical component of ejection velocity and horizontal velocity difference between impact and ejection
    'kappa'                   : 0.41,   # [-] Von Kármán constant
    'sigma'                   : 4.2,    # [-] Ratio between basal area and frontal area of roughness elements
    'beta'                    : 130.,   # [-] Ratio between drag coefficient of roughness elements and bare surface
    'bi'                      : 1.,     # [-] Bed interaction factor
    'T'                       : 1.,     # [s] Adaptation time scale in advection equation
    'Tdry'                    : 3600.*1.5, # [s] Adaptation time scale for soil drying
    'Tsalt'                   : 3600.*24.*30., # [s] Adaptation time scale for salinitation
    'Tbedreset'               : 86400., # [s]
    'eps'                     : 1e-3,   # [m] Minimum water depth to consider a cell "flooded"
    'gamma'                   : .5,     # [-] Maximum wave height over depth ratio
    'xi'                      : .3,     # [-] Surf similarity parameter
    'facDOD'                  : .1,     # [-] Ratio between depth of disturbance and local wave height
    'csalt'                   : 35e-3,  # [-] Maximum salt concentration in bed surface layer
    'cpair'                   : 1.0035e-3, # [MJ/kg/oC] Specific heat capacity air
    'fc'                      : 0.11,   # NEWCH # [-] Moisture content at field capacity (volumetric)
    'w1_5'                    : 0.02,   # NEWCH # [-] Moisture content at wilting point (gravimetric)
    'resw_moist'              : 0.01,   # NEWCH # [-] Residual soil moisture content (volumetric)
    'satw_moist'              : 0.35,   # NEWCH # [-] Satiated soil moisture content (volumetric)
    'resd_moist'              : 0.01,   # NEWCH # [-] Residual soil moisture content (volumetric)
    'satd_moist'              : 0.5,    # NEWCH # [-] Satiated soil moisture content (volumetric)
    'nw_moist'                : 2.3,    # NEWCH # [-] Pore-size distribution index in the soil water retention function
    'nd_moist'                : 4.5,    # NEWCH # [-] Pore-size distribution index in the soil water retention function
    'mw_moist'                : 0.57,   # NEWCH # [-] m, van Genucthen param (can be approximated as 1-1/n)
    'md_moist'                : 0.42,   # NEWCH # [-] m, van Genucthen param (can be approximated as 1-1/n)
    'alfaw_moist'             : -0.070, # NEWCH # [cm^-1] Inverse of the air-entry value for a wetting branch of the soil water retention function (Schmutz, 2014)
    'alfad_moist'             : -0.035, # NEWCH # [cm^-1] Inverse of the air-entry value for a drying branch of the soil water retention function (Schmutz, 2014)
    'thick_moist'             : 0.002,  # NEWCH # [m] Thickness of surface moisture soil layer
    'K_gw'                    : 0.00078, # NEWCH # [m/s] Hydraulic conductivity (Schmutz, 2014)
    'ne_gw'                   : 0.3,    # NEWCH # [-] Effective porosity
    'D_gw'                    : 12,     # NEWCH # [m] Aquifer depth
    'tfac_gw'                 : 10,     # NEWCH # [-] Reduction factor for time step in ground water calculations
    'Cl_gw'                   : 0.7,    # NEWCH # [m] Groundwater overheight due to runup
    'in_gw'                   : 0,      # NEWCH # [m] Initial groundwater level
    'GW_stat'                 : 1,      # NEWCH # [m] Landward static groundwater boundary (if static boundary is defined)
    'theta_dyn'               : 33.,    # [degrees] Initial Dynamic angle of repose, critical dynamic slope for avalanching
    'theta_stat'              : 34.,    # [degrees] Initial Static angle of repose, critical static slope for avalanching
    'avg_time'                : 86400., # [s] Indication of the time period over which the bed level change is averaged for vegetation growth
    'gamma_vegshear'          : 16.,    # [-] Roughness factor for the shear stress reduction by vegetation
    'hveg_max'                : 1.,     # [m] Max height of vegetation
    'dzb_opt'                 : 0.,     # [m/year] Sediment burial for optimal growth
    'V_ver'                   : 0.,     # [m/year] Vertical growth
    'V_lat'                   : 0.,     # [m/year] Lateral growth
    'germinate'               : 0.,     # [1/year] Possibility of germination per year
    'lateral'                 : 0.,     # [1/year] Possibility of lateral expansion per year
    'veg_gamma'               : 1.,     # [-] Constant on influence of sediment burial
    'veg_sigma'               : 0.8,    # [-] Sigma in gaussian distribution of vegetation cover filter
    'sedimentinput'           : 0.,     # [-] Constant boundary sediment influx (only used in solve_pieter)
    'scheme'                  : 'euler_backward', # Name of numerical scheme (euler_forward, euler_backward or crank_nicolson)
    'solver'                  : 'trunk', # Name of the solver (trunk, pieter, steadystate,steadystatepieter)
    'boundary_lateral'        : 'circular', # Name of lateral boundary conditions (circular, constant ==noflux)
    'boundary_offshore'       : 'constant', # Name of offshore boundary conditions (flux, constant, uniform, gradient)
    'boundary_onshore'        : 'gradient', # Name of onshore boundary conditions (flux, constant, uniform, gradient)
    'boundary_gw'             : 'no_flow', # Landward groundwater boundary, dGw/dx = 0 (or 'static')
    'method_moist_threshold'  : 'belly_johnson', # Name of method to compute wind velocity threshold based on soil moisture content
    'method_moist_process'    : 'infiltration', # Name of method to compute soil moisture content(infiltration or surface_moisture)
    'offshore_flux'           : 0.,     # [-] Factor to determine offshore boundary flux as a function of Q0 (= 1 for saturated flux , = 0 for noflux)
    'constant_offshore_flux'  : 0.,     # [kg/m/s] Constant input flux at offshore boundary
    'onshore_flux'            : 0.,     # [-] Factor to determine onshore boundary flux as a function of Q0 (= 1 for saturated flux , = 0 for noflux)
    'constant_onshore_flux'   : 0.,     # [kg/m/s] Constant input flux at offshore boundary
    'lateral_flux'            : 0.,     # [-] Factor to determine lateral boundary flux as a function of Q0 (= 1 for saturated flux , = 0 for noflux)
    'method_transport'        : 'bagnold', # Name of method to compute equilibrium sediment transport rate
    'method_roughness'        : 'constant', # Name of method to compute the roughness height z0, note that here the z0 = k, which does not follow the definition of Nikuradse where z0 = k/30.
    'method_grainspeed'       : 'windspeed', # Name of method to assume/compute grainspeed (windspeed, duran, constant)
    'max_error'               : 1e-6,   # [-] Maximum error at which to quit iterative solution in implicit numerical schemes
    'max_iter'                : 1000,   # [-] Maximum number of iterations at which to quit iterative solution in implicit numerical schemes
    'max_iter_ava'            : 1000,   # [-] Maximum number of iterations at which to quit iterative solution in avalanching calculation
    'refdate'                 : '2020-01-01 00:00', # [-] Reference datetime in netCDF output
    'callback'                : None,   # Reference to callback function (e.g. example/callback.py':callback)
    'wind_convention'         : 'nautical', # Convention used for the wind direction in the input files (cartesian or nautical)
    'alfa'                    : 0,      # [deg] Real-world grid cell orientation wrt the North (clockwise)
    'dune_toe_elevation'      : 3,      # Choose dune toe elevation, only used in the PH12 dune erosion solver
    'beach_slope'             : 0.1,    # Define the beach slope, only used in the PH12 dune erosion solver
    'veg_min_elevation'       : 3,      # Choose the minimum elevation where vegetation can grow
    'vegshear_type'           : 'raupach', # Choose the Raupach grid based solver (1D or 2D) or the Okin approach (1D only)
    'okin_c1_veg'             : 0.48,   # x/h spatial reduction factor in Okin model for use with vegetation
    'okin_c1_fence'           : 0.48,   # x/h spatial reduction factor in Okin model for use with sand fence module
    'okin_initialred_veg'     : 0.32,   # initial shear reduction factor in Okin model for use with vegetation
    'okin_initialred_fence'   : 0.32,   # initial shear reduction factor in Okin model for use with sand fence module
    'veggrowth_type'          : 'orig', # 'orig', 'duranmoore14'
    'rhoveg_max'              : 0.5,    # maximum vegetation density, only used in duran and moore 14 formulation
    't_veg'                   : 3,      # time scale of vegetation growth (days), only used in duran and moore 14 formulation
    'v_gam'                   : 1,      # only used in duran and moore 14 formulation
}
#: Configuration parameters that must always be present in a model configuration
REQUIRED_CONFIG = ['nx', 'ny']

#: Merge initial and model state: for each dimension tuple shared by both
#: mappings, append the INITIAL_STATE variables to the MODEL_STATE variables.
#: (Previously written as a set comprehension of (key, value) tuples passed
#: to ``dict.update`` — equivalent via the iterable-of-pairs protocol, but a
#: dict comprehension states the intent directly.)
MODEL_STATE.update({
    k: MODEL_STATE[k] + INITIAL_STATE[k]
    for k in set(MODEL_STATE).intersection(INITIAL_STATE)
})
from __future__ import absolute_import, division
import os
import re
import logging
from datetime import datetime
# package modules
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
# check if netCDF4 is available
try:
import netCDF4
HAVE_NETCDF = True
except ImportError:
HAVE_NETCDF = False
logger.warning('No netCDF4 available, output is disabled')
def initialize(outputfile, outputvars, s, p, dimensions):
'''Create empty CF-compatible netCDF4 output file
Parameters
----------
outputfile : str
Name of netCDF4 output file
outputvars : dictionary
Spatial grids to be written to netCDF4 output file
s : dict
Spatial grids
p : dict
Model configuration parameters
dimensions : dict
Dictionary that specifies a tuple with the named dimensions
for each spatial grid (e.g. ('ny', 'nx', 'nfractions'))
Examples
--------
>>> netcdf.initialize('aeolis.nc',
... ['Ct', 'Cu', 'zb'],
... ['avg', 'max'],
... s, p, {'Ct':('ny','nx','nfractions'),
... 'Cu':('ny','nx','nfractions'),
... 'zb':('ny','nx')})
'''
# abort if netCDF4 is not available
if not HAVE_NETCDF:
return
with netCDF4.Dataset(outputfile, 'w') as nc:
# add dimensions
nc.createDimension('s', p['nx']+1)
nc.createDimension('n', p['ny']+1)
nc.createDimension('time', 0)
nc.createDimension('nv', 2)
nc.createDimension('nv2', 4)
nc.createDimension('layers', p['nlayers'])
nc.createDimension('fractions', p['nfractions'])
# add global attributes
# see http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/formats/DataDiscoveryAttConvention.html
nc.Conventions = 'CF-1.6'
nc.Metadata_Conventions = 'Unidata Dataset Discovery v1.0'
#nc.featureType = 'grid'
#nc.cdm_data_type = 'grid'
nc.standard_name_vocabulary = 'CF-1.6'
nc.title = ''
nc.summary = ''
nc.source = 'AeoLiS'
nc.id = ''
nc.naming_authority = ''
nc.time_coverage_start = ''
nc.time_coverage_end = ''
nc.time_coverage_resolution = ''
nc.geospatial_lat_min = 0
nc.geospatial_lat_max = 0
nc.geospatial_lat_units = 'degrees_north'
nc.geospatial_lat_resolution = ''
nc.geospatial_lon_min = 0
nc.geospatial_lon_max = 0
nc.geospatial_lon_units = 'degrees_east'
nc.geospatial_lon_resolution = ''
nc.geospatial_vertical_min = 0
nc.geospatial_vertical_max = 0
nc.geospatial_vertical_units = ''
nc.geospatial_vertical_resolution = ''
nc.geospatial_vertical_positive = ''
nc.institution = ''
nc.creator_name = ''
nc.creator_url = ''
nc.creator_email = ''
nc.project = ''
nc.processing_level = ''
nc.references = ''
nc.keywords_vocabulary = 'NASA/GCMD Earth Science Keywords. Version 6.0'
nc.keywords = ''
nc.acknowledgment = ''
nc.comment = ''
nc.contributor_name = ''
nc.contributor_role = ''
nc.date_created = datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%MZ')
nc.date_modified = datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%MZ')
nc.date_issued = datetime.strftime(datetime.utcnow(), '%Y-%m-%dT%H:%MZ')
nc.publisher_name = ''
nc.publisher_email = ''
nc.publisher_url = ''
nc.history = ''
nc.license = ''
nc.metadata_link = '0'
# add variables
nc.createVariable('s', 'float32', (u's'))
nc.variables['s'].long_name = 's-coordinate'
nc.variables['s'].units = '1'
nc.variables['s'].valid_min = -np.inf
nc.variables['s'].valid_max = np.inf
nc.createVariable('n', 'float32', (u'n'))
nc.variables['n'].long_name = 'n-coordinate'
nc.variables['n'].units = '1'
nc.variables['n'].valid_min = -np.inf
nc.variables['n'].valid_max = np.inf
nc.createVariable('x', 'float32', (u'n', u's'))
nc.variables['x'].long_name = 'x-coordinate'
nc.variables['x'].standard_name = 'projection_x_coordinate'
nc.variables['x'].units = 'm'
nc.variables['x'].axis = 'X'
nc.variables['x'].valid_min = -np.inf
nc.variables['x'].valid_max = np.inf
nc.variables['x'].bounds = 'x_bounds'
nc.variables['x'].grid_mapping = 'crs'
nc.createVariable('y', 'float32', (u'n', u's'))
nc.variables['y'].long_name = 'y-coordinate'
nc.variables['y'].standard_name = 'projection_y_coordinate'
nc.variables['y'].units = 'm'
nc.variables['y'].axis = 'Y'
nc.variables['y'].valid_min = -np.inf
nc.variables['y'].valid_max = np.inf
nc.variables['y'].bounds = 'y_bounds'
nc.variables['y'].grid_mapping = 'crs'
nc.createVariable('layers', 'float32', (u'layers',))
nc.variables['layers'].long_name = 'bed layers'
nc.variables['layers'].units = '1'
nc.variables['layers'].valid_min = 0
nc.variables['layers'].valid_max = np.inf
nc.createVariable('fractions', 'float32', (u'fractions',))
nc.variables['fractions'].long_name = 'sediment fractions'
nc.variables['fractions'].units = 'm'
nc.variables['fractions'].valid_min = 0
nc.variables['fractions'].valid_max = np.inf
nc.createVariable('lat', 'float32', (u'n', u's'))
nc.variables['lat'].long_name = 'latitude'
nc.variables['lat'].standard_name = 'latitude'
nc.variables['lat'].units = 'degrees_north'
nc.variables['lat'].valid_min = -np.inf
nc.variables['lat'].valid_max = np.inf
nc.variables['lat'].bounds = 'lat_bounds'
nc.variables['lat'].ancillary_variables = ''
nc.createVariable('lon', 'float32', (u'n', u's'))
nc.variables['lon'].long_name = 'longitude'
nc.variables['lon'].standard_name = 'longitude'
nc.variables['lon'].units = 'degrees_east'
nc.variables['lon'].valid_min = -np.inf
nc.variables['lon'].valid_max = np.inf
nc.variables['lon'].bounds = 'lon_bounds'
nc.variables['lon'].ancillary_variables = ''
nc.createVariable('time', 'float64', (u'time',))
nc.variables['time'].long_name = 'time'
nc.variables['time'].standard_name = 'time'
nc.variables['time'].units = 'seconds since %s' % p['refdate']
nc.variables['time'].calendar = 'julian'
nc.variables['time'].axis = 'T'
nc.variables['time'].bounds = 'time_bounds'
#nc.variables['time'].ancillary_variables = ''
nc.createVariable('x_bounds', 'float32', (u's', u'n', u'nv'))
nc.variables['x_bounds'].units = 'm'
nc.variables['x_bounds'].comment = 'x-coordinate values at the upper and lower bounds of each pixel.'
nc.createVariable('y_bounds', 'float32', (u's', u'n', u'nv'))
nc.variables['y_bounds'].units = 'm'
nc.variables['y_bounds'].comment = 'y-coordinate values at the left and right bounds of each pixel.'
nc.createVariable('lat_bounds', 'float32', (u's', u'n', u'nv2'))
nc.variables['lat_bounds'].units = 'degrees_north'
nc.variables['lat_bounds'].comment = 'latitude values at the north and south bounds of each pixel.'
nc.createVariable('lon_bounds', 'float32', (u's', u'n', u'nv2'))
nc.variables['lon_bounds'].units = 'degrees_east'
nc.variables['lon_bounds'].comment = 'longitude values at the west and east bounds of each pixel.'
nc.createVariable('time_bounds', 'float32', (u'time', u'nv'))
nc.variables['time_bounds'].units = 'seconds since %s' % p['refdate']
nc.variables['time_bounds'].comment = 'time bounds for each time value'
meta = parse_metadata(outputvars)
for var0, exts in outputvars.items():
if var0 not in s:
continue
if var0 not in dimensions:
continue
dims = ['time'] + [d[1:] for d in dimensions[var0]]
dims = ['s' if d == 'x' else d for d in dims]
dims = ['n' if d == 'y' else d for d in dims]
for ext in exts:
if ext is None:
var = var0
else:
var = '%s_%s' % (var0, ext)
nc.createVariable(var, 'float32', dims)
nc.variables[var].long_name = var
#nc.variables[var].standard_name = 'sea_surface_height_above_mean_sea_level'
nc.variables[var].scale_factor = 1.0
nc.variables[var].add_offset = 0.0
nc.variables[var].valid_min = -np.inf
nc.variables[var].valid_max = np.inf
nc.variables[var].coordinates = ' '.join(dims)
nc.variables[var].grid_mapping = 'crs'
nc.variables[var].source = ''
nc.variables[var].references = ''
#nc.variables[var].cell_methods = ''
#nc.variables[var].ancillary_variables = ''
#nc.variables[var].comment = ''
if meta[var0]['units']:
nc.variables[var].units = meta[var0]['units']
nc.createVariable('crs', 'int32', ())
nc.variables['crs'].grid_mapping_name = 'stereographic'
nc.variables['crs'].epsg_code = 'EPSG:28992'
nc.variables['crs'].semi_major_axis = 6377397.155
nc.variables['crs'].semi_minor_axis = 6356078.96282
nc.variables['crs'].inverse_flattening = 299.1528128
nc.variables['crs'].latitude_of_projection_origin = 52.0922178
nc.variables['crs'].longitude_of_projection_origin = 5.23155
nc.variables['crs'].scale_factor_at_projection_origin = 0.9999079
nc.variables['crs'].false_easting = 155000.0
nc.variables['crs'].false_northing = 463000.0
nc.variables['crs'].proj4_params = '+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.999908 +x_0=155000 +y_0=463000 +ellps=bessel +units=m +towgs84=565.4174,50.3319,465.5542,-0.398957388243134,0.343987817378283,-1.87740163998045,4.0725 +no_defs'
# store static data
nc.variables['s'][:] = np.arange(p['nx']+1)
nc.variables['n'][:] = np.arange(p['ny']+1)
nc.variables['x'][:,:] = s['x']
nc.variables['y'][:,:] = s['y']
nc.variables['layers'][:] = np.arange(p['nlayers'])
nc.variables['fractions'][:] = p['grain_size']
nc.variables['lat'][:,:] = 0.
nc.variables['lon'][:,:] = 0.
nc.variables['x_bounds'][:,:] = 0.
nc.variables['y_bounds'][:,:] = 0.
nc.variables['lat_bounds'][:,:] = 0.
nc.variables['lon_bounds'][:,:] = 0.
# store model settings writing arrays as attributes is not supported anymore
if 0:
for k, v in p.items():
if k.startswith('_'):
continue
k = 'par_%s' % k
if v is None:
nc.setncattr(k, -1)
elif isinstance(v, bool):
nc.setncattr(k, int(v))
else:
nc.setncattr(k, np.real(v))
def append(outputfile, variables):
    '''Append variables to existing netCDF4 output file

    Increments the time axis length with one and appends the provided
    spatial grids along the time axis. The ``variables`` dictionary
    should at least have the ``time`` field indicating the current
    simulation time. The CF time bounds are updated accordingly.

    Parameters
    ----------
    outputfile : str
        Name of netCDF4 output file
    variables : dict
        Dictionary with spatial grids and time

    Examples
    --------
    >>> netcdf.append('aeolis.nc', {'time': 3600.,
    ...                             'Ct': np.array([[0., 0., ..., 0.]]),
    ...                             'Cu': np.array([[1., 1., ..., 1.]])})

    See Also
    --------
    set_bounds

    '''

    # nothing to do without netCDF4 support
    if not HAVE_NETCDF:
        return

    with netCDF4.Dataset(outputfile, 'a') as nc:
        # next index along the (unlimited) time dimension
        ix = nc.variables['time'].shape[0]
        nc.variables['time'][ix] = variables['time']

        # write all spatial grids at the new time index
        for name, grid in variables.items():
            if name != 'time':
                nc.variables[name][ix, ...] = grid

    set_bounds(outputfile)
def set_bounds(outputfile):
    '''Sets CF time bounds

    Writes the time bounds of the most recently written time step: the
    lower bound is the previous time value (zero for the first step)
    and the upper bound is the current time value.

    Parameters
    ----------
    outputfile : str
        Name of netCDF4 output file

    '''

    # nothing to do without netCDF4 support
    if not HAVE_NETCDF:
        return

    with netCDF4.Dataset(outputfile, 'a') as nc:
        # index of the last time step written so far
        ix = nc.variables['time'].shape[0] - 1
        lower = 0 if ix == 0 else nc.variables['time'][ix-1]
        nc.variables['time_bounds'][ix, 0] = lower
        nc.variables['time_bounds'][ix, 1] = nc.variables['time'][ix]
def dump(outputfile, dumpfile, var='mass', ix=-1):
    '''Dumps time slice from netCDF4 output file to ASCII file

    This function can be used to use a specific time slice from a
    netCDF4 output file as input file for another AeoLiS model
    run. For example, the bed composition from a spinup run can be
    used as initial composition for other runs reducing the spinup
    time.

    Parameters
    ----------
    outputfile : str
        Name of netCDF4 output file
    dumpfile : str
        Name of ASCII dump file
    var : str, optional
        Name of spatial grid to be dumped (default: mass)
    ix : int
        Time slice index to be dumped (default: -1)

    Examples
    --------
    >>> # use bedcomp_file = bedcomp.txt in model configuration file
    ... netcdf.dump('aeolis.nc', 'bedcomp.txt', var='mass')

    '''

    # nothing to do without netCDF4 support
    if not HAVE_NETCDF:
        return

    with netCDF4.Dataset(outputfile, 'r') as ds:
        # read the requested time slice and flatten all leading
        # dimensions so the dump is a 2D ASCII table
        grid = ds.variables[var][ix, ...]
        np.savetxt(dumpfile, grid.reshape((-1, grid.shape[1])))
def parse_metadata(outputvars):
    '''Parse metadata from constants.py

    Parses the Python comments in constants.py to extract meta data,
    like units, for the model state variables that can be used as
    netCDF4 meta data.

    Parameters
    ----------
    outputvars : dictionary
        Spatial grids to be written to netCDF4 output file

    Returns
    -------
    meta : dict
        Dictionary with meta data for the output variables; every
        variable maps to ``{'units': ...}`` where units is ``None``
        when no meta data was found

    '''

    pyfile = os.path.join(os.path.split(__file__)[0], 'constants.py')

    # default: no units known for any output variable
    meta = {var: {'units': None} for var in outputvars.keys()}

    if os.path.exists(pyfile):
        with open(pyfile, 'r') as fp:
            for line in fp:
                # match lines like:  'zb',  # [m] bed level
                # (raw string: the pattern contains \s and \[ escapes)
                m = re.match(r'^\s*\'(.*)\',\s*#\s*\[(.*)\]', line)
                if m:
                    var, units = m.groups()
                    if var in meta.keys():
                        # CF convention uses '1' for dimensionless units
                        meta[var]['units'] = '1' if units == '-' else units

    return meta
from __future__ import absolute_import, division
import logging
import numpy as np
# package modules
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def initialize(s, p):
    '''Initialize the spatial grids

    Fills the x- and y-coordinate grids from the grid files in the
    model configuration, determines the orientation of the grid with
    respect to the horizontal (stored in ``p['alpha']``), and computes
    the grid cell dimensions ``ds``/``dn`` and cell areas
    ``dsdn``/``dsdni`` in the rotated (horizontal) frame. An
    equidistant grid is assumed: the spacing of the first cell is used
    for the entire grid.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters with grid orientation ``alpha``
        added

    '''

    # initialize x-dimensions
    s['x'][:,:] = p['xgrid_file']

    # initialize y-dimension
    ny = p['ny']

    if ny == 0:
        # 1D transect: dummy y-coordinates
        s['y'][:,:] = 0.
    else:
        s['y'][:,:] = p['ygrid_file']

    # compute angle with the horizontal from the first two grid points
    dx = s['x'][0,1] - s['x'][0,0]
    dy = s['y'][0,1] - s['y'][0,0]

    if dx == 0.:
        p['alpha'] = 90.
    else:
        p['alpha'] = np.rad2deg(np.arctan(dy/dx))
        # arctan only covers (-90, 90); shift to the opposite quadrant
        # when the grid points in the negative x/y direction
        if dx <= 0 and dy <= 0:
            p['alpha'] += 180.

    # rotate grids to align with horizontal
    xr, yr = rotate(s['x'], s['y'], p['alpha'],
                    origin=(np.mean(s['x']), np.mean(s['y'])))

    # compute cell dimensions
    if ny == 0:
        s['dn'][:,:] = 1.
        s['ds'][:, 1:] = np.diff(s['x'], axis=1)
        s['ds'][:, 0] = s['ds'][:, 1]
    else:
        # equidistant grid assumed: spacing of the first cell in the
        # rotated frame is used everywhere
        s['dn'][:,:] = ((yr[0,1]-yr[0,0])**2.+(xr[0,1]-xr[0,0])**2.)**0.5
        s['ds'][:,:] = ((xr[1,0]-xr[0,0])**2.+(yr[1,0]-yr[0,0])**2.)**0.5

    # compute cell areas (and their inverse, used by the solver)
    s['dsdn'][:,:] = s['ds'] * s['dn']
    s['dsdni'][:,:] = 1. / s['dsdn']

    # warn when the grid is not ascending in both directions, which the
    # solver requires after rotation to the horizontal orientation
    if ny > 0:
        dx_test = s['x'][0,1] - s['x'][0,0]
        dy_test = s['y'][1,0] - s['y'][0,0]
        if (dx_test <= 0.) or (dy_test <= 0.):
            logger.warning(format_log('WARNING: After rotation to the horizontal orientation, both x and y should be ascending. Otherwise the solver might produce false results. It is recommended to use the following function: create_grd (see https://github.com/openearth/aeolis-python/blob/AEOLIS_V2/tools/setup/setup_tools.py)'))

    return s, p
from __future__ import absolute_import, division
import os
import docopt
import logging
import numpy as np
from aeolis.model import AeoLiSRunner, WindGenerator
#class StreamFormatter(logging.Formatter):
#
# def format(self, record):
# if record.levelname == 'INFO':
# return record.getMessage()
# else:
# return '%s: %s' % (record.levelname, record.getMessage())
def aeolis():
    '''aeolis : a process-based model for simulating supply-limited aeolian sediment transport

    Usage:
        aeolis <config> [options]

    Positional arguments:
        config             configuration file

    Options:
        -h, --help         show this help message and exit
        --callback=FUNC    reference to callback function (e.g. example/callback.py:callback)
        --restart=FILE     model restart file
        --verbose=LEVEL    logging verbosity [default: 20]
        --debug            write debug logs

    '''

    print_license()

    arguments = docopt.docopt(aeolis.__doc__)

    # fix: the --verbose and --debug options were parsed but never
    # applied; configure the package logger level accordingly
    logger = logging.getLogger('aeolis')
    if arguments['--debug']:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(int(arguments['--verbose']))

    # start model
    model = AeoLiSRunner(configfile=arguments['<config>'])
    model.run(callback=arguments['--callback'],
              restartfile=arguments['--restart'])
def wind():
    '''aeolis-wind : a wind time series generation tool for the aeolis model

    Usage:
        aeolis-wind <file> [--mean=MEAN] [--max=MAX] [--duration=DURATION] [--timestep=TIMESTEP]

    Positional arguments:
        file               output file

    Options:
        -h, --help           show this help message and exit
        --mean=MEAN          mean wind speed [default: 10]
        --max=MAX            maximum wind speed [default: 30]
        --duration=DURATION  duration of time series [default: 3600]
        --timestep=TIMESTEP  timestep of time series [default: 60]

    '''

    print_license()

    arguments = docopt.docopt(wind.__doc__)

    # create random wind time series and write it to the output file
    generator = WindGenerator(mean_speed=float(arguments['--mean']),
                              max_speed=float(arguments['--max']),
                              dt=float(arguments['--timestep']))
    generator.generate(duration=float(arguments['--duration']))
    generator.write_time_series(arguments['<file>'])

    # report basic statistics of the generated wind speeds
    u = generator.get_time_series()[1]
    fmt = '%-4s : %6.3f m/s'
    for label, stat in (('min', np.min), ('mean', np.mean), ('max', np.max)):
        print(fmt % (label, stat(u)))
def print_license():
    '''Print the license notice shown at every command-line invocation.'''
    lines = (
        'AeoLiS Copyright (C) 2015 Bas Hoonhout',
        'This program comes with ABSOLUTELY NO WARRANTY.',
        'This is free software, and you are welcome to redistribute it',
        'under certain conditions; See LICENSE.txt for details.',
        '',
    )
    for line in lines:
        print(line)
if __name__ == '__main__':
aeolis() | AeoLiS | /AeoLiS-2.1.1.tar.gz/AeoLiS-2.1.1/aeolis/console.py | console.py |
import logging
import numpy as np
import scipy.special
import scipy.interpolate
from scipy import ndimage, misc
#import matplotlib
import matplotlib.pyplot as plt
import os
#import scipy.interpolate as spint
#import scipy.spatial.qhull as qhull
import time
# package modules
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
class WindShear:
'''Class for computation of 2DH wind shear perturbations over a topography.
The class implements a 2D FFT solution to the wind shear
perturbation on curvilinear grids. As the FFT solution is only
defined on an equidistant rectilinear grid with circular boundary
conditions that is aligned with the wind direction, a rotating
computational grid is automatically defined for the computation.
The computational grid is extended in all directions using a
logistic sigmoid function as to ensure full coverage of the input
grid for all wind directions, circular boundaries and preservation
of the alongshore uniformity. An extra buffer distance can be
used as to minimize the disturbence from the borders in the input
grid. The results are interpolated back to the input grid when
necessary.
Frequencies related to wave lengths smaller than a computational
grid cell are filtered from the 2D spectrum of the topography
using a logistic sigmoid tapering. The filtering aims to minimize
the disturbance as a result of discontinuities in the topography
that may physically exists, but cannot be solved for in the
computational grid used.
Example
-------
>>> w = WindShear(x, y, z)
>>> w(u0=10., udir=30.).add_shear(taux, tauy)
Notes
-----
To do:
* Actual resulting values are still to be compared with the results
from Kroy et al. (2002)
* Grid interpolation can still be optimized
* Separation bubble is still to be improved
'''
igrid = {}
cgrid = {}
istransect = False
def __init__(self, x, y, z, dx, dy, L, l, z0,
buffer_width, buffer_relaxation=None):
'''Class initialization
Parameters
----------
x : numpy.ndarray
2D array with x-coordinates of input grid
y : numpy.ndarray
2D array with y-coordinates of input grid
z : numpy.ndarray
2D array with topography of input grid
dx : float, optional
Grid spacing in x dimension of computational grid
(default: 1)
dy : float, optional
Grid spacing of y dimension of computational grid
(default: 1)
buffer_width : float, optional
Width of buffer distance between input grid boundary and
computational grid boundary (default: 100)
buffer_relaxation : float, optional
Relaxation of topography in buffer from input grid
boundary to computational grid boundary (default:
buffer_width / 4)
L : float, optional
Length scale of topographic features (default: 100)
l : float, optional
Height of inner layer (default: 10)
z0 : float, optional
Aerodynamic roughness (default: .001)
'''
if buffer_relaxation is None:
buffer_relaxation = buffer_width / 4.
if z.shape[0] == 1:
self.istransect = True
# Assigning values to original (i) and computational (c) grid
self.cgrid = dict(dx = dx, dy = dy)
# Setting buffer settings
self.buffer_width = buffer_width
self.buffer_relaxation = buffer_relaxation
# Setting shear perturbation settings
self.L = L
self.l = l
self.z0 = z0
    def __call__(self, x, y, z, taux, tauy, u0, udir,
                 process_separation, c, mu_b,
                 taus0, taun0, sep_filter_iterations, zsep_y_filter,
                 plot=False):
        '''Compute wind shear for given wind speed and direction

        Builds a wind-aligned computational grid, interpolates the
        topography onto it, computes the shear stress perturbation (and
        optionally the flow separation bubble) and interpolates the
        resulting shear stresses back onto the input grid.

        Parameters
        ----------
        x, y, z : numpy.ndarray
            Coordinates and topography of the input grid
        taux, tauy : numpy.ndarray
            Shear stress components on the input grid
        u0 : float
            Free-flow wind speed
        udir : float
            Wind direction in degrees
        process_separation : bool
            Whether to compute and apply the flow separation bubble
        c, mu_b : float
            Separation bubble parameters (see :func:`separation`)
        taus0, taun0 : float
            Background shear stress components used to fill the
            computational grid
        sep_filter_iterations : int
            Number of FFT smoothing passes for the separation bubble
        zsep_y_filter : bool
            Whether to smooth the separation bubble in y-direction
        plot : bool, optional
            Write diagnostic plots of the intermediate grids to a
            ``plots`` folder (default: False)

        '''

        # Reload x and y because of horizontalized input-grid
        self.igrid = dict(x=x, y=y, z=z, taux=taux, tauy=tauy)

        # Convert to cartesian to perform all the rotations
        u_angle = 270. - udir  # wind angle

        if plot:
            fig, axs = plt.subplots(2, 3, figsize=(16, 9))
            self.plot(ax=axs[0,0], cmap='Reds', stride=10, computational_grid=False)
            axs[0,0].set_title('Original input grid')

        # =====================================================================
        # Creating, rotating and filling computational grid
        # =====================================================================

        # Creating the computational grid
        self.set_computational_grid(udir)

        # Storing computational (c) and original (i) grids
        gi = self.igrid  # initial grid
        gc = self.cgrid  # computational grid

        # Rotate computational (c) grid to the current wind direction
        gc['x'], gc['y'] = self.rotate(gc['xi'], gc['yi'], -u_angle, origin=(self.x0, self.y0))

        # =====================================================================
        # Filling the computational grid with bedlevel and shear stress
        # =====================================================================

        # For now turned off because caused problems.
        # Just normal extrapolation applied
        # xi_buff, yi_buff, zi_buff = self.buffer_original_grid()
        # gc['z'] = self.interpolate(xi_buff, yi_buff, zi_buff, gc['x'], gc['y'], 0)

        # Interpolate bed levels and shear to the computational grid
        gc['z'] = self.interpolate(gi['x'], gi['y'], gi['z'], gc['x'], gc['y'], 0)

        # Project the taus0 and taun0 on the computational grid
        # (uniform background shear stress)
        gc['taux'] = np.full(np.shape(gc['x']), taus0)
        gc['tauy'] = np.full(np.shape(gc['x']), taun0)

        if plot:
            self.plot(ax=axs[0,1], cmap='Reds', stride=10, computational_grid=True)
            axs[0,1].set_title('Interpolated values on computational grid')

        # =====================================================================
        # Rotating x, y and taux, tauy to horizontal position
        # =====================================================================

        gc['x'], gc['y'] = self.rotate(gc['x'], gc['y'], u_angle, origin=(self.x0, self.y0))
        gi['x'], gi['y'] = self.rotate(gi['x'], gi['y'], u_angle, origin=(self.x0, self.y0))
        gc['taux'], gc['tauy'] = self.rotate(gc['taux'], gc['tauy'], u_angle)

        if plot:
            self.plot(ax=axs[0,2], cmap='Reds', stride=10, computational_grid=True)
            axs[0,2].set_title('Interpolated values on computational grid')

        # =====================================================================
        # Computing bubble and add it the bedlevel for shear perturbation.
        # Afterwards, computing the change in shear stress (dtaux and dtauy),
        # rotate to horizontal computational grid and add to tau0
        # =====================================================================

        # Compute separation bubble: the perturbation is computed over
        # the envelope of bed level and separation surface
        if process_separation:
            zsep = self.separation(c, mu_b, sep_filter_iterations, zsep_y_filter)
            z_origin = gc['z'].copy()
            gc['z'] = np.maximum(gc['z'], zsep)

        # Compute wind shear stresses on computational grid
        self.compute_shear(u0, nfilter=(1., 2.))

        # Add shear perturbation to the background shear stress
        self.add_shear()

        # Prevent negatives in x-direction (wind-direction)
        gc['taux'] = np.maximum(gc['taux'], 0.)

        # Compute the influence of the separation on the shear stress
        if process_separation:
            gc['hsep'] = gc['z'] - z_origin
            self.separation_shear(gc['hsep'])

        if plot:
            tau_plot = np.hypot(gc['taux'], gc['tauy'])
            pc = axs[1,0].pcolormesh(gc['x'], gc['y'], tau_plot)
            plt.colorbar(pc, ax=axs[1,0])
            axs[1,0].set_title('Rotate grids, such that computational is horizontal')

        # =====================================================================
        # Rotating x, y and taux, tauy to original orientation
        # =====================================================================

        gc['x'], gc['y'] = self.rotate(gc['x'], gc['y'], -u_angle, origin=(self.x0, self.y0))
        gi['x'], gi['y'] = self.rotate(gi['x'], gi['y'], -u_angle, origin=(self.x0, self.y0))
        gc['taux'], gc['tauy'] = self.rotate(gc['taux'], gc['tauy'], -u_angle)

        # =====================================================================
        # Interpolation from the computational grid back to the original
        # =====================================================================

        # Interpolate wind shear results to real grid
        gi['taux'] = self.interpolate(gc['x'], gc['y'], gc['taux'], gi['x'], gi['y'], taus0)
        gi['tauy'] = self.interpolate(gc['x'], gc['y'], gc['tauy'], gi['x'], gi['y'], taun0)

        if process_separation:
            gi['hsep'] = self.interpolate(gc['x'], gc['y'], gc['hsep'], gi['x'], gi['y'], 0. )

        # Final plots and lay-out
        if plot:
            tau_plot = np.hypot(gi['taux'], gi['tauy'])
            pc = axs[1,1].pcolormesh(gi['x'], gi['y'], tau_plot)
            plt.colorbar(pc, ax=axs[1,1])
            axs[1,1].set_title('Interpolate back onto original grid')

            self.plot(ax=axs[1,2], cmap='Reds', stride=10, computational_grid=False)
            axs[1,2].set_title('Rotate original grid back')

            for axr in axs:
                for ax in axr:
                    ax.set_xlim([-400, 400])
                    ax.set_ylim([-400, 400])
                    ax.set_aspect('equal')

            # Create plotting folder
            os.getcwd()
            fig_path = os.path.join(os.getcwd(), 'plots')
            if not os.path.exists(fig_path):
                os.makedirs(fig_path)
            fig_name = 'udir_' + str(int(udir)) + '.png'
            plt.savefig(os.path.join(fig_path, fig_name), dpi=200)
            plt.close('all')

        return self
# Input functions for __call()
    def set_computational_grid(self, udir):
        '''Define computational grid

        The computational grid is square with dimensions equal to the
        diagonal of the bounding box of the input grid, plus twice the
        buffer width.

        Parameters
        ----------
        udir : float
            Wind direction in degrees; the grid boundaries are placed
            parallel and perpendicular to this direction.

        '''

        # Copying the original (i) and computational (c) grid
        gi = self.igrid
        gc = self.cgrid

        # Compute grid center, same for both original (i) and computational (c) grid
        x0, y0 = np.mean(gi['x']), np.mean(gi['y'])

        # Initialization: y-intercepts of wind-parallel (W) and
        # wind-perpendicular (L) boundary lines through each corner
        b_W = np.zeros(4)
        b_L = np.zeros(4)
        xcorner = np.zeros(4)
        ycorner = np.zeros(4)

        # Computing the corner-points of the grid
        xcorner[0] = gi['x'][0, 0]
        ycorner[0] = gi['y'][0, 0]
        xcorner[1] = gi['x'][-1, 0]
        ycorner[1] = gi['y'][-1, 0]
        xcorner[2] = gi['x'][0, -1]
        ycorner[2] = gi['y'][0, -1]
        xcorner[3] = gi['x'][-1, -1]
        ycorner[3] = gi['y'][-1, -1]

        # Preventing vertical lines: np.polyfit below cannot represent a
        # line with infinite slope, so wind directions at an exact
        # multiple of 90 degrees are nudged by 0.1 degree
        udir_verticals = np.arange(-1080, 1080, 90)
        udir_vertical_bool = False
        for udir_vertical in udir_verticals:
            if (abs(udir - udir_vertical) <= 0.001):
                udir_vertical_bool = True
        if udir_vertical_bool:
            udir -= 0.1

        # Compute slope (m) and intercept (b) from parallel lines along all (4) grids corners
        for i in range(4):
            # Parallel boundaries (same slope m_W for every corner)
            m_W, b_W[i] = np.polyfit([xcorner[i], xcorner[i] - np.sin(np.deg2rad(udir))],
                                     [ycorner[i], ycorner[i] - np.cos(np.deg2rad(udir))], 1)
            # Perpendicular boundaries (same slope m_L for every corner)
            m_L, b_L[i] = np.polyfit([xcorner[i], xcorner[i] - np.sin(np.deg2rad(udir-90.))],
                                     [ycorner[i], ycorner[i] - np.cos(np.deg2rad(udir-90.))], 1)

        # Determine the most outer boundaries (for parallel and perpendicular)
        db_W = self.maxDiff(b_W)
        db_L = self.maxDiff(b_L)

        # Compute the distance between the outer boundaries to determine the
        # width (W) and length (L) of the grid; the distance between two
        # parallel lines y = m*x + b1 and y = m*x + b2 is |b1-b2|/sqrt(m^2+1)
        self.Width = abs(db_W) / np.sqrt((m_W**2.) + 1) + self.buffer_width * 2.
        self.Length = abs(db_L) / np.sqrt((m_L**2.) + 1) + self.buffer_width * 2.

        # Create the (still unrotated) equidistant grid around the center
        xc, yc = self.get_exact_grid(x0 - self.Length/2., x0 + self.Length/2.,
                                     y0 - self.Width/2., y0 + self.Width/2.,
                                     gc['dx'], gc['dy'])

        # Storing grid parameters
        self.x0 = x0
        self.y0 = y0
        gc['xi'] = xc
        gc['yi'] = yc

        return self
    def separation(self, c, mu_b, sep_filter_iterations, zsep_y_filter):
        '''Compute the surface of the flow separation bubble

        Detects brinks where the leeward bed slope exceeds the maximum
        angle wind streamlines can follow and fits a third-order
        polynomial streamline surface behind each brink, optionally
        smoothed by FFT filtering per row and by a Gaussian filter in
        the y-direction.

        Parameters
        ----------
        c : float
            Parameter controlling the length of the separation bubble
        mu_b : float
            Maximum leeward slope angle (degrees) streamlines can follow
        sep_filter_iterations : int
            Number of FFT smoothing passes applied per bubble
        zsep_y_filter : bool
            Whether to smooth the bubble surface in y-direction

        Returns
        -------
        zsep : numpy.ndarray
            Separation bubble surface; never below the bed level

        '''

        # Initialize grid and bed dimensions
        gc = self.cgrid

        x = gc['x']
        y = gc['y']
        z = gc['z']

        nx = len(gc['z'][1])
        ny = len(gc['z'][0])
        dx = gc['dx']
        dy = gc['dy']

        # Initialize arrays
        dzx = np.zeros(gc['z'].shape)
        dzdx0 = np.zeros(gc['z'].shape)
        dzdx1 = np.zeros(gc['z'].shape)
        stall = np.zeros(gc['z'].shape)
        bubble = np.zeros(gc['z'].shape)
        # wavenumber indices for the FFT filter
        # NOTE(review): this array is shadowed by the bubble loop
        # variable `k` below, so the FFT filter uses a scalar — confirm intent
        k = np.array(range(0, nx))

        zsep = np.zeros(z.shape)  # total separation bubble
        zsep_new = np.zeros(z.shape)  # first-oder separation bubble surface

        zfft = np.zeros((ny, nx), dtype=complex)

        # Compute bed slope angle in x-dir (central differences; the last
        # two columns are padded by copying)
        dzx[:,:-2] = np.rad2deg(np.arctan((z[:,2:]-z[:,:-2])/(2.*dx)))
        dzx[:,-2] = dzx[:,-3]
        dzx[:,-1] = dzx[:,-2]

        # Determine location of separation bubbles
        '''Separation bubble exist if bed slope angle (lee side)
        is larger than max angle that wind stream lines can
        follow behind an obstacle (mu_b = ..)'''

        stall += np.logical_and(abs(dzx) > mu_b, dzx < 0.)
        # NOTE(review): numpy interprets the third positional argument of
        # logical_and as the `out` buffer, so `stall[:,2:]>0.` acts as
        # output storage rather than a third condition — confirm intent
        stall[:,1:-1] += np.logical_and(stall[:,1:-1]==0, stall[:,:-2]>0., stall[:,2:]>0.)

        # Define separation bubble: cells just upwind of a stalled region
        bubble[:,:-1] = (stall[:,:-1] == 0.) * (stall[:,1:] > 0.)

        # Better solution for cleaner separation bubble, but no working Barchan dune (yet)
        p = 1
        bubble[:,p:] = bubble[:,:-p]
        bubble[:,-p:] = 0

        bubble = bubble.astype(int)

        # Count separation bubbles
        n = np.sum(bubble)
        bubble_n = np.asarray(np.where(bubble == True)).T

        # Walk through all separation bubbles and determine polynoms
        j = 9999
        for k in range(0, n):
            i = bubble_n[k,1]
            j = bubble_n[k,0]

            # Bart: check for negative wind direction
            if np.sum(gc['taux']) >= 0:
                idir = 1
            else:
                idir = -1

            # first non-descending cell downwind of the brink (if any)
            ix_neg = (dzx[j, i+idir*5:] >= 0)  # i + 5??

            if np.sum(ix_neg) == 0:
                zbrink = z[j,i]  # z level of brink at z(x0)
            else:
                zbrink = z[j,i] - z[j,i+idir*5+idir*np.where(ix_neg)[0][0]]

            # Better solution and cleaner separation bubble, but no working Barchan dune (yet)
            # upwind bed slope at the brink (backward difference over 3 cells)
            dzdx0 = (z[j,i] - z[j,i-3]) / (3.*dx)

            a = dzdx0 / c

            # length of the separation bubble, clipped to [0.1, 200] m
            ls = np.minimum(np.maximum((3.*zbrink/(2.*c) * (1. + a/4. + a**2/8.)), 0.1), 200.)

            # coefficients of the cubic streamline z(x) matching height and
            # slope at the brink and zero height/slope at x = ls
            a2 = -3 * zbrink/ls**2 - 2 * dzdx0 / ls
            a3 = 2 * zbrink/ls**3 + dzdx0 / ls**2

            i_max = min(i+int(ls/dx)+1,int(nx-1))

            if idir == 1:
                xs = x[j,i:i_max] - x[j,i]
            else:
                xs = -(x[j,i:i_max] - x[j,i])

            zsep_new[j,i:i_max] = (a3*xs**3 + a2*xs**2 + dzdx0*xs + z[j,i])

            # Choose maximum of bedlevel, previous zseps and new zseps
            zsep[j,:] = np.maximum.reduce([z[j,:], zsep[j,:], zsep_new[j,:]])

            for filter_iter in range(sep_filter_iterations):
                zsep_new = np.zeros(zsep.shape)

                # low-pass filter this row of the bubble surface in
                # Fourier space before refitting the polynomial
                Cut = 1.5
                dk = 2.0 * np.pi / (np.max(x))
                zfft[j,:] = np.fft.fft(zsep[j,:])
                zfft[j,:] *= np.exp(-(dk*k*dx)**2/(2.*Cut**2))
                zsep_fft = np.real(np.fft.ifft(zfft[j,:]))

                if np.sum(ix_neg) == 0:
                    zbrink = zsep_fft[i]
                else:
                    zbrink = zsep_fft[i] - zsep_fft[i+idir*5+idir*np.where(ix_neg)[0][0]]

                # First order polynom
                dzdx1 = (zsep_fft[i] - zsep_fft[i-3])/(3.*dx)

                a = dzdx1 / c

                ls = np.minimum(np.maximum((3.*zbrink/(2.*c) * (1. + a/4. + a**2/8.)), 0.1), 200.)

                a2 = -3 * zbrink/ls**2 - 2 * dzdx1 / ls
                a3 = 2 * zbrink/ls**3 + dzdx1 / ls**2

                i_max1 = min(i+idir*int(ls/dx),int(nx-1))

                if idir == 1:
                    xs1 = x[j,i:i_max1] - x[j,i]
                else:
                    xs1 = -(x[j,i:i_max1] - x[j,i])

                zsep_new[j, i:i_max1] = (a3*xs1**3 + a2*xs1**2 + dzdx1*xs1 + zbrink)

                # Pick the maximum separation bubble height at all locations
                zsep[j,:] = np.maximum.reduce([z[j,:], zsep[j,:], zsep_new[j,:]])

        # Smooth surface of separation bubbles over y direction
        if zsep_y_filter:
            zsep = ndimage.gaussian_filter1d(zsep, sigma=0.2, axis=0)

        # Correct for any separation bubbles that are below the bed surface following smoothing
        ilow = zsep < z
        zsep[ilow] = z[ilow]

        return zsep
def compute_shear(self, u0, nfilter=(1., 2.)):
'''Compute wind shear perturbation for given free-flow wind
speed on computational grid
Parameters
----------
u0 : float
Free-flow wind speed
nfilter : 2-tuple
Wavenumber range used for logistic sigmoid filter. See
:func:`filter_highfrequencies`
'''
gc = self.cgrid
gi = self.igrid # initial grid
if u0 == 0.:
self.cgrid['dtaux'] = np.zeros(gc['z'].shape)
self.cgrid['dtauy'] = np.zeros(gc['z'].shape)
return
ny, nx = gc['z'].shape
kx, ky = np.meshgrid(2. * np.pi * np.fft.fftfreq(nx+1, gc['dx'])[1:],
2. * np.pi * np.fft.fftfreq(ny+1, gc['dy'])[1:])
hs = np.fft.fft2(gc['z'])
hs = self.filter_highfrequenies(kx, ky, hs, nfilter)
z0 = self.z0 # roughness length which takes into account saltation
L = self.L /4. # typical length scale of the hill (=1/kx) ??
# Inner layer height
l = self.l
# interpolate roughness length z0 to computational grid
if np.size(z0)>1:
z0new = self.interpolate(gi['x'], gi['y'], z0, gc['x'], gc['y'], 0)
else:
z0new = z0
for i in range(5):
l = 2 * 0.41**2 * L /np.log(l/z0new)
# Middle layer height
hm = 1.0
for i in range(5):
hm = L / np.sqrt(np.log(hm/z0new))
# Non-dimensional velocity
ul = np.log(l/z0new) / np.log(hm/z0new)
# Arrays in Fourier
k = np.sqrt(kx**2 + ky**2)
sigma = np.sqrt(1j * L * kx * z0new /l)
time_start_perturbation = time.time()
# Shear stress perturbation
dtaux_t = hs * kx**2 / k * 2 / ul**2 * \
(-1. + (2. * np.log(l/z0new) + k**2/kx**2) * sigma * \
sc_kv(1., 2. * sigma) / sc_kv(0., 2. * sigma))
dtauy_t = hs * kx * ky / k * 2 / ul**2 * \
2. * np.sqrt(2.) * sigma * sc_kv(1., 2. * np.sqrt(2.) * sigma)
gc['dtaux'] = np.real(np.fft.ifft2(dtaux_t))
gc['dtauy'] = np.real(np.fft.ifft2(dtauy_t))
def separation_shear(self, hsep):
'''Reduces the computed wind shear perturbation below the
separation surface to mimic the turbulence effects in the
separation bubble
Parameters
----------
hsep : numpy.ndarray
Height of seperation bubble (in x direction)
'''
tau_sep = 0.5
slope = 0.2 # according to Durán 2010 (Sauermann 2001: c = 0.25 for 14 degrees)
delta = 1./(slope*tau_sep)
zsepdelta = np.minimum(np.maximum(1. - delta * hsep, 0.), 1.)
self.cgrid['taux'] *= zsepdelta
self.cgrid['tauy'] *= zsepdelta
def maxDiff(self, arr):
result = 0
n = len(arr)
# Iterate through all pairs.
for i in range(0,n):
for j in range(i, n):
if (abs(arr[i] - arr[j]) + abs(i - j) > result):
result = abs(arr[i] - arr[j]) + abs(i - j)
return result
def filter_highfrequenies(self, kx, ky, hs, nfilter=(1, 2)):
    '''Taper high frequencies from a 2D spectrum.

    A logistic sigmoid filter damps the higher frequencies of the 2D
    spectrum. The range over which the sigmoid runs from 0 to 1 is
    given by the 2-tuple ``nfilter``, expressed as wavenumbers in
    terms of grid cells, i.e. a value 1 corresponds to a wave with
    length ``dx``. If ``nfilter`` is None the spectrum is returned
    unchanged.

    Parameters
    ----------
    kx : numpy.ndarray
        Wavenumbers in x-direction
    ky : numpy.ndarray
        Wavenumbers in y-direction
    hs : numpy.ndarray
        2D spectrum (modified in place)
    nfilter : 2-tuple or None
        Wavenumber range used for logistic sigmoid filter

    Returns
    -------
    hs : numpy.ndarray
        Filtered 2D spectrum
    '''

    if nfilter is None:
        return hs

    n1 = np.min(nfilter)
    n2 = np.max(nfilter)

    # wavelengths expressed in number of grid cells, per direction
    px = 2 * np.pi / self.cgrid['dx'] / np.abs(kx)
    py = 2 * np.pi / self.cgrid['dy'] / np.abs(ky)

    # sigmoid scales so the filter spans [0.01, 0.99] over [n1, n2]
    s1 = n1 / np.log(1. / .01 - 1.)
    s2 = -n2 / np.log(1. / .99 - 1.)

    sig_x = 1. / (1. + np.exp(-(px + n1 - n2) / s1))
    sig_y = 1. / (1. + np.exp(-(py + n1 - n2) / s2))
    hs *= sig_x * sig_y

    return hs
def get_shear(self):
    '''Return the wind shear perturbation field on the input grid.

    Returns
    -------
    taux : numpy.ndarray
        Wind shear perturbation in x-direction
    tauy : numpy.ndarray
        Wind shear perturbation in y-direction
    '''

    return self.igrid['taux'], self.igrid['tauy']
def add_shear(self):
    '''Add the wind shear perturbations to the current shear field.

    The perturbation components ``dtaux`` and ``dtauy`` on the
    computational grid are added to the normalized shear direction and
    rescaled with the unperturbed shear magnitude. Cells with zero
    shear magnitude are left untouched.

    Returns
    -------
    self
        This object, with ``cgrid['taux']`` and ``cgrid['tauy']``
        updated in place
    '''

    taux0 = self.cgrid['taux']
    tauy0 = self.cgrid['tauy']

    magnitude = np.hypot(taux0, tauy0)
    nonzero = magnitude != 0.
    m = magnitude[nonzero]

    self.cgrid['taux'][nonzero] = m * (taux0[nonzero] / m + self.cgrid['dtaux'][nonzero])
    self.cgrid['tauy'][nonzero] = m * (tauy0[nonzero] / m + self.cgrid['dtauy'][nonzero])

    return self
def get_separation(self):
    '''Return the height of the separation bubble on the input grid.

    Returns
    -------
    hsep : numpy.ndarray
        Difference in height between the z-coordinate of the
        separation polynomial and the bed level
    '''

    return self.igrid['hsep']
def plot(self, ax=None, cmap='Reds', stride=10, computational_grid=False, **kwargs):
    '''Plot the wind shear perturbation field.

    Renders the topography as a color mesh and the wind shear vectors
    as a quiver plot; when plotting on the computational grid the
    outline of the input grid is drawn as a black contour.

    Parameters
    ----------
    ax : matplotlib.pyplot.Axes, optional
        Axes to plot onto; a new figure is created if omitted
    cmap : matplotlib.cm.Colormap or string, optional
        Colormap for topography (default: Reds)
    stride : int, optional
        Stride to apply to wind shear vectors (default: 10)
    computational_grid : bool, optional
        Plot on computational grid rather than input grid
        (default: False)
    kwargs : dict
        Additional arguments to :func:`matplotlib.pyplot.quiver`

    Returns
    -------
    ax : matplotlib.pyplot.Axes
        Axes used for plotting
    '''

    if ax is None:
        _, ax = plt.subplots()

    g = self.cgrid if computational_grid else self.igrid

    sl = slice(None, None, stride)
    ax.pcolormesh(g['x'], g['y'], g['z'], cmap=cmap)
    ax.quiver(g['x'][sl, sl], g['y'][sl, sl],
              g['taux'][sl, sl], g['tauy'][sl, sl], **kwargs)

    if computational_grid:
        ax.plot(self.get_borders(self.igrid['x']),
                self.get_borders(self.igrid['y']), '-k')

    return ax
@staticmethod
def get_exact_grid(xmin, xmax, ymin, ymax, dx, dy):
'''Returns a grid with given gridsizes approximately within given bounding box'''
x = np.arange(np.floor(xmin / dx) * dx,
np.ceil(xmax / dx) * dx, dx)
y = np.arange(np.floor(ymin / dy) * dy,
np.ceil(ymax / dy) * dy, dy)
x, y = np.meshgrid(x, y)
return x, y
@staticmethod
def get_borders(x):
'''Returns borders of a grid as one-dimensional array'''
return np.concatenate((x[0,:].T,
x[1:-1,-1],
x[-1,::-1].T,
x[-1:1:-1,0],
x[0,:1]), axis=0)
@staticmethod
def rotate(x, y, alpha, origin=(0,0)):
'''Rotate a matrix over given angle around given origin'''
xr = x - origin[0]
yr = y - origin[1]
a = alpha / 180. * np.pi
R = np.asmatrix([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
xy = np.concatenate((xr.reshape((-1,1)),
yr.reshape((-1,1))), axis=1) * R
return (np.asarray(xy[:,0].reshape(x.shape) + origin[0]),
np.asarray(xy[:,1].reshape(y.shape) + origin[1]))
def interpolate(self, x, y, z, xi, yi, z0):
    '''Interpolate a gridded variable from one grid onto another.

    Both the source grid (``x``, ``y``) and the target grid (``xi``,
    ``yi``) are rotated around (``self.x0``, ``self.y0``) so that the
    source grid rows align with the horizontal axis. The source data
    is padded on all sides before interpolation; target points beyond
    the padded region are filled with ``z0``.

    Parameters
    ----------
    x, y : numpy.ndarray
        Coordinates of the source grid
    z : numpy.ndarray
        Values defined on the source grid
    xi, yi : numpy.ndarray
        Coordinates of the target grid
    z0 : float or numpy.ndarray
        Fill value for target points outside the (padded) source grid

    Returns
    -------
    numpy.ndarray
        Interpolated values on the target grid
    '''

    # First compute angle with horizontal from the first two grid points
    dx = x[0,1] - x[0,0]
    dy = y[0,1] - y[0,0]

    angle = np.rad2deg(np.arctan(dy/dx))

    # arctan only resolves angles up to 180 degrees; correct when both
    # deltas are negative
    if dx <= 0 and dy<=0:
        angle += 180.

    # Rotate grids to align with horizontal
    x, y = self.rotate(x, y, angle, origin=(self.x0, self.y0))
    xi, yi = self.rotate(xi, yi, angle, origin=(self.x0, self.y0))

    # Rotate 180 deg if the rotated axes ended up in descending order
    if not np.all(sorted(y[:,0]) == y[:,0]) and not np.all(sorted(x[0,:]) == x[0,:]):
        x, y = self.rotate(x, y, 180, origin=(self.x0, self.y0))
        xi, yi = self.rotate(xi, yi, 180, origin=(self.x0, self.y0))

    # Concatenate coordinates into (N, 2) point lists in (y, x) order
    # NOTE(review): ``xy`` is computed but never used below; candidate
    # for removal.
    xy = np.concatenate((y.reshape((-1,1)),
                         x.reshape((-1,1))), axis=1)

    xyi = np.concatenate((yi.reshape((-1,1)),
                          xi.reshape((-1,1))), axis=1)

    # Pad the source grid: coordinates are extrapolated linearly (odd
    # reflection), values are repeated from the edge
    pad_w = np.maximum(np.shape(x)[0], np.shape(x)[1])

    x_pad = np.pad(x, ((pad_w, pad_w), (pad_w, pad_w)), 'reflect', reflect_type='odd')
    y_pad = np.pad(y, ((pad_w, pad_w), (pad_w, pad_w)), 'reflect', reflect_type='odd')
    z_pad = np.pad(z, ((pad_w, pad_w), (pad_w, pad_w)), 'edge')

    if self.istransect:
        # 1D (transect) mode: simple linear interpolation along x
        zi = np.interp(xi.flatten(), x_pad.flatten(), z_pad.flatten()).reshape(xi.shape)
    else:
        # in the scipy 1.10 version the regular grid interpolator does not work with non c-contigous arrays.
        # Here we make a copy as a dirty solution feeding the interpolator with ordered copies
        inter = scipy.interpolate.RegularGridInterpolator((y_pad[:,0].copy(order='C'), x_pad[0,:].copy(order='C')), z_pad, bounds_error = False, fill_value = z0)
        zi = inter(xyi).reshape(xi.shape)

    return zi
from __future__ import absolute_import, division
import logging
import numpy as np
# package modules
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def angele_of_repose(s, p):
    '''Set the static and dynamic angle of repose on the spatial grids.

    Currently both critical angles are spatially uniform and are copied
    directly from the model configuration. A dependency on surface
    moisture content and vegetation is anticipated but not implemented
    yet.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    # NOTE: dependence on moisture content / vegetation cover (e.g.
    # larger rhoveg -> larger theta) is not yet implemented; both angles
    # are plain configuration values for now.
    for key in ('theta_stat', 'theta_dyn'):
        s[key] = p[key]

    return s
def avalanche(s, p):
    '''Avalanching occurs if bed slopes exceed critical slopes.

    Simulates the process of avalanching that is triggered by the
    exceedence of a critical static slope ``theta_stat`` by the bed
    slope. The iteration stops if the bed slope does not exceed the
    dynamic critical slope ``theta_dyn`` or after ``max_iter_ava``
    iterations. Each iteration moves a fraction ``E`` of the net
    downslope flux and updates the bed level ``s['zb']`` in place.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    if p['process_avalanche']:

        nx = p['nx']+1
        ny = p['ny']+1

        # critical slopes expressed as tangents
        tan_stat = np.tan(np.deg2rad(s['theta_stat']))
        tan_dyn = np.tan(np.deg2rad(s['theta_dyn']))

        # relaxation factor applied to the net flux per iteration
        E = 0.2

        grad_h_down = np.zeros((ny,nx,4))
        flux_down = np.zeros((ny,nx,4))
        slope_diff = np.zeros((ny,nx))
        grad_h = np.zeros((ny,nx))

        max_iter_ava = p['max_iter_ava']

        max_grad_h, grad_h, grad_h_down = calc_gradients(s['zb'], nx, ny, s['ds'], s['dn'], s['zne'])

        s['gradh'] = grad_h.copy()

        # avalanching starts only when the static critical slope is exceeded
        initiate_avalanche = (max_grad_h > tan_stat)

        if initiate_avalanche:

            for i in range(0,max_iter_ava):

                # reset work arrays in place for this iteration
                grad_h_down *= 0.
                flux_down *= 0.
                slope_diff *= 0.
                grad_h *= 0.

                max_grad_h, grad_h, grad_h_down = calc_gradients(s['zb'], nx, ny, s['ds'], s['dn'], s['zne'])

                # stop once the dynamic critical slope is no longer exceeded
                if max_grad_h < tan_dyn:
                    break

                # Calculation of flux
                grad_h_nonerod = (s['zb'] - s['zne']) / s['ds']  # HAS TO BE ADJUSTED!

                # cells steeper than the dynamic slope with erodible
                # sediment available
                ix = np.logical_and(grad_h > tan_dyn, grad_h_nonerod > 0)
                slope_diff[ix] = np.tanh(grad_h[ix]) - np.tanh(0.9*tan_dyn)

                # limit the flux where the non-erodible layer is close
                # to the surface
                ix = grad_h_nonerod < grad_h - tan_dyn
                slope_diff[ix] = np.tanh(grad_h_nonerod[ix])

                ix = grad_h != 0

                if ny == 1:
                    # 1D interpretation: only +x and -x directions
                    flux_down[:,:,0][ix] = slope_diff[ix] * grad_h_down[:,:,0][ix] / grad_h[ix]
                    flux_down[:,:,2][ix] = slope_diff[ix] * grad_h_down[:,:,2][ix] / grad_h[ix]

                    # Calculation of change in bed level
                    q_in = np.zeros((ny,nx))
                    q_out = 0.5*np.abs(flux_down[:,:,0]) + 0.5*np.abs(flux_down[:,:,2])

                    # incoming flux from both neighbours, split by sign
                    q_in[0,1:-1] = 0.5*(np.maximum(flux_down[0,:-2,0],0.) \
                                        - np.minimum(flux_down[0,2:,0],0.) \
                                        + np.maximum(flux_down[0,2:,2],0.) \
                                        - np.minimum(flux_down[0,:-2,2],0.))

                else:
                    # 2D interpretation: all four directions
                    flux_down[:,:,0][ix] = slope_diff[ix] * grad_h_down[:,:,0][ix] / grad_h[ix]
                    flux_down[:,:,1][ix] = slope_diff[ix] * grad_h_down[:,:,1][ix] / grad_h[ix]
                    flux_down[:,:,2][ix] = slope_diff[ix] * grad_h_down[:,:,2][ix] / grad_h[ix]
                    flux_down[:,:,3][ix] = slope_diff[ix] * grad_h_down[:,:,3][ix] / grad_h[ix]

                    # Calculation of change in bed level
                    q_in = np.zeros((ny,nx))
                    q_out = 0.5*np.abs(flux_down[:,:,0]) + 0.5* np.abs(flux_down[:,:,1]) + 0.5*np.abs(flux_down[:,:,2]) + 0.5* np.abs(flux_down[:,:,3])

                    # incoming flux from all four neighbours, split by sign
                    q_in[1:-1,1:-1] = 0.5*(np.maximum(flux_down[1:-1,:-2,0],0.) \
                                           - np.minimum(flux_down[1:-1,2:,0],0.) \
                                           + np.maximum(flux_down[:-2,1:-1,1],0.) \
                                           - np.minimum(flux_down[2:,1:-1,1],0.) \
                                           + np.maximum(flux_down[1:-1,2:,2],0.) \
                                           - np.minimum(flux_down[1:-1,:-2,2],0.) \
                                           + np.maximum(flux_down[2:,1:-1,3],0.) \
                                           - np.minimum(flux_down[:-2,1:-1,3],0.))

                # relax the bed towards equilibrium with factor E
                s['zb'] += E * (q_in - q_out)

    return s
def calc_gradients(zb, nx, ny, ds, dn, zne):
    '''Calculate the downslope bed gradients needed by the avalanching
    module.

    For each cell the height difference towards the neighbouring cell
    is determined in four directions (+x, +y, -x, -y). Per axis, when
    the forward neighbour is higher than the backward neighbour the
    slope towards the backward neighbour is used instead, and local
    minima (both neighbours higher) get a zero gradient. Boundary
    cells are excluded.

    Parameters
    ----------
    zb : numpy.ndarray
        Bed level (ny, nx)
    nx, ny : int
        Grid dimensions
    ds, dn : float or numpy.ndarray
        Grid cell sizes in x- and y-direction
    zne : numpy.ndarray
        Level of the non-erodible layer (only used by the currently
        disabled masking block below)

    Returns
    -------
    max_grad_h : float
        Maximum slope magnitude in the domain
    grad_h : numpy.ndarray
        Slope magnitude per cell (ny, nx)
    grad_h_down : numpy.ndarray
        Downslope gradients in 4 directions (ny, nx, 4)
    '''

    grad_h_down = np.zeros((ny,nx,4))

    # Calculation of slope (positive x-direction)
    grad_h_down[:,1:-1,0] = zb[:,1:-1] - zb[:,2:]
    # switch to the -x neighbour when the +x neighbour is the higher one
    ix = zb[:,2:] > zb[:,:-2]
    grad_h_down[:,1:-1,0][ix] = - (zb[:,1:-1][ix] - zb[:,:-2][ix])
    # zero gradient at local minima (both neighbours higher)
    ix = np.logical_and(zb[:,2:]>zb[:,1:-1], zb[:,:-2]>zb[:,1:-1])
    grad_h_down[:,1:-1,0][ix] = 0.

    # Calculation of slope (positive y-direction)
    grad_h_down[1:-1,:,1] = zb[1:-1,:] - zb[2:,:]
    ix = zb[2:,:] > zb[:-2,:]
    grad_h_down[1:-1,:,1][ix] = - (zb[1:-1,:][ix] - zb[:-2,:][ix])
    ix = np.logical_and(zb[2:,:]>zb[1:-1,:], zb[:-2,:]>zb[1:-1,:])
    grad_h_down[1:-1,:,1][ix] = 0.

    # Calculation of slope (negative x-direction)
    grad_h_down[:,1:-1,2] = zb[:,1:-1] - zb[:,:-2]
    ix = zb[:,:-2] > zb[:,2:]
    grad_h_down[:,1:-1,2][ix] = - (zb[:,1:-1][ix] - zb[:,2:][ix])
    ix = np.logical_and(zb[:,:-2]>zb[:,1:-1], zb[:,2:]>zb[:,1:-1])
    grad_h_down[:,1:-1,2][ix] = 0.

    # Calculation of slope (negative y-direction)
    grad_h_down[1:-1,:,3] = zb[1:-1,:] - zb[:-2,:]
    ix = zb[:-2,:] > zb[2:,:]
    grad_h_down[1:-1,:,3][ix] = - (zb[1:-1,:][ix] - zb[2:,:][ix])
    ix = np.logical_and(zb[:-2,:]>zb[1:-1,:], zb[2:,:]>zb[1:-1,:])
    grad_h_down[1:-1,:,3][ix] = 0.

    if ny == 1:
        # 1D interpretation: only exclude the lateral (x) boundaries
        grad_h_down[:,0,:] = 0
        grad_h_down[:,-1,:] = 0
    else:
        # 2D interpretation: exclude all domain boundaries
        grad_h_down[:,0,:] = 0
        grad_h_down[:,-1,:] = 0
        grad_h_down[0,:,:] = 0
        grad_h_down[-1,:,:] = 0

    # convert height differences into gradients
    grad_h_down[:,:,0] /= ds
    grad_h_down[:,:,1] /= dn
    grad_h_down[:,:,2] /= ds
    grad_h_down[:,:,3] /= dn

    # combined slope magnitude from the four directional gradients
    grad_h2 = 0.5*grad_h_down[:,:,0]**2 + 0.5*grad_h_down[:,:,1]**2 + 0.5*grad_h_down[:,:,2]**2 + 0.5*grad_h_down[:,:,3]**2

    if 0: #Sierd_com; to be changed in future release
        # disabled: mask out cells at (or just above) the non-erodible layer
        ix = zb < zne + 0.005
        grad_h2[ix] = 0.

    grad_h = np.sqrt(grad_h2)
    max_grad_h = np.max(grad_h)

    return max_grad_h, grad_h, grad_h_down
from __future__ import absolute_import, division
import logging
import numpy as np
import matplotlib.pyplot as plt
# package modules
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def duran_grainspeed(s, p):
    '''Compute grain speed according to Duran 2007 (p. 42)

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    u0 : numpy.ndarray
        Grain speed magnitude for the undisturbed wind field, per fraction
    us : numpy.ndarray
        Grain speed component in s-direction, per fraction
    un : numpy.ndarray
        Grain speed component in n-direction, per fraction
    u : numpy.ndarray
        Grain speed magnitude, per fraction
    '''

    # Create each grain fraction
    nf = p['nfractions']
    d = p['grain_size']

    z = s['zb']
    x = s['x']
    y = s['y']
    ny = p['ny']

    uw = s['uw']
    uws = s['uws']
    uwn = s['uwn']

    uth = s['uth0'] #TEMP: s['uth'] causes problems around Bc
    uth0 = s['uth0']

    ustar = s['ustar']
    ustars = s['ustars']
    ustarn = s['ustarn']
    ustar0 = s['ustar0']

    rhog = p['rhog']
    rhoa = p['rhoa']
    srho = rhog/rhoa

    A = 0.95
    B = 5.12

    g = np.repeat(p['g'], nf, axis = 0)
    v = np.repeat(p['v'], nf, axis = 0)
    kappa = p['kappa']

    # Drag coefficient (Duran, 2007 -> Jimenez and Madsen, 2003)
    r = 1. # Duran 2007, p. 33
    c = 14./(1.+1.4*r)

    tv = (v/g**2)**(1/3) # +- 5.38 ms # Andreotti, 2004

    lv = (v**2/(p['Aa']**2*g*(srho-1)))**(1/3)

    zm = c * uth * tv # characteristic height of the saltation layer +- 20 mm
    z0 = d/20. # grain based roughness layer +- 10 mu m - Duran 2007 p.32
    z1 = 35. * lv # reference height +- 35 mm

    alpha = 0.17 * d / lv

    Sstar = d/(4*v)*np.sqrt(g*d*(srho-1.))
    Cd = (4/3)*(A+np.sqrt(2*alpha)*B/Sstar)**2

    uf = np.sqrt(4/(3*Cd)*(srho-1.)*g*d) # Grain settling velocity - Jimnez and Madsen, 2003

    # Initiate arrays
    ets = np.zeros(uth.shape)
    etn = np.zeros(uth.shape)
    ueff = np.zeros(uth.shape)
    ueff0 = np.zeros(uth.shape)

    # broadcast 2D fields over the fraction axis
    ustar0 = np.repeat(ustar0[:,:,np.newaxis], nf, axis=2)
    ustar = np.repeat(ustar[:,:,np.newaxis], nf, axis=2)
    ustars = np.repeat(ustars[:,:,np.newaxis], nf, axis=2)
    ustarn = np.repeat(ustarn[:,:,np.newaxis], nf, axis=2)

    uw = np.repeat(uw[:,:,np.newaxis], nf, axis=2)
    uws = np.repeat(uws[:,:,np.newaxis], nf, axis=2)
    uwn = np.repeat(uwn[:,:,np.newaxis], nf, axis=2)

    # Efficient wind velocity (Duran, 2006 - Partelli, 2013)
    ueff = (uth0 / kappa) * (np.log(z1 / z0))
    ueff0 = (uth0 / kappa) * (np.log(z1 / z0))
    # NOTE(review): ``ueffmin`` aliases ``ueff`` and is never used below;
    # candidate for removal.
    ueffmin = ueff

    # Surface gradient (central differences)
    dzs = np.zeros(z.shape)
    dzn = np.zeros(z.shape)

    dzs[:,1:-1] = (z[:,2:]-z[:,:-2])/(x[:,2:]-x[:,:-2])
    dzn[1:-1,:] = (z[:-2,:]-z[2:,:])/(y[:-2,:]-y[2:,:])

    # Boundaries: copy the nearest interior gradient
    if ny > 0:
        dzs[:,0] = dzs[:,1]
        dzn[0,:] = dzn[1,:]
        dzs[:,-1] = dzs[:,-2]
        dzn[-1,:] = dzn[-2,:]

    dhs = np.repeat(dzs[:,:,np.newaxis], nf, axis = 2)
    dhn = np.repeat(dzn[:,:,np.newaxis], nf, axis = 2)

    # Wind direction: default to the wind vector, switch to the shear
    # vector where the shear velocity is significant
    ets = uws / uw
    etn = uwn / uw

    ix = (ustar >= 0.05) #(ustar >= uth)
    ets[ix] = ustars[ix] / ustar[ix]
    etn[ix] = ustarn[ix] / ustar[ix]

    # direction vector including the surface-slope contribution
    Axs = ets + 2*alpha*dhs
    Axn = etn + 2*alpha*dhn
    Ax = np.hypot(Axs, Axn)

    # Compute grain speed
    u0 = np.zeros(uth.shape)
    us = np.zeros(uth.shape)
    un = np.zeros(uth.shape)
    u = np.zeros(uth.shape)

    for i in range(nf):
        # determine ueff for different grainsizes
        ix = (ustar[:,:,i] >= uth[:,:,i])#*(ustar[:,:,i] > 0.)
        # ueff[ix,i] = (uth[ix,i] / kappa) * (np.log(z1[i] / z0[i]) + 2*(np.sqrt(1+z1[i]/zm[ix,i]*(ustar[ix,i]**2/uth[ix,i]**2-1))-1)) #???
        # ueff0[:,:,i] = (uth0[:,:,i] / kappa) * (np.log(z1[i] / z0[i]) + 2*(np.sqrt(1+z1[i]/zm[:,:,i]*(ustar0[:,:,i]**2/uth0[:,:,i]**2-1))-1))
        ueff[ix,i] = (uth[ix,i] / kappa) * (np.log(z1[i] / z0[i]) + (z1[i]/zm[ix,i]) * (ustar[ix,i]/uth[ix,i]-1)) # Duran 2007 1.60 p.42
        ueff0[:,:,i] = (uth0[:,:,i] / kappa) * (np.log(z1[i] / z0[i]) + (z1[i]/zm[:,:,i]) * (ustar0[:,:,i]/uth[:,:,i]-1)) # Duran 2007 1.60 p.42
        ueff[~ix,i] = 0.

        # grain speed per fraction: effective wind speed minus the
        # settling-velocity term, projected on the transport direction
        u0[:,:,i] = (ueff0[:,:,i] - uf[i] / (np.sqrt(2 * alpha[i])))

        us[:,:,i] = (ueff[:,:,i] - uf[i] / (np.sqrt(2. * alpha[i]) * Ax[:,:,i])) * ets[:,:,i] \
                    - (np.sqrt(2*alpha[i]) * uf[i] / Ax[:,:,i]) * dhs[:,:,i]
        un[:,:,i] = (ueff[:,:,i] - uf[i] / (np.sqrt(2. * alpha[i]) * Ax[:,:,i])) * etn[:,:,i] \
                    - (np.sqrt(2*alpha[i]) * uf[i] / Ax[:,:,i]) * dhn[:,:,i]

        u[:,:,i] = np.hypot(us[:,:,i], un[:,:,i])

        # reduce the grain velocity inside the separation bubble to a
        # small residual speed (0.1 m/s) along the transport direction
        ix = (s['zsep'] > s['zb'] + 0.01)
        sepspeed = 0.1
        us[ix,i] = sepspeed * ets[ix, i]
        un[ix,i] = sepspeed * etn[ix, i]
        u[:,:,i] = np.hypot(us[:,:,i], un[:,:,i])

    return u0, us, un, u
def constant_grainspeed(s, p):
    '''Assign a constant saltation velocity u [m/s].

    The grain speed magnitude is fixed at 1 m/s; its direction follows
    the (perturbed) shear velocity where shear is present and falls
    back to the wind direction inside the separation bubble (zero
    shear, nonzero wind). The result is replicated over all sediment
    fractions.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    nf = p['nfractions']

    uw = s['uw'].copy()
    uws = s['uws'].copy()
    uwn = s['uwn'].copy()
    ustar = s['ustar'].copy()
    ustars = s['ustars'].copy()
    ustarn = s['ustarn'].copy()

    us = np.zeros(ustar.shape)
    un = np.zeros(ustar.shape)

    # unit speed aligned with the shear direction (perturbation theory)
    uspeed = 1.  # m/s
    has_shear = ustar != 0
    us[has_shear] = uspeed * ustars[has_shear] / ustar[has_shear]
    un[has_shear] = uspeed * ustarn[has_shear] / ustar[has_shear]

    # under the separation bubble follow the wind direction instead
    sepspeed = 1.0  # m/s
    in_bubble = (ustar == 0.) * (uw != 0.)
    us[in_bubble] = sepspeed * uws[in_bubble] / uw[in_bubble]
    un[in_bubble] = sepspeed * uwn[in_bubble] / uw[in_bubble]

    speed = np.hypot(us, un)

    # replicate over the fraction axis
    s['us'] = us[:, :, np.newaxis].repeat(nf, axis=2)
    s['un'] = un[:, :, np.newaxis].repeat(nf, axis=2)
    s['u'] = speed[:, :, np.newaxis].repeat(nf, axis=2)

    return s
def equilibrium(s, p):
    '''Compute equilibrium sediment concentration.

    Determines the grain speed according to ``method_grainspeed``
    (duran / windspeed / constant) and subsequently the equilibrium
    concentrations ``Cu`` (actual threshold), ``Cuf`` (fluid
    threshold) and ``Cu0`` (undisturbed wind) according to
    ``method_transport`` (bagnold / kawamura / lettau / dk /
    sauermann / vanrijn_strypsteen). All concentrations are multiplied
    by the acceleration factor ``accfac``.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    if p['process_transport']:

        nf = p['nfractions']

        # u via grainvelocity:
        if p['method_grainspeed']=='duran':
            #the syntax inside grainspeed needs to be cleaned up
            u0, us, un, u = duran_grainspeed(s,p)
            s['u0'] = u0
            s['us'] = us
            s['un'] = un
            s['u'] = u
        elif p['method_grainspeed']=='windspeed':
            # grain speed equals the wind speed, replicated per fraction
            s['u0'] = s['uw'][:,:,np.newaxis].repeat(nf, axis=2)
            s['us'] = s['uws'][:,:,np.newaxis].repeat(nf, axis=2)
            s['un'] = s['uwn'][:,:,np.newaxis].repeat(nf, axis=2)
            s['u'] = s['uw'][:,:,np.newaxis].repeat(nf, axis=2)
            u = s['u']
        elif p['method_grainspeed']=='constant':
            s = constant_grainspeed(s,p)
            u0 = s['u']
            us = s['us']
            un = s['un']
            u = s['u']

        # broadcast shear velocities over the fraction axis
        ustar = s['ustar'][:,:,np.newaxis].repeat(nf, axis=2)
        ustar0 = s['ustar0'][:,:,np.newaxis].repeat(nf, axis=2)
        uth = s['uth']
        uthf = s['uthf']
        uth0 = s['uth0']

        rhoa = p['rhoa']
        g = p['g']

        # NOTE(review): only Cu and Cuf are (re)initialized here;
        # ``s['Cu0']`` is updated through indexed assignment below and
        # therefore must already exist — presumably initialized in the
        # model state setup. Verify against the caller.
        s['Cu'] = np.zeros(uth.shape)
        s['Cuf'] = np.zeros(uth.shape)

        # only cells with both shear and grain motion get a concentration
        ix = (ustar != 0.)*(u != 0.)

        if p['method_transport'].lower() == 'bagnold':
            s['Cu'][ix] = np.maximum(0., p['Cb'] * rhoa / g * (ustar[ix] - uth[ix])**3 / u[ix])
            s['Cuf'][ix] = np.maximum(0., p['Cb'] * rhoa / g * (ustar[ix] - uthf[ix])**3 / u[ix])
            s['Cu0'][ix] = np.maximum(0., p['Cb'] * rhoa / g * (ustar0[ix] - uth0[ix])**3 / u[ix])
        elif p['method_transport'].lower() == 'kawamura':
            # NOTE(review): kawamura and lettau do not update Cu0
            s['Cu'][ix] = np.maximum(0., p['Ck'] * rhoa / g * (ustar[ix] + uth[ix])**2 * (ustar[ix] - uth[ix]) / u[ix])
            s['Cuf'][ix] = np.maximum(0, p['Ck'] * rhoa / g * (ustar[ix] + uthf[ix])**2 * (ustar[ix] - uthf[ix]) / u[ix])
        elif p['method_transport'].lower() == 'lettau':
            s['Cu'][ix] = np.maximum(0., p['Cl'] * rhoa / g * ustar[ix]**2 * (ustar[ix] - uth[ix]) / u[ix])
            s['Cuf'][ix] = np.maximum(0., p['Cl'] * rhoa / g * ustar[ix]**2 * (ustar[ix] - uthf[ix]) / u[ix])
        elif p['method_transport'].lower() == 'dk':
            s['Cu'][ix] = np.maximum(0., p['Cdk'] * rhoa / g * 0.8*uth[ix] * (ustar[ix]**2 - (0.8*uth[ix])**2) / u[ix])
            s['Cuf'][ix] = np.maximum(0., p['Cdk'] * rhoa / g * 0.8*uthf[ix] * (ustar[ix]**2 - (0.8*uthf[ix])**2) / u[ix])
            s['Cu0'][ix] = np.maximum(0., p['Cdk'] * rhoa / g * 0.8*uth0[ix] * (ustar0[ix]**2 - (0.8*uth0[ix])**2) / u[ix])
        elif p['method_transport'].lower() == 'sauermann':
            alpha_sauermann = 0.35
            s['Cu'][ix] = np.maximum(0., 2.* alpha_sauermann * rhoa / g * (ustar[ix]**2 - uth[ix]**2))
            s['Cuf'][ix] = np.maximum(0., 2.* alpha_sauermann * rhoa / g * (ustar[ix]**2 - uthf[ix]**2))
            s['Cu0'][ix] = np.maximum(0., 2.* alpha_sauermann * rhoa / g * (ustar0[ix]**2 - uth0[ix]**2))
        elif p['method_transport'].lower() == 'vanrijn_strypsteen':
            s['Cu'][ix] = np.maximum(0., p['Cb'] * rhoa / g * ((ustar[ix])**3 - (uth[ix])**3) / u[ix])
            s['Cuf'][ix] = np.maximum(0., p['Cb'] * rhoa / g * ((ustar[ix])**3 - (uth[ix])**3) / u[ix])
            s['Cu0'][ix] = np.maximum(0., p['Cb'] * rhoa / g * ((ustar0[ix])**3 - (uth0[ix])**3) / u[ix])
        else:
            logger.log_and_raise('Unknown transport formulation [%s]' % p['method_transport'], exc=ValueError)

    # apply the acceleration factor
    s['Cu'] *= p['accfac']
    s['Cuf'] *= p['accfac']
    s['Cu0'] *= p['accfac']

    return s
def compute_weights(s, p):
    '''Compute weights for sediment fractions.

    Multi-fraction sediment transport needs to weigh the transport of
    each sediment fraction to prevent the total transport from growing
    with the number of fractions. The weighing depends on the sediment
    availability in the air and in the bed and on the bed interaction
    parameter ``bi``: a share ``(1 - bi)`` comes from the air-borne
    distribution and the remainder from the bed distribution.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    3-tuple of numpy.ndarray
        Combined weights, air-borne weights and bed weights per fraction
    '''

    w_air = normalize(s['Ct'], s['Cu'])
    w_bed = normalize(s['mass'][:,:,0,:], axis=2)

    # the bed distribution fills whatever share the (scaled) air-borne
    # distribution leaves unclaimed
    air_scale = 1. - p['bi']
    deficit = 1. - np.minimum(1., air_scale * np.sum(w_air, axis=2, keepdims=True))

    w = normalize(air_scale * w_air + deficit * w_bed, axis=2)

    return w, w_air, w_bed
def renormalize_weights(w, ix):
    '''Renormalize the sediment fraction weights to unit sum.

    Only the weights with fraction index ``ix`` and higher are rescaled,
    so that the erosion of the fractions below ``ix`` never exceeds the
    sediment availability in the bed. A final normalization handles
    supply-limited cells; cells without any supply fall back to a
    uniform distribution.

    Parameters
    ----------
    w : numpy.ndarray
        Array with weights for each sediment fraction
    ix : int
        Minimum fraction index that may be modified

    Returns
    -------
    numpy.ndarray
        Array with weights for each sediment fraction
    '''

    # share of the total already claimed by the fixed fractions [0, ix)
    claimed = np.sum(w[:,:,:ix], axis=2, keepdims=True)
    w[:,:,ix:] = normalize(w[:,:,ix:], axis=2) * (1. - claimed)

    # normalize in case of supply-limitation;
    # use uniform distribution in case of no supply
    return normalize(w, axis=2, fill=1./w.shape[2])
from __future__ import absolute_import, division
import numpy as np
import logging
import operator
#import scipy.special
#import scipy.interpolate
from scipy import ndimage, misc
import matplotlib.pyplot as plt
# package modules
import aeolis.shear
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def initialize(s, p):
    '''Initialize the wind model.

    Converts the wind direction time series to the internal
    (nautical-style) convention and instantiates the 2D wind shear
    perturbation model when shear is enabled on a 2D grid.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    # apply wind direction convention
    if isarray(p['wind_file']):
        if p['wind_convention'] == 'nautical':
            # fix issue associated with longshore winds / divide by zero
            ifix = p['wind_file'][:, 2] == 0.
            p['wind_file'][ifix, 2] = 0.01
        elif p['wind_convention'] == 'cartesian':
            # fix issue associated with longshore winds / divide by zero
            ifix = p['wind_file'][:, 2] == 270.
            p['wind_file'][ifix, 2] = 270.01
            # convert cartesian directions to the internal convention
            p['wind_file'][:,2] = 270.0 - p['wind_file'][:,2]
        else:
            logger.log_and_raise('Unknown convention: %s'
                                 % p['wind_convention'], exc=ValueError)

    # initialize wind shear model (z0 according to Duran much smaller)
    # Otherwise no Barchan
    z0 = calculate_z0(p, s)

    if p['process_shear']:
        if p['ny'] > 0:
            s['shear'] = aeolis.shear.WindShear(s['x'], s['y'], s['zb'],
                                                dx=p['dx'], dy=p['dy'],
                                                L=p['L'], l=p['l'], z0=z0,
                                                buffer_width=p['buffer_width'])
        else:
            # 1D mode: no shear model object, just a placeholder field
            s['shear'] = np.zeros(s['x'].shape)

    return s
def interpolate(s, p, t):
    '''Interpolate wind velocity and direction to current time step.

    Interpolates the wind time series for velocity and direction to
    the current time step. The cosine and sine of the direction angle
    are interpolated separately to prevent zero-crossing errors. The
    wind velocity is decomposed in two grid components based on the
    orientation of each individual grid cell. In case of a
    one-dimensional model only a single positive component is used.
    Subsequently the wind shear velocity and stress fields are derived
    and the undisturbed (``*0``) fields are stored.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters
    t : float
        Current time

    Returns
    -------
    dict
        Spatial grids
    '''

    if p['process_wind'] and p['wind_file'] is not None:

        uw_t = p['wind_file'][:,0]
        uw_s = p['wind_file'][:,1]
        uw_d = p['wind_file'][:,2] / 180. * np.pi

        s['uw'][:,:] = interp_circular(t, uw_t, uw_s)
        # interpolate sin/cos separately to avoid zero-crossing errors
        s['udir'][:,:] = np.arctan2(interp_circular(t, uw_t, np.sin(uw_d)),
                                    interp_circular(t, uw_t, np.cos(uw_d))) * 180. / np.pi

    s['uws'] = - s['uw'] * np.sin((-p['alfa'] + s['udir']) / 180. * np.pi)  # alfa [deg] is real world grid cell orientation (clockwise)
    s['uwn'] = - s['uw'] * np.cos((-p['alfa'] + s['udir']) / 180. * np.pi)

    s['uw'] = np.abs(s['uw'])

    # Compute wind shear velocity from the law of the wall
    kappa = p['kappa']
    z = p['z']
    z0 = calculate_z0(p, s)

    s['ustars'] = s['uws'] * kappa / np.log(z/z0)
    s['ustarn'] = s['uwn'] * kappa / np.log(z/z0)
    s['ustar'] = np.hypot(s['ustars'], s['ustarn'])

    s = velocity_stress(s,p)

    # store the undisturbed fields for later use
    s['ustar0'] = s['ustar'].copy()
    # BUGFIX: store the undisturbed shear velocity *components*; these
    # were previously both set to the magnitude ``s['ustar']``, which is
    # inconsistent with the component-wise taus0/taun0 storage below.
    s['ustars0'] = s['ustars'].copy()
    s['ustarn0'] = s['ustarn'].copy()

    s['tau0'] = s['tau'].copy()
    s['taus0'] = s['taus'].copy()
    s['taun0'] = s['taun'].copy()

    return s
def calculate_z0(p, s):
    '''Calculate z0 according to the chosen roughness method.

    The z0 is required for the calculation of the shear velocity. The
    ``constant`` method defines z0 as equal to k (z0 = ks); this was
    implemented to ensure backward compatibility and does not follow
    the definition of Nikuradse (z0 = k / 30) — use
    ``constant_nikuradse`` for that. ``mean_grainsize_initial`` uses
    the initial mean grain size ascribed to the bed (``grain_dist``
    and ``grain_size`` in the input file). ``mean_grainsize_adaptive``
    and ``median_grainsize_adaptive`` base z0 on the mean/median grain
    size in the surface layer every time step, so z0 varies across the
    domain. ``vanrijn_strypsteen`` follows the roughness calculation
    of van Rijn and Strypsteen (2019).

    Parameters
    ----------
    p : dict
        Model configuration parameters
    s : dict
        Spatial grids

    Returns
    -------
    float or numpy.ndarray
        Roughness length z0

    Raises
    ------
    ValueError
        If ``method_roughness`` is not one of the supported methods.
    '''

    method = p['method_roughness']

    if method == 'constant':
        # backward compatible: z0 equals ks (does NOT follow z0 = ks/30)
        z0 = p['k']
    elif method == 'constant_nikuradse':
        # follows the definition of bed roughness by Nikuradse
        z0 = p['k'] / 30
    elif method == 'mean_grainsize_initial':
        # based on Nikuradse and Bagnold (1941); uniform grain size,
        # most applicable to a flat bed
        z0 = np.sum(p['grain_size']*p['grain_dist']) / 30.
    elif method == 'mean_grainsize_adaptive':
        # Nikuradse roughness varying in time and space with grain size
        z0 = calc_mean_grain_size(p, s) / 30.
    elif method == 'median_grainsize_adaptive':
        # based on Sherman and Greenwood (1982) - only appropriate for
        # naturally occurring grain size distributions
        d50 = calc_grain_size(p, s, 50)
        z0 = 2*d50 / 30.
    elif method == 'vanrijn_strypsteen':
        # based on van Rijn and Strypsteen (2019); Strypsteen et al. (2021)
        if len(p['grain_dist']) == 1:
            # if one grainsize is used, the d90 is estimated from the d50
            d50 = p['grain_size']
            d90 = 2*d50
        else:
            # calculate d50 and d90 per cell
            d50 = calc_grain_size(p, s, 50)
            d90 = calc_grain_size(p, s, 90)

        ustar_grain_stat = p['kappa'] * (s['uw'] / np.log(30*p['z']/d90))
        ustar_th_B = 0.1 * np.sqrt((p['rhog'] - p['rhoa']) / p['rhoa'] * p['g'] * d50)  # Note that Aa could be filled in in the spot of 0.1

        # T represents different phases of the transport related to the
        # saltation layer and ripple formation
        T = (np.square(ustar_grain_stat) - np.square(ustar_th_B))/np.square(ustar_th_B)
        #T[T < 0] = 0

        alpha1 = 15
        alpha2 = 1
        gamma_r = 1 + 1/T
        z0 = (d90 + alpha1 * gamma_r * d50 * np.power(T, alpha2)) / 30
    else:
        # previously an unknown method fell through to a NameError on
        # the undefined z0; raise an explicit configuration error instead
        logger.log_and_raise('Unknown roughness method [%s]' % method, exc=ValueError)

    return z0
def shear(s,p):
    '''Compute the wind shear field, including flow separation effects.

    In 2D mode the shear perturbation is delegated to the wind shear
    object stored in ``s['shear']``; in 1D mode (``ny == 0``) the
    perturbation and the separation bubble are computed directly with
    :func:`compute_shear1d` and :func:`separation1d`.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    # Compute shear velocity field (including separation)
    if 'shear' in s.keys() and p['process_shear'] and p['ny'] > 0:
        # delegate to the 2D wind shear perturbation model
        s['shear'](x=s['x'], y=s['y'], z=s['zb'],
                   taux=s['taus'], tauy=s['taun'],
                   u0=s['uw'][0,0], udir=s['udir'][0,0],
                   process_separation = p['process_separation'],
                   c = p['c_b'],
                   mu_b = p['mu_b'],
                   taus0 = s['taus0'][0,0], taun0 = s['taun0'][0,0],
                   sep_filter_iterations=p['sep_filter_iterations'],
                   zsep_y_filter=p['zsep_y_filter'])

        s['taus'], s['taun'] = s['shear'].get_shear()
        s['tau'] = np.hypot(s['taus'], s['taun'])
        s = stress_velocity(s,p)

        # Returns separation surface
        if p['process_separation']:
            s['hsep'] = s['shear'].get_separation()
            s['zsep'] = s['hsep'] + s['zb']

    elif p['process_shear'] and p['ny'] == 0:  #NTC - Added in 1D only capabilities
        s = compute_shear1d(s, p)
        s = stress_velocity(s, p)

        if p['process_separation']:
            zsep = separation1d(s, p)
            s['zsep'] = zsep
            s['hsep'] = s['zsep'] - s['zb']

            # reduce the shear stress inside the separation bubble
            tau_sep = 0.5
            slope = 0.2  # according to Durán 2010 (Sauermann 2001: c = 0.25 for 14 degrees)
            delta = 1. / (slope * tau_sep)
            zsepdelta = np.minimum(np.maximum(1. - delta * s['hsep'], 0.), 1.)
            s['taus'] *= zsepdelta
            s['taun'] *= zsepdelta
            s = stress_velocity(s, p)

    # disabled non-erodible layer limitation, kept for reference:
    # if p['process_nelayer']:
    #     if p['th_nelayer']:
    #         ustar = s['ustar'].copy()
    #         ustars = s['ustars'].copy()
    #         ustarn = s['ustarn'].copy()
    #         s['zne'][:,:] = p['ne_file']
    #         ix = s['zb'] <= s['zne']
    #         s['ustar'][ix] = np.maximum(0., s['ustar'][ix] - (s['zne'][ix]-s['zb'][ix])* (1/p['layer_thickness']) * s['ustar'][ix])
    #         ix = ustar != 0.
    #         s['ustars'][ix] = s['ustar'][ix] * (ustars[ix] / ustar[ix])
    #         s['ustarn'][ix] = s['ustar'][ix] * (ustarn[ix] / ustar[ix])

    return s
def velocity_stress(s, p):
    '''Derive the wind shear stress from the shear velocity.

    Computes ``tau = rhoa * ustar**2`` and distributes it over the
    two stress components proportionally to the shear velocity
    components. Cells without shear velocity get zero stress.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    s['tau'] = p['rhoa'] * s['ustar'] ** 2

    moving = s['ustar'] > 0.
    s['taus'][moving] = s['tau'][moving] * s['ustars'][moving] / s['ustar'][moving]
    s['taun'][moving] = s['tau'][moving] * s['ustarn'][moving] / s['ustar'][moving]
    s['tau'] = np.hypot(s['taus'], s['taun'])

    calm = s['ustar'] == 0.
    s['taus'][calm] = 0.
    s['taun'][calm] = 0.
    s['tau'][calm] = 0.

    return s
def stress_velocity(s, p):
    '''Derive the wind shear velocity from the shear stress.

    Computes ``ustar = sqrt(tau / rhoa)`` and distributes it over the
    two velocity components proportionally to the stress components.
    Cells without stress get zero shear velocity.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    s['ustar'] = np.sqrt(s['tau'] / p['rhoa'])

    stressed = s['tau'] > 0.
    s['ustars'][stressed] = s['ustar'][stressed] * s['taus'][stressed] / s['tau'][stressed]
    s['ustarn'][stressed] = s['ustar'][stressed] * s['taun'][stressed] / s['tau'][stressed]

    calm = s['tau'] == 0.
    s['ustar'][calm] = 0.
    s['ustars'][calm] = 0.
    s['ustarn'][calm] = 0.

    return s
def compute_shear1d(s, p):
    '''Compute wind shear perturbation for given free-flow wind
    speed on the computational grid, based on the same implementation
    in Duna.

    The bed slope is convolved with a 1/(pi*j) kernel (plus a local
    slope term) to obtain the relative shear perturbation
    ``tau/tau0``, which is clipped from below and applied to the
    stress components. The profile is flipped for winds blowing in
    the negative s-direction so the kernel always acts downwind.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    tau = s['tau'].copy()
    taus = s['taus'].copy()
    taun = s['taun'].copy()

    # unit direction vector of the unperturbed shear stress
    ets = np.zeros(s['tau'].shape)
    etn = np.zeros(s['tau'].shape)
    ix = tau != 0
    ets[ix] = taus[ix] / tau[ix]
    etn[ix] = taun[ix] / tau[ix]

    x = s['x'][0,:]
    zb = s['zb'][0,:]

    #Bart: check for negative wind direction
    if np.sum(taus) < 0:
        x = np.flip(x)
        zb = np.flip(zb)

    dzbdx = np.zeros(x.shape)
    tau_over_tau0 = np.zeros(x.shape)

    # central-difference bed slope (assumes a uniform grid spacing)
    dx = x[1] - x[0]
    dx = np.abs(dx)
    dzbdx[1:-1] = (zb[2:] - zb[0:-2]) / 2 / dx

    nx = x.size - 1
    # perturbation coefficients
    alfa = 3
    beta = 1
    for i in range(nx + 1):
        # discrete convolution of the upwind/downwind slopes with a
        # 1/(pi*j) kernel (the j == 0 term is excluded)
        integ = 0
        startval = i - nx
        endval = i - 1
        for j in np.arange(startval, endval + 1):
            if j != 0:
                integ = integ + dzbdx[i - j] / (j * np.pi)
        tau_over_tau0[i] = alfa * (integ + beta * dzbdx[i]) + 1
        # clip from below to avoid negative/vanishing shear
        tau_over_tau0[i] = np.maximum(tau_over_tau0[i], 0.1)

    #should double check this - but i think this is right. duna is in u10, so slightly different
    #Bart: check for negative wind direction
    if np.sum(taus) < 0:
        tau_over_tau0 = np.flip(tau_over_tau0)

    # apply the perturbation and redistribute over the components
    s['tau'] = tau * tau_over_tau0
    s['taus'] = s['tau'] * ets
    s['taun'] = s['tau'] * etn

    return s
def separation1d(s, p):
    '''Compute the surface of the flow separation bubble behind steep
    lee slopes for a 1D transect.

    Where the lee-side bed slope exceeds the maximum angle that wind
    streamlines can follow (``mu_b``), a separation bubble surface is
    constructed from third-order polynomials attached at the brink,
    low-pass filtered, and merged with the bed.

    Parameters
    ----------
    s : dict
        Spatial grids; reads 'x', 'zb' (first row) and 'udir'
    p : dict
        Model configuration parameters; reads 'dx', 'c_b' and 'mu_b'

    Returns
    -------
    numpy.ndarray
        Separation bubble surface along the transect (>= bed level)
    '''
    # Load relevant input; only the first row of the grid is used
    x = s['x'][0,:]
    z = s['zb'][0,:]
    dx = p['dx']
    c = p['c_b']        # slope parameter of the separation streamline
    mu_b = p['mu_b']    # max. slope angle [deg] streamlines can follow
    nx = np.size(z)
    udir = s['udir'][0][0]
    # Make the grids 2D (3 identical rows) to reuse the 2D shear-module code
    ny = 3
    z = np.tile(z, [ny, 1])
    # Normalize the wind direction and derive the along-wind grid spacing
    if udir < 360:
        udir = udir + 360
    if udir > 360:
        udir = udir - 360
    if udir > 180 and udir < 360:
        udir = np.abs(udir-270)
        dx = dx / np.cos(udir * np.pi / 180)
        direction = 1
    elif udir == 180:
        dx = 0.0001
        direction = 1
    elif udir == 360:
        dx = 0.0001
        direction = 1
    else:
        udir = np.abs(udir-90)
        dx = dx / np.cos(udir * np.pi / 180)
        direction = 2
    x = np.tile(x, [ny, 1])
    # reverse the transect so the wind always blows in positive x-direction
    if direction == 2:
        z = np.flip(z, 1)
    # Initialize arrays
    dzx = np.zeros(z.shape)
    stall = np.zeros(z.shape)
    bubble = np.zeros(z.shape)
    kwav = np.arange(nx)              # wavenumber indices for the spectral filter
    zsep = z.copy()                   # total separation bubble
    zsep0 = np.zeros(z.shape)         # zero-order separation bubble surface
    zsep1 = np.zeros(z.shape)         # first-order separation bubble surface
    zfft = np.zeros((ny, nx), dtype=complex)
    # Compute bed slope angle in x-dir [deg]
    dzx[:, :-1] = np.rad2deg(np.arctan((z[:, 1:] - z[:, :-1]) / dx))
    dzx[:, 0] = dzx[:, 1]
    dzx[:, -1] = dzx[:, -2]
    # Determine location of separation bubbles: a bubble exists where the
    # lee-side bed slope angle exceeds mu_b
    stall += np.logical_and(abs(dzx) > mu_b, dzx < 0.)
    # Also mark single non-stalled cells enclosed by stalled neighbours.
    # BUGFIX: the original passed three arrays to np.logical_and, which
    # interprets the third as the ``out`` argument; nest the calls instead.
    stall[:, 1:-1] += np.logical_and(stall[:, 1:-1] == 0,
                                     np.logical_and(stall[:, :-2] > 0.,
                                                    stall[:, 2:] > 0.))
    # Define separation bubble start: last non-stalled cell before a stall
    bubble[:, :-1] = np.logical_and(stall[:, :-1] == 0., stall[:, 1:] > 0.)
    # Shift bubble upwind to x0: start of separation bubble
    # (renamed from ``p``, which shadowed the configuration dict)
    nshift = 2
    bubble[:, :-nshift] = bubble[:, nshift:]
    bubble[:, :nshift] = 0
    bubble = bubble.astype(int)
    # Count separation bubbles
    n = np.sum(bubble)
    bubble_n = np.asarray(np.where(bubble == True)).T
    # Walk through all separation bubbles and determine polynomials
    for kb in range(0, n):
        i = bubble_n[kb, 1]
        j = bubble_n[kb, 0]
        # first upward slope well behind the brink (offset of 5 cells
        # presumably skips the brink region itself -- TODO confirm)
        ix_neg = (dzx[j, i + 5:] >= 0)
        if np.sum(ix_neg) == 0:
            zbrink = z[j, i]  # z level of brink at z(x0)
        else:
            zbrink = z[j, i] - z[j, i + 5 + np.where(ix_neg)[0][0]]
        # Zero-order polynomial: cubic attached at the brink with the
        # upwind bed slope, of length ls
        dzdx0 = (z[j, i - 1] - z[j, i - 2]) / dx
        a = dzdx0 / c
        ls = np.minimum(np.maximum((3. * zbrink / (2. * c) * (1. + a / 4. + a ** 2 / 8.)), 0.1), 200.)
        a2 = -3 * zbrink / ls ** 2 - 2 * dzdx0 / ls
        a3 = 2 * zbrink / ls ** 3 + dzdx0 / ls ** 2
        i_max = min(i + int(ls / dx), int(nx - 1))
        xs = x[j, i:i_max] - x[j, i]
        zsep0[j, i:i_max] = (a3 * xs ** 3 + a2 * xs ** 2 + dzdx0 * xs + z[j, i])
        # Zero-order filter: Gaussian low-pass in wavenumber space.
        # BUGFIX: the original used the (integer) loop variable here instead
        # of the wavenumber array, reducing the filter to a scalar multiply.
        Cut = 1.5
        dk = 2.0 * np.pi / (np.max(x))
        zfft[j, :] = np.fft.fft(zsep0[j, :])
        zfft[j, :] *= np.exp(-(dk * kwav * dx) ** 2 / (2. * Cut ** 2))
        zsep0[j, :] = np.real(np.fft.ifft(zfft[j, :]))
        # First-order polynomial: re-attach using the filtered surface slope
        dzdx1 = (zsep0[j, i - 1] - zsep0[j, i - 2]) / dx
        a = dzdx1 / c
        ls = np.minimum(np.maximum((3. * z[j, i] / (2. * c) * (1. + a / 4. + a ** 2 / 8.)), 0.1), 200.)
        a2 = -3 * z[j, i] / ls ** 2 - 2 * dzdx1 / ls
        a3 = 2 * z[j, i] / ls ** 3 + dzdx1 / ls ** 2
        i_max1 = min(i + int(ls / dx), int(nx - 1))
        xs1 = x[j, i:i_max1] - x[j, i]
        # Combine separation bubble with the bed
        zsep1[j, i:i_max1] = (a3 * xs1 ** 3 + a2 * xs1 ** 2 + dzdx1 * xs1 + z[j, i])
        zsep[j, i:i_max] = np.maximum(zsep1[j, i:i_max], z[j, i:i_max])
    # Smooth surface of separation bubbles over y direction
    zsep = ndimage.gaussian_filter1d(zsep, sigma=0.2, axis=0)
    # the bubble surface can never lie below the bed
    ilow = zsep < z
    zsep[ilow] = z[ilow]
    # remove the 2d aspect of results
    zsepout = zsep[1,:]
    if direction == 2:
        zsepout = np.flip(zsepout)
    return zsepout
from __future__ import absolute_import, division
import logging
import numpy as np
import scipy
import matplotlib.pyplot as plt
# package modules
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def compute(s, p):
    '''Compute wind velocity threshold based on bed surface properties

    Computes wind velocity threshold based on grain size fractions,
    bed slope, soil moisture content, air humidity, the presence of
    roughness elements and a non-erodible layer. All bed surface
    properties increase the current wind velocity threshold, except
    for the grain size fractions. Therefore, the computation is
    initialized by the grain size fractions and subsequently altered
    by the other bed surface properties.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    See Also
    --------
    compute_grainsize
    compute_bedslope
    compute_moisture
    compute_humidity
    compute_sheltering
    non_erodible

    '''

    if p['process_threshold'] and p['threshold_file'] is None:

        # order matters: grain size sets the baseline threshold, the
        # remaining processes only modify it
        if p['th_grainsize']:
            s = compute_grainsize(s, p)
        if p['th_bedslope']:
            s = compute_bedslope(s, p)
        if p['th_moisture']:
            s = compute_moisture(s, p)
        else:
            # no aeolian transport when the bed level is lower than the water level
            if p['process_tide']:
                ix = s['zb'] - s['zs'] < - p['eps']
                s['uth'][ix] = np.inf
        # NOTE(review): dry_layer is not defined in this module and
        # compute_humidity is commented out below -- enabling th_drylayer
        # or th_humidity would raise a NameError; confirm before use
        if p['th_drylayer']:
            s = dry_layer(s, p)
        if p['th_humidity']:
            s = compute_humidity(s, p)
        if p['th_salt']:
            s = compute_salt(s, p)
        if p['th_sheltering']:
            s = compute_sheltering(s, p)

    # apply complex mask (real part scales, imaginary part offsets)
    s['uth'] = apply_mask(s['uth'], s['threshold_mask'])

    # keep a copy of the threshold before the non-erodible layer is applied
    s['uthf'] = s['uth'].copy()

    #non-erodible layer (NEW)
    if p['th_nelayer']:
        s = non_erodible(s,p)

    return s
def compute_grainsize(s, p):
    '''Compute wind velocity threshold based on grain size fractions following Bagnold (1937)

    The threshold scales with the square root of the submerged specific
    gravity of the grains times the grain size, multiplied by the
    empirical constant ``Aa``.

    Parameters
    ----------
    s : dict
        Spatial grids; 'uth' and 'uth0' are (re)written
    p : dict
        Model configuration parameters; reads 'Aa', 'rhog', 'rhoa', 'g'
        and 'grain_size'

    Returns
    -------
    dict
        Spatial grids

    '''

    # Bagnold-type threshold per grain size fraction (broadcast over the
    # fraction axis); the previous initialization to 1. was redundant and
    # has been removed
    s['uth'][:,:,:] = p['Aa'] * np.sqrt((p['rhog'] - p['rhoa']) / p['rhoa'] * p['g'] * p['grain_size'])

    # Shear velocity threshold based on grainsize only (aerodynamic entrainment)
    s['uth0'] = s['uth'].copy()

    return s
def compute_bedslope(s, p):
    '''Modify wind velocity threshold based on bed slopes following Dyer (1986)

    NOTE: this is currently a placeholder; the spatial grids are
    returned unchanged and no bed slope effect is applied.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    '''

    return s
def compute_moisture(s, p):
    '''Modify wind velocity threshold based on soil moisture content following
    Belly (1964) or Hotta (1984)

    The formulation is selected through ``method_moist_threshold``; the
    supported options are ``belly_johnson``, ``hotta``, ``chepil``,
    ``saleh_fryear``, ``saleh_fryear_mod``, ``shao``, ``dong_2002``,
    ``gregory_darwish``, ``cornelis`` and ``dong_2007``. Cells wetter
    than approximately 6.4% mass content become immobile (infinite
    threshold).

    Parameters
    ----------
    s : dict
        Spatial grids; reads 'moist' and modifies 'uth'
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    Raises
    ------
    ValueError
        If an unknown moisture formulation is configured

    '''

    nf = p['nfractions']

    # convert from volumetric content (percentage of volume) to
    # geotechnical mass content (percentage of dry mass)
    mg = (s['moist'][:,:] * p['rhow'] / (p['rhog'] * (1. - p['porosity'])))
    mg = mg[:,:,np.newaxis].repeat(nf, axis=2)

    # only cells noticeably wetter than the residual moisture content
    # (offset of 0.005) are affected
    ix = mg > p['resw_moist']* p['rhow'] / (p['rhog'] * (1. - p['porosity'])) + 0.005

    if p['method_moist_threshold'].lower() == 'belly_johnson':
        s['uth'][ix] *= np.maximum(1., 1.8+0.6*np.log10(mg[ix] * 100.))
    elif p['method_moist_threshold'].lower() == 'hotta':
        s['uth'][ix] += 7.5 * mg[ix]
    elif p['method_moist_threshold'].lower() == 'chepil':
        s['uth'][ix] = np.sqrt( s['uth'][ix] ** 2 + 0.6 * mg[ix] ** 2 / (p['rhoa'] * p['w1_5'] ** 2))
    elif p['method_moist_threshold'].lower() == 'saleh_fryear':
        s['uth'][ix] = 0.305 + 0.022 * mg[ix] / p['w1_5'] + 0.506 * (mg[ix] / p['w1_5']) ** 2
    elif p['method_moist_threshold'].lower() == 'saleh_fryear_mod':
        s['uth'][ix] += 0.022 * mg[ix] / p['w1_5'] + 0.506 * (mg[ix] / p['w1_5']) ** 2
    elif p['method_moist_threshold'].lower() == 'shao':
        s['uth'][ix] *= np.exp(22.7 * mg[ix])
    elif p['method_moist_threshold'].lower() == 'dong_2002':
        # empirical coefficient K depends on the mean grain size [mm]
        d = np.sum(p['grain_size'] * p['grain_dist']) * 1000
        if d < 0.135:
            K = 2.51
        elif d < 0.150:
            K = 2.05
        elif d < 0.200:
            K = 2.75
        elif d < 0.250:
            K = 1.59
        elif d < 0.400:
            K = 1.87
        else:
            K = 2.15
        s['uth'][ix] *= np.sqrt(1 + K * 100 * mg[ix])
    elif p['method_moist_threshold'].lower() == 'gregory_darwish':
        d = np.sum(p['grain_size'] * p['grain_dist'])
        wdiff = np.zeros(mg.shape)
        wdiff[ix] = np.maximum(mg[ix] - 0.5 * p['w1_5'],0)
        a1 = 6.12e-05
        a2 = 738.8
        a3 = 0.1
        s['uth'][ix] *= np.sqrt(1 + mg[ix] + 6 / np.pi * a1 / (p['rhog'] * p['g'] * d ** 2) \
            + a2 / (p['rhow'] * p['g'] * d) * np.exp(-a3 * mg[ix] / p['w1_5']) * wdiff[ix])
    elif p['method_moist_threshold'].lower() == 'cornelis':
        d = np.sum(p['grain_size'] * p['grain_dist'])
        a1 = 0.013
        a2 = 1.7e-4
        a3 = 3e14
        s['uth'][ix] = np.sqrt(a1 * (1 + mg[ix] + a2 /((p['rhog'] - p['rhoa']) * p['g'] * d ** 2) \
                    * (1 + a3 * 0.075 ** 2 * d / (10 ** 9 * np.exp(-6.5 * mg[ix] / p['w1_5']))) \
                    * (p['rhog'] - p['rhoa']) / p['rhoa'] * p['g'] * d))
    elif p['method_moist_threshold'].lower() == 'dong_2007':
        d = np.sum(p['grain_size'] * p['grain_dist'])
        s['uth'][ix] = 0.16 * np.sqrt((p['rhog'] - p['rhoa']) / p['rhoa'] * p['g'] * d) * (1 + 478.2 * mg[ix] ** 1.52)
    else:
        # BUGFIX: the message referenced the non-existent key
        # 'method_moist', which raised a KeyError instead of the
        # intended ValueError
        logger.log_and_raise('Unknown moisture formulation [%s]' % p['method_moist_threshold'], exc=ValueError)

    # should be .04 according to Pye and Tsoar
    # should be .64 according to Delgado-Fernandez (10% vol.)
    ix = mg > 0.064
    s['uth'][ix] = np.inf

    return s
#REMOVE?? CH
# def compute_humidity(s, p):
# '''Modify wind velocity threshold based on air humidity following Arens (1996)
# Parameters
# ----------
# s : dict
# Spatial grids
# p : dict
# Model configuration parameters
# Returns
# -------
# dict
# Spatial grids
# '''
# nx = p['nx']+1
# ny = p['ny']+1
# nf = p['nfractions']
# # compute effect of humidity on shear velocity threshold
# H = 5.45 * (1. + .17 * (1. + np.cos(s['udir'])) - 2.11/100. + 2.11/(100. - s['meteo']['R']))
# # modify shear velocity threshold
# s['uth'] += H.reshape((ny,nx,1)).repeat(nf, axis=-1) # TODO: probably incorrect
# return s
def compute_salt(s, p):
    '''Modify wind velocity threshold based on salt content following Nickling and Ecclestone (1981)

    The threshold is amplified with an exponential function of the salt
    concentration; only the first slice along the last axis of the salt
    grid is used and the factor is repeated over all grain fractions.

    Parameters
    ----------
    s : dict
        Spatial grids; reads 'salt' and modifies 'uth'
    p : dict
        Model configuration parameters; reads 'csalt' and 'nfractions'

    Returns
    -------
    dict
        Spatial grids

    '''

    nf = p['nfractions']

    # compute effect of salt content on shear velocity threshold
    # (unused nx/ny locals removed)
    cs = p['csalt'] * (1. - s['salt'][:,:,:1])
    CS = 1.03 * np.exp(.1027 * 1e3 * cs).repeat(nf, axis=-1)

    # modify shear velocity threshold
    s['uth'] *= CS

    return s
def compute_sheltering(s, p):
    '''Modify wind velocity threshold based on the presence of roughness elements following Raupach (1993)

    Raupach (1993) presents the following amplification factor for the
    shear velocity threshold due to the presence of roughness
    elements.

    .. math::

        R_t = \\frac{u_{*,th,s}}{u_{*,th,r}}
            = \\sqrt{\\frac{\\tau_s''}{\\tau}}
            = \\frac{1}{\\sqrt{\\left( 1 - m \\sigma \\lambda \\right)
                                \\left( 1 + m \\beta \\lambda \\right)}}

    :math:`m` is a constant smaller or equal to unity that accounts
    for the difference between the average stress on the bed surface
    :math:`\\tau_s` and the maximum stress on the bed surface
    :math:`\\tau_s''`.

    :math:`\\beta` is the stress partition coefficient defined as the
    ratio between the drag coefficient of the roughness element itself
    :math:`C_r` and the drag coefficient of the bare surface without
    roughness elements :math:`C_s`.

    :math:`\\sigma` is the shape coefficient defined as the basal area
    divided by the frontal area: :math:`\\frac{A_b}{A_f}`. For
    hemispheres :math:`\\sigma = 2`, for spheres :math:`\\sigma = 1`.

    :math:`\\lambda` is the roughness density defined as the number of
    elements per surface area :math:`\\frac{n}{S}` multiplied by the
    frontal area of a roughness element :math:`A_f`, also known as the
    frontal area index:

    .. math::

        \\lambda = \\frac{n b h}{S} = \\frac{n A_f}{S}

    If multiplied by :math:`\\sigma` the equation simplifies to the
    mass fraction of non-erodible elements:

    .. math::

        \\sigma \\lambda = \\frac{n A_b}{S} = \\sum_{k=n_0}^{n_k} \hat{w}^{\mathrm{bed}}_k

    where :math:`k` is the fraction index, :math:`n_0` is the smallest
    non-erodible fraction, :math:`n_k` is the largest non-erodible
    fraction and :math:`\hat{w}^{\mathrm{bed}}_k` is the mass fraction
    of sediment fraction :math:`k`. It is assumed that the fractions
    are ordered by increasing size.

    Substituting the derivation in the Raupach (1993) equation gives
    the formulation implemented in this function:

    .. math::

        u_{*,th,r} = u_{*,th,s} * \\sqrt{\\left( 1 - m \\sum_{k=n_0}^{n_k} \hat{w}^{\mathrm{bed}}_k \\right)
                                         \\left( 1 + m \\frac{\\beta}{\\sigma} \\sum_{k=n_0}^{n_k} \hat{w}^{\mathrm{bed}}_k \\right)}

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    '''

    nx = p['nx']+1
    ny = p['ny']+1
    nf = p['nfractions']

    # mass fraction of non-erodible fractions used as roughness measure;
    # only the top bed layer is considered, flattened to (ny*nx, nf)
    mass = s['mass'][:,:,0,:].reshape((-1,nf))

    # accumulate the mass fraction of all fractions that cannot be moved
    # by the current shear velocity (ustar <= uth)
    gd = np.zeros((nx*ny,))
    for i in range(nf):
        ix = (s['ustar'] <= s['uth'][:,:,i]).flatten()
        gd[ix] += mass[ix,i]
    gd /= mass.sum(axis=-1)

    # compute inverse of shear stress ratio
    Rti = np.sqrt((1. - p['m'] * gd) * (1. + p['m'] * p['beta'] / p['sigma'] * gd))
    # NOTE(review): 'Rti' is stored flattened here, while 'uth' below uses
    # the reshaped (ny, nx) form -- confirm downstream consumers expect this
    s['Rti'] = Rti

    # modify shear velocity threshold
    s['uth'] *= Rti.reshape((ny,nx,1)).repeat(nf, axis=-1)

    return s
def non_erodible(s, p):
    '''Make cells immobile where a non-erodible layer is exposed.

    The bed level is compared against the non-erodible surface level
    (``zne``); wherever the bed lies within a thin buffer above that
    surface, the velocity threshold of every grain size fraction is set
    to infinity so those cells cannot erode further.

    Parameters
    ----------
    s : dict
        Spatial grids; reads 'zb', writes 'zne' and modifies 'uth'
    p : dict
        Model configuration parameters; reads 'ne_file' and 'nfractions'

    Returns
    -------
    dict
        Spatial grids

    '''

    nfractions = p['nfractions']
    s['zne'][:,:] = p['ne_file']

    # buffer thickness [m] above the non-erodible surface within which
    # the layer counts as exposed
    buffer_height = 0.01
    exposed = (s['zb'] <= s['zne'] + buffer_height)

    # "hard" method: exposed cells cannot be mobilised at all (a smooth
    # alternative would scale the threshold with the remaining cover)
    for frac in range(nfractions):
        s['uth'][exposed, frac] = np.inf

    return s
import re
import numpy as np
#import numba
#import numba_scipy
import scipy
def isiterable(x):
    '''Return True when ``x`` can be fully iterated over; strings count as scalars.'''

    if isinstance(x, str):
        return False
    try:
        # materialize the iteration so objects that fail mid-way count as
        # non-iterable (mirrors the historical behavior)
        for _ in x:
            pass
    except Exception:
        return False
    return True
def makeiterable(x):
    '''Coerce any value to a numpy array.

    Iterables are converted as-is; scalars are wrapped in a one-element
    array and ``None`` becomes an empty array.
    '''

    if isiterable(x):
        return np.asarray(x)
    return np.asarray([] if x is None else [x])
def isarray(x):
    '''Return True for indexable (array-like) objects; strings are excluded.'''

    return not isinstance(x, str) and hasattr(x, '__getitem__')
def interp_array(x, xp, fp, circular=False, **kwargs):
    '''Interpolate multiple time series at once

    Evaluates every column of ``fp`` at position ``x``.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing.
    fp : 2-D sequence of floats
        The y-coordinates of the data points, same length as ``xp``.
    circular : bool
        Use :func:`interp_circular` rather than :func:`numpy.interp`.
    kwargs : dict
        Keyword options passed to the interpolation function

    Returns
    -------
    ndarray
        The interpolated values, one per column of ``fp``.

    '''

    ncols = fp.shape[1]
    if circular:
        vals = [interp_circular(x, xp, fp[:, i], **kwargs) for i in range(ncols)]
    else:
        vals = [np.interp(x, xp, fp[:, i], **kwargs) for i in range(ncols)]
    return np.asarray(vals, dtype=float)
def interp_circular(x, xp, fp, **kwargs):
    '''One-dimensional linear interpolation with circular wrapping.

    Behaves like :func:`numpy.interp`, except that positions beyond the
    limits of ``xp`` are wrapped back into the data range before
    interpolation. For example, ``x > xp.max()`` evaluates as
    ``f(x - xp.max())`` assuming ``xp.max() - x < xp.max()``.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing.
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as ``xp``.
    kwargs : dict
        Keyword options to the :func:`numpy.interp` function

    Returns
    -------
    y : {float, ndarray}
        The interpolated values, same shape as ``x``.

    Raises
    ------
    ValueError
        If ``xp`` and ``fp`` have different length

    '''

    lo = xp.min()
    hi = xp.max()
    span = hi - lo
    # map x into the data range by wrapping around its extent
    xw = lo + np.mod(x - hi - 1., span + 1.)
    return np.interp(xw, xp, fp, **kwargs)
def normalize(x, ref=None, axis=0, fill=0.):
    '''Normalize an array so it sums to unity along an axis.

    Dimensions whose reference sums to zero are safe: they return the
    ``fill`` value instead of dividing by zero.

    Parameters
    ----------
    x : array_like
        The array to be normalized
    ref : array_like, optional
        Alternative normalization reference, if not specified, the sum of x is used
    axis : int, optional
        The normalization axis (default: 0)
    fill : float, optional
        The return value for all-zero dimensions (default: 0.)

    '''

    x = makeiterable(x)
    if ref is None:
        ref = np.sum(x, axis=axis, keepdims=True).repeat(x.shape[axis], axis=axis)
    out = np.zeros(x.shape) + fill
    ok = ref != 0.
    out[ok] = x[ok] / ref[ok]
    return out
def prevent_tiny_negatives(x, max_error=1e-10, replacement=0.):
    '''Replace tiny negative values in an array, in place.

    Parameters
    ----------
    x : np.ndarray
        Array with potential tiny negative values; modified in place
    max_error : float
        Maximum absolute value to be replaced
    replacement : float
        Replacement value

    Returns
    -------
    np.ndarray
        The same array with tiny negative values removed

    '''

    tiny = np.logical_and(x < 0., x > -max_error)
    x[tiny] = replacement
    return x
def print_value(val, fill='<novalue>'):
    '''Build a string representation of an arbitrary value.

    Iterables are rendered as space-separated items, ``None`` as
    ``fill``, booleans as ``T``/``F``, and floats with six decimals
    below one and two decimals otherwise.

    Parameters
    ----------
    val : misc
        Value to be represented as string
    fill : str, optional
        String representation used in case no value is given

    Returns
    -------
    str
        String representation of value

    '''

    if isiterable(val):
        return ' '.join(print_value(v) for v in val)
    if val is None:
        return fill
    # NOTE: bool must be tested before int, since bool is an int subclass
    if isinstance(val, bool):
        return 'T' if val else 'F'
    if isinstance(val, int):
        return '%d' % val
    if isinstance(val, float):
        return ('%0.6f' % val) if val < 1. else ('%0.2f' % val)
    return str(val)
def format_log(msg, ncolumns=2, **props):
    '''Format a log message and its properties into columns.

    Returns a list of lines: the main message first, then the sorted
    key/value properties laid out ``ncolumns`` per line so they fit a
    70-character terminal.

    Parameters
    ----------
    msg : str
        Main log message
    ncolumns : int
        Number of columns
    props : key/value pairs
        Properties to print in column format

    Returns
    -------
    list of str
        Formatted log message

    Note
    ----
    Property names starting with ``min``, ``max`` or ``nr`` are
    respectively rendered as ``min.``, ``max.`` or ``#``.

    '''

    lines = [msg, '']
    col = 0
    for key in sorted(props):
        if col == ncolumns:
            lines.append('')
            col = 0
        label = re.sub('^min', 'min. ', key)
        label = re.sub('^max', 'max. ', label)
        label = re.sub('^nr', '# ', label)
        lines[-1] += '%-15s: %-10s ' % (label.ljust(15, '.'),
                                        print_value(props[key]))
        col += 1
    return lines
def apply_mask(arr, mask):
    '''Apply a complex mask to an array, in place.

    The input is multiplied by the real part of the mask and the
    imaginary part is added. A mask whose shape matches only the
    leading dimensions of the input is repeated over the remaining
    trailing dimensions.

    Parameters
    ----------
    arr : numpy.ndarray
        Array or matrix to which the mask needs to be applied
    mask : numpy.ndarray
        Array or matrix with complex mask values

    Returns
    -------
    arr : numpy.ndarray
        Array or matrix to which the mask is applied

    '''

    mask = np.asarray(mask)

    # repeat the mask over any trailing dimensions of the input array
    for size in arr.shape[mask.ndim:]:
        mask = mask[..., np.newaxis].repeat(size, axis=-1)

    # scale by the real part, offset by the imaginary part
    arr *= np.real(mask)
    arr += np.imag(mask)

    return arr
def rotate(x, y, alpha, origin=(0,0)):
    '''Rotate a matrix over given angle around given origin

    Rotates the coordinate arrays ``x`` and ``y`` counter-clockwise by
    ``alpha`` degrees around ``origin`` and returns the rotated pair
    with the original array shapes.

    NOTE(review): relies on ``np.asmatrix``/``np.matrix`` semantics
    (``*`` as matrix product); numpy discourages np.matrix -- consider
    migrating to ``@`` on ndarrays.
    '''
    # shift coordinates so the rotation origin coincides with (0, 0)
    xr = x - origin[0]
    yr = y - origin[1]
    # counter-clockwise rotation matrix for alpha [degrees]
    a = alpha / 180. * np.pi
    R = np.asmatrix([[np.cos(a), -np.sin(a)],
                     [np.sin(a), np.cos(a)]])
    # stack coordinates as rows and apply the rotation as a matrix product
    xy = np.concatenate((xr.reshape((-1,1)),
                         yr.reshape((-1,1))), axis=1) * R
    # shift back and restore the original array shapes
    return (np.asarray(xy[:,0].reshape(x.shape) + origin[0]),
            np.asarray(xy[:,1].reshape(y.shape) + origin[1]))
# @numba.njit
def sc_kv(v, z):
    '''Modified Bessel function of the second kind.

    Thin wrapper around :func:`scipy.special.kv`; presumably kept as a
    separate function for the (currently disabled) numba jit
    experiments above -- see the commented decorator and imports.
    '''
    return scipy.special.kv(v, z)
def calc_grain_size(p, s, percent):
    '''Calculate a grain size percentile from the mass per fraction.

    For every grid cell the cumulative weight distribution over the
    grain size fractions is built and linearly interpolated at the
    requested percentage; e.g. ``percent=50`` yields the D50. Only the
    top bed layer is considered.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters
    percent : float
        Requested percentage in the grain size distribution

    Returns
    -------
    array
        grain size per grid cell

    '''
    from scipy.interpolate import interp1d

    # only the surface layer is relevant for transport
    surface_mass = s['mass'][:, :, 0, :]
    ny, nx = surface_mass.shape[:2]

    # diameters with a leading zero so the cumulative curve starts at 0%
    edges = np.insert(p['grain_size'], 0, 0)

    D = np.zeros((ny, nx))
    for iy in range(ny):
        for ix in range(nx):
            cumw = np.cumsum(np.insert(surface_mass[iy, ix, :], 0, 0))
            pct = 100 * cumw / np.max(cumw)
            f_linear = interp1d(list(pct), edges, fill_value='extrapolate')
            D[iy, ix] = f_linear(percent)
    return D
def calc_mean_grain_size(p, s):
    '''Calculate the mass-weighted mean grain size per grid cell.

    The weights follow from the mass distribution over the grain size
    fractions in the top bed layer.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    array
        mean grain size per grid cell

    '''
    # only the surface layer is relevant for transport
    surface_mass = s['mass'][:, :, 0, :]

    # normalize the mass per fraction and take the weighted sum of diameters
    weights = surface_mass / surface_mass.sum(axis=-1, keepdims=True)
    return np.sum(p['grain_size'] * weights, axis=-1)
from __future__ import absolute_import, division
import logging
from scipy import ndimage, misc
import numpy as np
import math
#import matplotlib.pyplot as plt
from aeolis.wind import *
# package modules
import aeolis.wind
#from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def initialize(s, p):
    '''Initialize the vegetation grids from the vegetation file.

    Negative densities are clipped to zero and an all-NaN file resets
    the density field. Vegetation height follows the density through
    ``h = hveg_max * sqrt(rhoveg)``.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    '''

    if p['veg_file'] is not None:
        s['rhoveg'][:, :] = p['veg_file']

    if np.isnan(s['rhoveg'][0, 0]):
        s['rhoveg'][:, :] = 0.

    # clip negative densities to zero
    negative = s['rhoveg'] < 0
    s['rhoveg'][negative] = 0.

    # height follows density
    s['hveg'][:, :] = p['hveg_max'] * np.sqrt(s['rhoveg'])

    # cells with any vegetation count as germinated; no lateral expansion yet
    s['germinate'][:, :] = s['rhoveg'] > 0
    s['lateral'][:, :] = 0.

    return s
def vegshear(s, p):
    '''Apply the configured vegetation shear-reduction model.

    The Okin model is only used for 1D runs (``ny == 0``); every other
    configuration falls back to the Raupach model. Afterwards the
    velocities are converted back to shear stresses.
    '''

    okin_applicable = p['vegshear_type'] == 'okin' and p['ny'] == 0
    if okin_applicable:
        s = vegshear_okin(s, p)
    else:
        s = vegshear_raupach(s, p)
    return velocity_stress(s, p)
def germinate(s,p):
    '''Update germination and lateral expansion of vegetation.

    Annual germination and lateral expansion probabilities are converted
    to per-timestep probabilities and compared against a random field.
    Only cells without net erosion may newly germinate; lateral
    expansion requires a positive vegetation density gradient towards a
    neighbouring cell.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    ny = p['ny']

    # number of model timesteps per year [-]
    n = (365.25*24.*3600. / (p['dt_opt'] * p['accfac']))

    # Determine which cells are already germinated before
    s['germinate'][:, :] = (s['rhoveg'] > 0.)

    # Germination
    p_germinate_year = p['germinate']
    # convert annual probability to an equivalent per-timestep probability
    p_germinate_dt = 1-(1-p_germinate_year)**(1./n)
    germination = np.random.random((s['germinate'].shape))

    # Germinate new cells: only where the bed is stable or accreting
    germinate_new = (s['dzbveg'] >= 0.) * (germination <= p_germinate_dt)
    s['germinate'] += germinate_new.astype(float)
    s['germinate'] = np.minimum(s['germinate'], 1.)

    # Lateral expension
    if ny > 1:
        dx = s['ds'][2,2]
    else:
        dx = p['dx']

    p_lateral_year = p['lateral']
    p_lateral_dt = 1-(1-p_lateral_year)**(1./n)
    # scale the per-timestep probability to a per-cell-width probability
    p_lateral_cell = 1 - (1-p_lateral_dt)**(1./dx)

    # positive vegetation density gradients towards each of the four neighbours
    drhoveg = np.zeros((p['ny']+1, p['nx']+1, 4))

    drhoveg[:,1:,0] = np.maximum((s['rhoveg'][:,:-1]-s['rhoveg'][:,1:]) / s['ds'][:,1:], 0.)  # positive x-direction
    drhoveg[:,:-1,1] = np.maximum((s['rhoveg'][:,1:]-s['rhoveg'][:,:-1]) / s['ds'][:,:-1], 0.)  # negative x-direction
    drhoveg[1:,:,2] = np.maximum((s['rhoveg'][:-1,:]-s['rhoveg'][1:,:]) / s['dn'][1:,:], 0.)  # positive y-direction
    drhoveg[:-1,:,3] = np.maximum((s['rhoveg'][1:,:]-s['rhoveg'][:-1,:]) / s['dn'][:-1,:], 0.)  # negative y-direction

    # number of neighbours with denser vegetation scales the probability
    lat_veg = drhoveg > 0.
    s['drhoveg'] = np.sum(lat_veg[:,:,:], 2)

    p_lateral = p_lateral_cell * s['drhoveg']

    s['lateral'] += (germination <= p_lateral)
    s['lateral'] = np.minimum(s['lateral'], 1.)

    return s
def grow (s, p): #DURAN 2006
    '''Grow vegetation following Duran (2006).

    Growth is limited by the distance to the optimal burial rate
    (``dzb_opt``) and either operates on vegetation height ('orig'
    method) or directly on density (any other ``veggrowth_type``).
    Vegetation dies where it gets inundated (when tides are modelled)
    or where the bed lies below ``veg_min_elevation``.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    # mask of cells eligible for growth: germinated or laterally expanded,
    # and only when vertical growth is enabled (V_ver > 0)
    ix = np.logical_or(s['germinate'] != 0., s['lateral'] != 0.) * ( p['V_ver'] > 0.)

    # Reduction of vegetation growth due to sediment burial
    s['dhveg'][ix] = p['V_ver'] * (1 - s['hveg'][ix] / p['hveg_max']) - np.abs(s['dzbveg'][ix]-p['dzb_opt']) * p['veg_gamma'] # m/year

    # Adding growth
    if p['veggrowth_type'] == 'orig': #based primarily on vegetation height
        # convert growth rate [m/year] to the (accelerated) timestep
        s['hveg'] += s['dhveg']*(p['dt_opt'] * p['accfac']) / (365.25*24.*3600.)
        s['hveg'] = np.maximum(np.minimum(s['hveg'], p['hveg_max']), 0.)
        s['rhoveg'] = (s['hveg']/p['hveg_max'])**2
    else:
        # density-based growth with characteristic timescale t_veg [days]
        t_veg = p['t_veg']/365
        v_gam = p['v_gam']
        rhoveg_max = p['rhoveg_max']
        ix2 = s['rhoveg'] > rhoveg_max
        s['rhoveg'][ix2] = rhoveg_max
        ixzero = s['rhoveg'] <= 0
        if p['V_ver'] > 0:
            s['drhoveg'][ix] = (rhoveg_max - s['rhoveg'][ix])/t_veg - (v_gam/p['hveg_max'])*np.abs(s['dzbveg'][ix] - p['dzb_opt'])*p['veg_gamma']
        else:
            s['drhoveg'][ix] = 0
        s['rhoveg'] += s['drhoveg']*(p['dt']*p['accfac'])/(365.25 * 24 *3600)
        irem = s['rhoveg'] < 0
        s['rhoveg'][irem] = 0
        s['rhoveg'][ixzero] = 0 #here only grow vegetation that already existed
        #now convert back to height for Okin or wherever else needed
        s['hveg'][:,:] = p['hveg_max']*np.sqrt(s['rhoveg'])

    # Plot has to vegetate again after dying
    s['germinate'] *= (s['rhoveg']!=0.)
    s['lateral'] *= (s['rhoveg']!=0.)

    # Dying of vegetation due to hydrodynamics (Dynamic Vegetation Limit)
    if p['process_tide']:
        s['rhoveg'] *= (s['zb'] +0.01 >= s['zs'])
        s['hveg'] *= (s['zb'] +0.01 >= s['zs'])
        s['germinate'] *= (s['zb'] +0.01 >= s['zs'])
        s['lateral'] *= (s['zb'] +0.01 >= s['zs'])

    # no vegetation below the minimum elevation
    ix = s['zb'] < p['veg_min_elevation']
    s['rhoveg'][ix] = 0
    s['hveg'][ix] = 0
    s['germinate'][ix] = 0
    s['lateral'][ix] = 0

    return s
def vegshear_okin(s, p):
    '''Reduce shear velocity in the lee of vegetation following Okin (2008).

    For every cell with vegetation (height > 0), the shear velocity of
    cells up to 20 plant heights downwind is reduced; the reduction
    recovers exponentially with downwind distance scaled by the plant
    height. Overlapping wakes are summed and capped at 100% reduction.
    Only works on 1D grids (first row).

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    #Approach to calculate shear reduction in the lee of plants using the general approach of:
    #Okin (2008), JGR, A new model of wind erosion in the presence of vegetation
    #Note that implementation only works in 1D currently

    #Initialize shear variables and other grid parameters
    ustar = s['ustar'].copy()
    ustars = s['ustars'].copy()
    ustarn = s['ustarn'].copy()
    # unit direction components of the shear velocity vector
    ets = np.zeros(s['zb'].shape)
    etn = np.zeros(s['zb'].shape)
    ix = ustar != 0
    ets[ix] = ustars[ix] / ustar[ix]
    etn[ix] = ustarn[ix] / ustar[ix]

    udir = s['udir'][0,0] + 180

    x = s['x'][0,:]
    zp = s['hveg'][0,:]

    red = np.zeros(x.shape)
    red_all = np.zeros(x.shape)
    nx = x.size

    c1 = p['okin_c1_veg']
    intercept = p['okin_initialred_veg']

    # normalize the wind direction to [0, 360]
    if udir < 0:
        udir = udir + 360

    if udir > 360:
        udir = udir - 360

    #Calculate shear reduction by looking through all cells that have plants present and looking downwind of those features
    for igrid in range(nx):

        if zp[igrid] > 0: # only look at cells with a roughness element
            mult = np.ones(x.shape)
            h = zp[igrid] #vegetation height at the appropriate cell

            # distance relative to the plant, positive pointing downwind
            if udir >= 180 and udir <= 360:
                xrel = -(x - x[igrid])
            else:
                xrel = x - x[igrid]

            for igrid2 in range(nx):

                if xrel[igrid2] >= 0 and xrel[igrid2]/h < 20:

                    # apply okin model
                    mult[igrid2] = intercept + (1 - intercept) * (1 - np.exp(-xrel[igrid2] * c1 / h))

            red = 1 - mult

            # fix potential issues for summation
            ix = red < 0.00001
            red[ix] = 0
            ix = red > 1
            red[ix] = 1
            ix = xrel < 0
            red[ix] = 0

            # combine all reductions between plants
            red_all = red_all + red

    # cant have more than 100% reduction
    ix = red_all > 1
    red_all[ix] = 1

    #update shear velocity according to Okin (note does not operate on shear stress)
    mult_all = 1 - red_all
    ustarveg = s['ustar'][0,:] * mult_all
    ix = ustarveg < 0.01
    ustarveg[ix] = 0.01 #some small number so transport code doesnt crash

    s['ustar'][0,:] = ustarveg
    s['ustars'][0,:] = s['ustar'][0,:] * ets[0,:]
    s['ustarn'][0,:] = s['ustar'][0,:] * etn[0,:]

    return s
def vegshear_raupach(s, p):
    '''Reduce shear velocity on vegetated cells following Raupach (1993).

    The reduction factor ``1 / sqrt(1 + gamma * rhoveg)`` is smoothed
    over the grid with a Gaussian filter and applied to the shear
    velocity; the directional components are rescaled accordingly.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids

    '''

    shear = s['ustar'].copy()
    shear_s = s['ustars'].copy()
    shear_n = s['ustarn'].copy()

    # unit direction components of the shear velocity vector
    dir_s = np.zeros(s['zb'].shape)
    dir_n = np.zeros(s['zb'].shape)
    nonzero = shear != 0
    dir_s[nonzero] = shear_s[nonzero] / shear[nonzero]
    dir_n[nonzero] = shear_n[nonzero] / shear[nonzero]

    # Raupach, 1993: reduction factor from the vegetation density
    reduction = 1. / np.sqrt(1. + p['gamma_vegshear'] * s['rhoveg'])

    # Smoothen the change in the factor between surrounding cells with a
    # Gaussian filter
    s['vegfac'] = ndimage.gaussian_filter(reduction, sigma=p['veg_sigma'])

    # Apply the reduction factor to the total shear velocity
    s['ustar'] *= s['vegfac']
    s['ustars'] = s['ustar'] * dir_s
    s['ustarn'] = s['ustar'] * dir_n

    return s
from __future__ import absolute_import, division
import os
import re
import time
import shutil
import logging
from webbrowser import UnixBrowser
import numpy as np
from matplotlib import pyplot as plt
# package modules
from aeolis.utils import *
from aeolis.constants import *
# from regex import S
# initialize logger
logger = logging.getLogger(__name__)
def read_configfile(configfile, parse_files=True, load_defaults=True):
    '''Read model configuration file

    Updates default model configuration based on a model configuration
    file. The model configuration file should be a text file with one
    parameter on each line. The parameter name and value are seperated
    by an equal sign (=). Any lines that start with a percent sign (%)
    or do not contain an equal sign are omitted.

    Parameters are casted into the best matching variable type. If the
    variable type is ``str`` it is optionally interpreted as a
    filename. If the corresponding file is found it is parsed using
    the ``numpy.loadtxt`` function.

    Parameters
    ----------
    configfile : str
        Model configuration file
    parse_files : bool
        If True, files referred to by string parameters are parsed
    load_defaults : bool
        If True, default settings are loaded and overwritten by the
        settings from the configuration file

    Returns
    -------
    dict
        Dictionary with casted and optionally parsed model
        configuration parameters

    Raises
    ------
    IOError
        If the configuration file does not exist

    See Also
    --------
    write_configfile
    check_configuration

    '''

    if load_defaults:
        p = DEFAULT_CONFIG.copy()
    else:
        p = {}

    if os.path.exists(configfile):
        with open(configfile, 'r') as fp:
            for line in fp:
                # skip comment lines (%) and lines without an assignment
                if '=' in line and not line.strip().startswith('%'):
                    # NOTE(review): values containing '=' are truncated at
                    # the second '=' -- confirm this is intended
                    key, val = line.split('=')[:2]
                    p[key.strip()] = parse_value(val, parse_files=parse_files)
    else:
        logger.log_and_raise('File not found [%s]' % configfile, exc=IOError)

    # normalize grain size distribution
    if 'grain_dist' in p:
        #p['grain_dist'] = normalize(p['grain_dist']) # commented to allow distribution for multiple layers.
        p['grain_dist'] = makeiterable(p['grain_dist'])
        p['grain_size'] = makeiterable(p['grain_size'])

    # set default output file, if not given
    if 'output_file' in p and not p['output_file']:
        p['output_file'] = '%s.nc' % os.path.splitext(configfile)[0]

    # set default value for h, if not given
    if 'h' in p and not p['h']:
        p['h'] = p['z']

    if 'process_fences' in p and not p['process_fences']:
        p['process_fences'] = False

    # set default for nsavetimes, if not given
    if 'nsavetimes' in p and not p['nsavetimes']:
        p['nsavetimes'] = int(p['dzb_interval']/p['dt'])

    return p
def write_configfile(configfile, p=None):
    '''Write model configuration file

    Writes model configuration to file. If no model configuration is
    given, the default configuration is written to file. Any
    parameters with a name ending with `_file` and holding a matrix
    are treated as separate files. The matrix is then written to an
    ASCII file using the ``numpy.savetxt`` function and the parameter
    value is replaced by the name of the ASCII file.

    Parameters
    ----------
    configfile : str
        Model configuration file
    p : dict, optional
        Dictionary with model configuration parameters

    See Also
    --------
    read_configfile

    '''

    if p is None:
        p = DEFAULT_CONFIG.copy()

    # right-align all parameter names to the longest key
    # BUGFIX: iterkeys()/iteritems() are Python 2 only and raised
    # AttributeError under Python 3; replaced by keys()/items()
    fmt = '%%%ds = %%s\n' % np.max([len(k) for k in p.keys()])

    with open(configfile, 'w') as fp:

        fp.write('%s\n' % ('%' * 70))
        fp.write('%%%% %-64s %%%%\n' % 'AeoLiS model configuration')
        fp.write('%%%% Date: %-58s %%%%\n' % time.strftime('%Y-%m-%d %H:%M:%S'))
        fp.write('%s\n' % ('%' * 70))
        fp.write('\n')

        for k, v in sorted(p.items()):
            if k.endswith('_file') and isiterable(v):
                # matrix-valued parameters go into separate ASCII files;
                # the configuration then refers to the file name
                fname = '%s.txt' % k.replace('_file', '')
                backup(fname)
                np.savetxt(fname, v)
                fp.write(fmt % (k, fname))
            else:
                fp.write(fmt % (k, print_value(v, fill='')))
def check_configuration(p):
    '''Check model configuration validity

    Checks if required parameters are set and if the references files
    for bathymetry, wind, tide and meteorological input are
    valid. Throws an error if one or more requirements are not met.

    Parameters
    ----------
    p : dict
        Model configuration dictionary with parsed files

    See Also
    --------
    read_configfile

    '''

    # bathymetry needs an x-grid and a bed file; a y-grid is required in 2D
    grids_ok = isarray(p['xgrid_file']) and isarray(p['bed_file']) \
               and (isarray(p['ygrid_file']) or p['ny'] <= 0)
    if not grids_ok:
        logger.log_and_raise('Incomplete bathymetry definition', exc=ValueError)

    # each time-series file must be a 2-D table with a minimum column count
    for key, mincols, msg in (('wind_file', 3, 'Invalid wind definition file'),
                              ('tide_file', 2, 'Invalid tide definition file'),
                              ('meteo_file', 6, 'Invalid meteo definition file')):
        series = p[key]
        if isarray(series) and (series.ndim != 2 or series.shape[1] < mincols):
            logger.log_and_raise(msg, exc=ValueError)

    # warn for experimental / backward-compatibility options
    if p['th_humidity']:
        logger.warning('Wind velocity threshold based on air humidity following Arens (1996) '
                       'is implemented for testing only. Use with care.')

    if p['th_salt']:
        logger.warning('Wind velocity threshold based on salt content following Nickling and '
                       'Ecclestone (1981) is implemented for testing only. Use with care.')

    if p['method_roughness'] == 'constant':
        logger.warning('Warning: the used roughness method (constant) defines the z0 as '
                       'k (z0 = k), this was implemented to ensure backward compatibility '
                       'and does not follow the definition of Nikuradse (z0 = k / 30).')
def parse_value(val, parse_files=True, force_list=False):
    '''Casts a string to the most appropriate variable type

    Parameters
    ----------
    val : str
        String representation of value
    parse_files : bool
        If True, files referred to by string parameters are parsed by
        ``numpy.loadtxt``
    force_list : bool
        If True, interpret the value as a list, even if it consists of
        a single value

    Returns
    -------
    misc
        Casted value

    Examples
    --------
    >>> type(parse_value('T'))
    bool
    >>> type(parse_value('F'))
    bool
    >>> type(parse_value('123'))
    int
    >>> type(parse_value('123.2'))
    float
    >>> type(parse_value('euler_forward'))
    str
    >>> type(parse_value(''))
    NoneType
    >>> type(parse_value('zb zs Ct'))
    numpy.ndarray
    >>> type(parse_value('zb', force_list=True))
    numpy.ndarray
    >>> type(parse_value('0.1 0.2 0.3')[0])
    float
    >>> type(parse_value('wind.txt', parse_files=True))
    numpy.ndarray
    >>> type(parse_value('wind.txt', parse_files=False))
    str

    '''

    # strip trailing comments: everything after a '%' is ignored
    if '%' in val:
        val = val.split('%')[0]
        val = val.strip()

    if ' ' in val or force_list:
        # space-separated values (or forced lists) become arrays; each
        # element is parsed recursively
        return np.asarray([parse_value(x) for x in val.split(' ')])
    elif re.match(r'^[TF]$', val):
        return val == 'T'
    elif re.match(r'^-?\d+$', val):
        return int(val)
    elif re.match(r'^-?[\d\.]+$', val):
        return float(val)
    elif re.match('None', val):
        # NOTE: prefix match kept for backward compatibility, so any
        # string starting with "None" parses to None
        return None
    elif os.path.isfile(val) and parse_files:
        # parse referenced file; try float first, fall back to complex
        for dtype in [float, complex]:
            try:
                val = np.loadtxt(val, dtype=dtype)
                break
            except Exception:
                # file could not be parsed with this dtype; try the next
                pass
        return val
    elif val == '':
        return None
    else:
        return val
def backup(fname):
    '''Creates a backup file of the provided file, if it exists'''

    if not os.path.exists(fname):
        return
    shutil.copyfile(fname, get_backupfilename(fname))
def get_backupfilename(fname):
    '''Returns a non-existing backup filename'''

    # probe candidate names of the form "<fname>~NNN" until a free one is found
    backupfile = None
    for n in range(1, 1000):
        candidate = '%s~%03d' % (fname, n)
        if not os.path.exists(candidate):
            backupfile = candidate
            break

    if backupfile is None:
        # all 999 candidate names are taken
        logger.log_and_raise('Too many backup files in use! Please clean up...', exc=ValueError)

    return backupfile
def visualize_grid(s, p):
    '''Create figures and tables for the user to check whether the grid-input is correctly interpreted

    Plots the bed elevation on the (possibly rotated) model grid,
    annotates the boundary types, corner indices and rotation angle
    ``alpha``, and saves the result to
    ``figure_grid_initialization.png`` in the working directory.

    Parameters
    ----------
    s : dict
        Spatial grids (``x``, ``y``, ``zb``)
    p : dict
        Model configuration parameters
    '''
    # Read the x,y,z dimensions
    x = s['x']
    y = s['y']
    zb = s['zb']
    # Read the angle of the rotated grid and avoid negative values
    alpha = p['alpha']
    if alpha < 0.:
        alpha += 360.
    # Determine the maximum dimensions in x- and y-direction
    xlen = np.max(x)-np.min(x)
    ylen = np.max(y)-np.min(y)
    xylen = np.maximum(xlen, ylen)
    # Compute the coordinates for the arc of the angle alpha
    # (one arc point per degree, centered on the (0,0) grid corner)
    arc_angles = np.linspace(270., 270. + alpha, 1+int(alpha))
    radius = np.minimum(xlen, ylen) * 0.05
    arc_x = x[0,0] + radius * np.cos(np.deg2rad(arc_angles))
    arc_y = y[0,0] + radius * np.sin(np.deg2rad(arc_angles))
    # Compute coordinates of labels to indicate boundary types
    # (midpoints of the four grid edges)
    x_offshore = np.mean([x[0,0], x[-1,0]])
    y_offshore = np.mean([y[0,0], y[-1,0]])
    x_onshore = np.mean([x[0,-1], x[-1,-1]])
    y_onshore = np.mean([y[0,-1], y[-1,-1]])
    x_lateralA = np.mean([x[0,0], x[0,-1]])
    y_lateralA = np.mean([y[0,0], y[0,-1]])
    x_lateralB = np.mean([x[-1,0], x[-1,-1]])
    y_lateralB = np.mean([y[-1,0], y[-1,-1]])
    # Create plots: mesh for 2D grids, scatter for 1D (single-row) grids
    fig, ax = plt.subplots()
    if p['ny'] > 0:
        pc = ax.pcolormesh(x,y,zb)
    else:
        pc = ax.scatter(x,y,c=zb)
    # Plot all the texts (boundary types, corner indices, rotation angle)
    plottxts = []
    plottxts.append(ax.text(x_offshore, y_offshore, 'Offshore: ' + p['boundary_offshore'], rotation=alpha + 90, ha = 'center', va='center'))
    plottxts.append(ax.text(x_onshore, y_onshore, 'Onshore: ' + p['boundary_onshore'], rotation=alpha + 270, ha = 'center', va='center'))
    plottxts.append(ax.text(x_lateralA, y_lateralA, 'Lateral: ' + p['boundary_lateral'], rotation=alpha + 0, ha = 'center', va='center'))
    plottxts.append(ax.text(x_lateralB, y_lateralB, 'Lateral: ' + p['boundary_lateral'], rotation=alpha + 180, ha = 'center', va='center'))
    plottxts.append(ax.text(x[0,0], y[0,0], '(0,0)', ha = 'right', va='top'))
    plottxts.append(ax.text(x[0,-1], y[0,-1], '(0,' + str(len(x[0,:])-1) + ')', ha = 'right', va='top'))
    plottxts.append(ax.text(x[-1,0], y[-1,0], '(' + str(len(x[:,0])-1) + ',0)', ha = 'right', va='top'))
    plottxts.append(ax.text(x[-1,-1], y[-1,-1], '(' + str(len(x[:,0])-1) + ',' + str(len(x[0,:])-1) + ')', ha = 'right', va='top'))
    plottxts.append(ax.text(x[0,0], y[0,0]-0.1*ylen, r'$\alpha$ :' + str(int(alpha)) + r'$^\circ$', ha='center', va='center'))
    # Set boxes around the texts for readability
    for txt in plottxts:
        txt.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='black'))
    # Plot dots to indicate the corner-points
    ax.plot(x[0,0], y[0,0], 'ro')
    ax.plot(x[0,-1], y[0,-1], 'ro')
    ax.plot(x[-1,0], y[-1,0], 'ro')
    ax.plot(x[-1,-1], y[-1,-1], 'ro')
    # Plot the arc to indicate angle
    ax.plot(arc_x, arc_y, color = 'red')
    ax.plot([x[0,0], x[0,0]], [y[0,0],y[0,0]-0.08*ylen], '--', color = 'red', linewidth=3)
    ax.plot([x[0,0], arc_x[-1]], [y[0,0], arc_y[-1]], color = 'red', linewidth=3)
    # Figure lay-out settings: equal aspect with a 15% margin around the grid
    fig.colorbar(pc, ax=ax)
    ax.axis('equal')
    ax.set_xlim([np.min(x) - 0.15*xylen, np.max(x) + 0.15*xylen])
    ax.set_ylim([np.min(y) - 0.15*xylen, np.max(y) + 0.15*xylen])
    height = 8.26772 # A4 width
    width = 11.6929 # A4 height
    fig.set_size_inches(width, height)
    plt.tight_layout()
    # Saving and plotting figure
    fig.savefig('figure_grid_initialization.png', dpi=200)
    plt.close()
    return
def visualize_timeseries(p, t):
    '''Create figures and tables for the user to check whether the timeseries-input is correctly interpreted

    Plots wind speed/direction, wave height/period and water level
    time series between ``tstart`` and ``tstop`` and saves the figure
    to ``figure_timeseries_initialization.png``.

    Parameters
    ----------
    p : dict
        Model configuration parameters (``wind_file``, ``wave_file``,
        ``tide_file``, ``tstart``, ``tstop``)
    t : float
        Model time; currently unused in this function
    '''
    # Start and stop times
    tstart = p['tstart']
    tstop = p['tstop']
    # Read the user input (wind): columns are time, speed, direction
    uw_t = p['wind_file'][:,0]
    uw_s = p['wind_file'][:,1]
    uw_d = p['wind_file'][:,2]
    # Read the user input (waves): columns are time, Hs, Tp
    w_t = p['wave_file'][:,0]
    w_Hs = p['wave_file'][:,1]
    w_Tp = p['wave_file'][:,2]
    # Read the user input (tide): columns are time, water level
    T_t = p['tide_file'][:,0]
    T_zs = p['tide_file'][:,1]
    # Create plots: one panel per time series
    fig, axs = plt.subplots(5, 1)
    # Plotting
    axs[0].plot(uw_t, uw_s, 'k')
    axs[1].plot(uw_t, uw_d, 'k')
    axs[2].plot(w_t, w_Hs, 'k')
    axs[3].plot(w_t, w_Tp, 'k')
    axs[4].plot(T_t, T_zs, 'k')
    # Assigning titles
    axs[0].set_title('Wind velocity at height z, uw (m/s)')
    axs[1].set_title('Wind direction, udir (deg)')
    axs[2].set_title('Wave height, Hs (m)')
    axs[3].set_title('Wave period, Tp (sec)')
    axs[4].set_title('Water level, zs (m)')
    # Limit all panels to the simulated time span
    for ax in axs:
        ax.set_xlim([tstart, tstop])
        ax.set_xlabel('Time since refdate (s) from tstart (=' + str(tstart) + ') to tstop (=' + str(tstop) + ')')
    width = 8.26772 # A4 width
    height = 11.6929 # A4 height
    fig.set_size_inches(width, height)
    plt.tight_layout()
    # Saving and plotting figure
    fig.savefig('figure_timeseries_initialization.png', dpi=200)
    plt.close()
def visualize_spatial(s, p):
    '''Create figures and tables for the user to check whether the input is correctly interpreted

    Plots a 5x3 overview of the initial spatial fields (bed level,
    vegetation, wind/shear, moisture, thresholds and masks) and saves
    it to ``figure_params_initialization.png``.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters
    '''
    # Read the x,y dimensions
    x = s['x']
    y = s['y']
    # Reading masks and if constant, fill 2D-array
    # masks are stored as complex numbers: real part is the multiplication
    # factor, imaginary part the additive term
    uth_mask_multi = np.ones(np.shape(x)) * np.real(s['threshold_mask'])
    tide_mask_multi = np.ones(np.shape(x)) * np.real(s['tide_mask'])
    wave_mask_multi = np.ones(np.shape(x)) * np.real(s['wave_mask'])
    uth_mask_add = np.ones(np.shape(x)) * np.imag(s['threshold_mask'])
    tide_mask_add = np.ones(np.shape(x)) * np.imag(s['tide_mask'])
    wave_mask_add = np.ones(np.shape(x)) * np.imag(s['wave_mask'])
    # Determine the maximum dimensions in x- and y-direction
    xlen = np.max(x)-np.min(x)
    ylen = np.max(y)-np.min(y)
    # Creating values
    fig, axs = plt.subplots(5, 3)
    pcs = [[None for _ in range(3)] for _ in range(5)]
    # Plotting colormeshes (2D grids) or scatters (1D grids)
    # NOTE(review): `clim=` may not be an accepted scatter/pcolormesh kwarg
    # in newer matplotlib versions — verify against the pinned version
    if p['ny'] > 0:
        pcs[0][0] = axs[0,0].pcolormesh(x, y, s['zb'], cmap='viridis')
        pcs[0][1] = axs[0,1].pcolormesh(x, y, s['zne'], cmap='viridis')
        pcs[0][2] = axs[0,2].pcolormesh(x, y, s['rhoveg'], cmap='Greens', clim= [0, 1])
        pcs[1][0] = axs[1,0].pcolormesh(x, y, s['uw'], cmap='plasma')
        pcs[1][1] = axs[1,1].pcolormesh(x, y, s['ustar'], cmap='plasma')
        pcs[1][2] = axs[1,2].pcolormesh(x, y, s['tau'], cmap='plasma')
        pcs[2][0] = axs[2,0].pcolormesh(x, y, s['moist'], cmap='Blues', clim= [0, 0.4])
        pcs[2][1] = axs[2,1].pcolormesh(x, y, s['gw'], cmap='viridis')
        pcs[2][2] = axs[2,2].pcolormesh(x, y, s['uth'][:,:,0], cmap='plasma')
        pcs[3][0] = axs[3,0].pcolormesh(x, y, uth_mask_multi, cmap='binary', clim= [0, 1])
        pcs[3][1] = axs[3,1].pcolormesh(x, y, tide_mask_multi, cmap='binary', clim= [0, 1])
        pcs[3][2] = axs[3,2].pcolormesh(x, y, wave_mask_multi, cmap='binary', clim= [0, 1])
        pcs[4][0] = axs[4,0].pcolormesh(x, y, uth_mask_add, cmap='binary', clim= [0, 1])
        pcs[4][1] = axs[4,1].pcolormesh(x, y, tide_mask_add, cmap='binary', clim= [0, 1])
        pcs[4][2] = axs[4,2].pcolormesh(x, y, wave_mask_add, cmap='binary', clim= [0, 1])
    else:
        pcs[0][0] = axs[0,0].scatter(x, y, c=s['zb'], cmap='viridis')
        pcs[0][1] = axs[0,1].scatter(x, y, c=s['zne'], cmap='viridis')
        pcs[0][2] = axs[0,2].scatter(x, y, c=s['rhoveg'], cmap='Greens', clim= [0, 1])
        pcs[1][0] = axs[1,0].scatter(x, y, c=s['uw'], cmap='plasma')
        pcs[1][1] = axs[1,1].scatter(x, y, c=s['ustar'], cmap='plasma')
        pcs[1][2] = axs[1,2].scatter(x, y, c=s['tau'], cmap='plasma')
        pcs[2][0] = axs[2,0].scatter(x, y, c=s['moist'], cmap='Blues', clim= [0, 0.4])
        pcs[2][1] = axs[2,1].scatter(x, y, c=s['gw'], cmap='viridis')
        pcs[2][2] = axs[2,2].scatter(x, y, c=s['uth'][:,:,0], cmap='plasma')
        pcs[3][0] = axs[3,0].scatter(x, y, c=uth_mask_multi, cmap='binary', clim= [0, 1])
        pcs[3][1] = axs[3,1].scatter(x, y, c=tide_mask_multi, cmap='binary', clim= [0, 1])
        pcs[3][2] = axs[3,2].scatter(x, y, c=wave_mask_multi, cmap='binary', clim= [0, 1])
        pcs[4][0] = axs[4,0].scatter(x, y, c=uth_mask_add, cmap='binary', clim= [0, 1])
        pcs[4][1] = axs[4,1].scatter(x, y, c=tide_mask_add, cmap='binary', clim= [0, 1])
        pcs[4][2] = axs[4,2].scatter(x, y, c=wave_mask_add, cmap='binary', clim= [0, 1])
    # Quiver for vectors (subsampled every `skip` cells for readability)
    skip = 10
    axs[1,0].quiver(x[::skip, ::skip], y[::skip, ::skip], s['uws'][::skip, ::skip], s['uwn'][::skip, ::skip])
    axs[1,1].quiver(x[::skip, ::skip], y[::skip, ::skip], s['ustars'][::skip, ::skip], s['ustarn'][::skip, ::skip])
    axs[1,2].quiver(x[::skip, ::skip], y[::skip, ::skip], s['taus'][::skip, ::skip], s['taun'][::skip, ::skip])
    # Adding titles to the plots
    axs[0,0].set_title('Bed level, zb (m)')
    axs[0,1].set_title('Non-erodible layer, zne (m)')
    axs[0,2].set_title('Vegetation density, rhoveg (-)')
    axs[1,0].set_title('Wind velocity, uw (m/s)')
    axs[1,1].set_title('Shear velocity, ustar (m/s)')
    axs[1,2].set_title('Shear stress, tau (N/m2)')
    axs[2,0].set_title('Soil moisture content, (-)')
    axs[2,1].set_title('Ground water level, gw (m)')
    axs[2,2].set_title('Velocity threshold (0th fraction), uth (m/s)')
    axs[3,0].set_title('Threshold multiplication mask (-)')
    axs[3,1].set_title('Tide multiplication mask (-)')
    axs[3,2].set_title('Wave multiplication mask (-)')
    axs[4,0].set_title('Threshold addition mask (-)')
    axs[4,1].set_title('Tide addition mask (-)')
    axs[4,2].set_title('Wave addition mask (-)')
    # Formatting the plot
    for irow, ax_rows in enumerate(axs):
        for icol, ax in enumerate(ax_rows):
            # Figure lay-out settings: equal aspect with a 15% margin
            fig.colorbar(pcs[irow][icol], ax=ax)
            ax.axis('equal')
            xylen = np.maximum(xlen, ylen)
            ax.set_xlim([np.min(x) - 0.15*xylen, np.max(x) + 0.15*xylen])
            ax.set_ylim([np.min(y) - 0.15*xylen, np.max(y) + 0.15*xylen])
            ax.axes.xaxis.set_visible(False)
            ax.axes.yaxis.set_visible(False)
    width = 8.26772*2 # A4 width
    height = 11.6929*2 # A4 height
    fig.set_size_inches(width, height)
    plt.tight_layout()
    # Saving and plotting figure
    fig.savefig('figure_params_initialization.png', dpi=300)
    plt.close()
    return
import logging
import numpy as np
import scipy.special
import scipy.interpolate
from scipy import ndimage, misc
#import matplotlib
#import matplotlib.pyplot as plt
from builtins import range, int
import math
from collections import namedtuple
from copy import copy
from pprint import pprint as pp
import sys
import os
from aeolis.wind import velocity_stress
# package modules
from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def initialize(s,p):
    '''Initialize sand fence grids

    Sets the fence base (current bed level), fence top and fence
    height grids when fence processes are enabled.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''

    if p['process_fences']:
        s['fence_height'][:,:] = p['fence_file']
        s['fence_base'] = copy(s['zb']) # initial fence base is the bed elevation
        s['fence_top'] = s['fence_base'] + s['fence_height']
        # store independent copies: the previous implementation aliased these
        # grids to s['fence_height'], so later in-place edits (e.g. zeroing
        # eroded fences in update_fence_height) silently corrupted the stored
        # initial height and the zf grid
        s['fence_height_init'] = copy(s['fence_height'])
        s['zf'] = copy(s['fence_height'])

    return s
def update_fences(s,p):
    '''Update fence height and recompute the wind shear field

    Recomputes the exposed fence height for the current bed level and
    applies the fence-induced shear reduction (2D or 1D depending on
    the grid), after which the velocity/stress grids are updated.
    '''

    s = update_fence_height(s, p)

    # pick the shear routine matching the grid dimensionality
    shear_fn = fence_shear2d if p['ny'] > 0 else fence_shear1d
    s = shear_fn(s, p)

    s = velocity_stress(s,p)

    return s
def update_fence_height(s, p):
    '''Recompute the exposed fence height from the fence top and bed level

    Fences with less than 0.1 m exposed (or with a fence top or initial
    height below 0.1 m) are treated as buried; fences with more than
    1.5 m exposed are assumed to have eroded out, in which case the
    stored initial height is also cleared.
    '''

    height = s['fence_top'] - s['zb']

    # bury fences that are (almost) fully covered
    buried = (s['fence_height_init'] < 0.1) \
             | (s['fence_top'] < 0.1) \
             | (height < 0.1)
    height[buried] = 0.

    # if more than 1.5 m is exposed, assume the fence has eroded out
    eroded = height > 1.5
    height[eroded] = 0.
    s['fence_height_init'][eroded] = 0.

    s['fence_height'] = height

    return s
def fence_shear2d(s, p):
    '''Compute the 2D fence-induced shear velocity reduction

    Builds a buffered computational grid aligned with the current wind
    direction, applies the Okin-style shear reduction behind fences and
    maps the reduced shear velocities back onto the model grid.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids with updated ``ustar``, ``ustars`` and ``ustarn``
    '''
    x = s['x']
    y = s['y']
    zf = s['fence_height']
    ustarx = s['ustars']
    ustary = s['ustarn']
    #dx = p['dx']/10
    #dy = p['dx']/10
    # computational grid resolution: half the model resolution, at least 0.25 m
    dx = np.maximum(p['dx']/2, 0.25)
    dy = dx
    # wind direction, normalized to [0, 360)
    udir = s['udir'][0, 0]
    if udir < 0:
        udir = udir + 360
    udir = udir - np.floor(udir / 360) * 360
    # nudge exact cardinal directions to avoid degenerate rotations
    if udir == 0 or udir == 360 or udir == -360 or udir == -180 or udir == 180:
        udir += 0.00001
    igrid, cgrid, x0, y0 = initialize_computational_grid(x, y, zf, ustarx, ustary, dx, dy, buffer_width=100)
    igrid, cgrid = calc_fence_shear(igrid, cgrid, udir, x0, y0, p)
    # copy reduced shear back to the model state and recompute the magnitude
    s['ustars'] = igrid['ustarx']
    s['ustarn'] = igrid['ustary']
    s['ustar'] = np.sqrt(s['ustars']**2 + s['ustarn']**2)
    return s
def initialize_computational_grid(x, y, z, ustarx, ustary, dx, dy, buffer_width=100., buffer_relaxation=None):
    '''Bundle the input grids and construct the rotated computational grid

    Returns the input grid dict, the computational grid dict and the
    rotation origin (the center of the input grid).
    '''

    if buffer_relaxation is None:
        buffer_relaxation = buffer_width / 4.

    igrid = {
        'x': x,
        'y': y,
        'z': z,
        'ustarx': ustarx,
        'ustary': ustary,
        'mult_all': np.ones(x.shape),
    }
    cgrid = {'dx': dx, 'dy': dy}

    x0, y0, cgrid = set_computational_grid(igrid, cgrid, buffer_width)

    return igrid, cgrid, x0, y0
def set_computational_grid(igrid, cgrid, buffer_width):
    '''Define computational grid

    The computational grid is square with dimensions equal to the
    diagonal of the bounding box of the input grid, plus twice the
    buffer width.

    '''

    xi = igrid['x']
    yi = igrid['y']

    # rotation origin: center of the input grid
    x0 = np.mean(xi)
    y0 = np.mean(yi)

    # square size: bounding-box diagonal plus a buffer on both sides
    D = np.sqrt((xi.max() - xi.min()) ** 2 +
                (yi.max() - yi.min()) ** 2) + 2 * buffer_width

    # determine equidistant, square grid centered on (x0, y0)
    xc, yc = get_exact_grid(x0 - D / 2., x0 + D / 2.,
                            y0 - D / 2., y0 + D / 2.,
                            cgrid['dx'], cgrid['dy'])

    cgrid['xi'] = xc
    cgrid['yi'] = yc

    return x0, y0, cgrid
def calc_fence_shear(igrid, cgrid, udir, x0, y0, p):
    '''Compute wind shear for given wind speed and direction

    Rotates the computational grid into the wind, applies the Okin-style
    fence shear reduction, rotates back and interpolates the result onto
    the input grid.

    Parameters
    ----------
    igrid : dict
        Input (model) grid
    cgrid : dict
        Computational grid
    udir : float
        Wind direction in degrees
    x0, y0 : float
        Rotation origin (input grid center)
    p : dict
        Model configuration parameters
    '''

    gc = cgrid # computational grid
    gi = igrid # initial grid

    # Populate computational grid (rotate to wind direction + interpolate input topography)
    populate_computational_grid(igrid, cgrid, udir + 90., x0, y0)

    # Compute wind shear stresses on computational grid
    gc = compute_fenceshear(gi, gc, udir, p)

    # apply the multiplicative reduction factor; the crosswind component is
    # zero in the wind-aligned frame
    ustarx_init = gc['ustarx'][0,0]
    gc['ustarx'] = gc['ustarx'] * gc['mult_all']
    gc['ustary'] = np.zeros(gc['x'].shape)

    #ensure bad data doesnt make it through
    #ix = gc['mindist'] > 20
    #gc['ustarx'][ix] = ustarx_init

    gc['ustarx'], gc['ustary'] = rotate(gc['ustarx'], gc['ustary'], udir + 90)

    # Rotate both (i&c) grids + results in opposite dir.
    gi['x'], gi['y'] = rotate(gi['x'], gi['y'], -(udir + 90.), origin=(x0, y0))
    gc['x'], gc['y'] = rotate(gc['x'], gc['y'], -(udir + 90.), origin=(x0, y0))
    # NOTE(review): assignment targets are swapped (ustary receives the first
    # return value) — confirm whether this is intentional
    gc['ustary'], gc['ustarx'] = rotate(gc['ustarx'], gc['ustary'], -(udir + 90))

    # Interpolate wind shear results to real grid
    gi['ustarx'] = interpolate(gc['x'], gc['y'], gc['ustarx'],
                               gi['x'], gi['y'])
    gi['ustary'] = interpolate(gc['x'], gc['y'], gc['ustary'],
                               gi['x'], gi['y'])

    # Rotate real grid and wind shear results back to orignal orientation
    gc['x'], gc['y'] = rotate(gc['x'], gc['y'], udir + 90., origin=(x0, y0))
    gi['x'], gi['y'] = rotate(gi['x'], gi['y'], +(udir + 90.), origin=(x0, y0))
    gi['ustarx'], gi['ustary'] = rotate(gi['ustarx'], gi['ustary'], +(udir + 90))

    #avoid any boundary effects
    #gi['ustarx'][1,:] = gi['ustarx'][2,:]
    #gi['ustarx'][:,1] = gi['ustarx'][:,2]
    #gi['ustarx'][-2,:] = gi['ustarx'][-3,:]
    #gi['ustarx'][:,-2] = gi['ustarx'][:,-3]

    #gi['ustary'][1,:] = gi['ustary'][1,:]
    #gi['ustary'][:,1] = gi['ustary'][:,1]
    #gi['ustary'][-2,:] = gi['ustary'][-2,:]
    #gi['ustary'][:,-2] = gi['ustary'][:,-2]

    # copy the second (second-to-last) row/column into the outermost cells to
    # suppress interpolation artifacts at the grid edges
    gi['ustarx'][0,:] = gi['ustarx'][1,:]
    gi['ustarx'][:,0] = gi['ustarx'][:,1]
    gi['ustarx'][-1,:] = gi['ustarx'][-2,:]
    gi['ustarx'][:,-1] = gi['ustarx'][:,-2]
    gi['ustary'][0,:] = gi['ustary'][1,:]
    gi['ustary'][:,0] = gi['ustary'][:,1]
    gi['ustary'][-1,:] = gi['ustary'][-2,:]
    gi['ustary'][:,-1] = gi['ustary'][:,-2]

    return gi, gc
# Helper functions for calc_fence_shear()
def populate_computational_grid(igrid, cgrid, alpha, x0, y0):
    '''Interpolate input topography to computational grid

    Adds and fills buffer zone around the initial grid and
    rotates the computational grid to current wind direction.
    The computational grid is filled by interpolating the input
    topography and initial wind induced shear stresses to it.

    Parameters
    ----------
    igrid : dict
        Input (model) grid
    cgrid : dict
        Computational grid (updated in place and returned)
    alpha : float
        Rotation angle in degrees
    x0, y0 : float
        Rotation origin
    '''

    gi = igrid
    gc = cgrid

    x = gi['x']
    shp = x.shape
    # NOTE(review): for a 2-D coordinate array shp[2] always raises
    # IndexError, so ny ends up 0 and the first branch below is taken,
    # where dxi/dyi are corner *coordinates* rather than grid spacings —
    # confirm whether this is intended
    try:
        ny = shp[2]
    except:
        ny = 0

    # Add buffer zone around grid # buffer is based on version bart, sigmoid function is no longer required
    if ny <= 0:
        dxi = gi['x'][0, 0]
        dyi = gi['y'][0, 0]
    else:
        dxi = gi['x'][1, 1] - gi['x'][0, 0]
        dyi = gi['y'][1, 1] - gi['y'][0, 0]

    buf = 100 # amount of cells

    xi, yi = np.meshgrid(
        np.linspace(gi['x'][0, 0] - buf * dxi, gi['x'][-1, -1] + buf * dxi, gi['x'].shape[1] + 2 * buf),
        np.linspace(gi['y'][0, 0] - buf * dyi, gi['y'][-1, -1] + buf * dyi, gi['y'].shape[0] + 2 * buf))

    # place the fence heights in the center of the buffered array
    zi = np.zeros((xi.shape))
    zi[buf:-buf, buf:-buf] = gi['z']

    # Filling buffer zone edges (repeat the nearest interior row/column)
    zi[buf:-buf, :buf] = np.repeat(zi[buf:-buf, buf + 1][:, np.newaxis], buf, axis=1)
    zi[buf:-buf, -buf:] = np.repeat(zi[buf:-buf, -buf - 1][:, np.newaxis], buf, axis=1)
    zi[:buf, buf:-buf] = np.repeat(zi[buf + 1, buf:-buf][np.newaxis], buf, axis=0)
    zi[-buf:, buf:-buf] = np.repeat(zi[-buf - 1, buf:-buf][np.newaxis], buf, axis=0)

    # Filling buffer zone corners (constant nearest interior value)
    zi[:buf, :buf] = zi[buf + 1, buf + 1]
    zi[-buf:, :buf] = zi[-buf - 1, buf + 1]
    zi[:buf, -buf:] = zi[buf + 1, -buf - 1]
    zi[-buf:, -buf:] = zi[-buf - 1, -buf - 1]

    # Rotate computational grid to the current wind direction
    xc, yc = rotate(gc['xi'], gc['yi'], alpha, origin=(x0, y0))

    # Interpolate input topography to computational grid
    zfc = interpolate(gi['x'], gi['y'], gi['z'], xc, yc)
    ustarxc = interpolate(gi['x'], gi['y'], gi['ustarx'], xc, yc)
    ustaryc = interpolate(gi['x'], gi['y'], gi['ustary'], xc, yc)

    # Interpolate input wind - shear
    # tauxc = interpolate(gi['x'], gi['y'], gi['taux'], xc, yc)
    # tauyc = interpolate(gi['x'], gi['y'], gi['tauy'], xc, yc)

    gc['x'] = xc
    gc['y'] = yc
    gc['z'] = zfc
    # gc['taux'] = tauxc
    # gc['tauy'] = tauyc
    gc['zfc'] = zfc
    gc['ustarx'] = ustarxc
    gc['ustary'] = ustaryc

    return gc
def compute_fenceshear(igrid, cgrid, udir, p):
    '''Compute wind shear perturbation for given free-flow wind
    speed on computational grid

    Applies the Okin-style exponential shear recovery downwind of each
    fence cell, row by row, and stores the combined multiplicative
    reduction factor in ``cgrid['mult_all']``.

    Parameters
    ----------
    igrid : dict
        Input grid (currently unused in this function)
    cgrid : dict
        Computational grid aligned with the wind direction
    udir : float
        Wind direction in degrees
    p : dict
        Model configuration parameters (``okin_c1_fence``,
        ``okin_initialred_fence``)
    '''
    gc = cgrid
    zf = gc['z']
    ny, nx = gc['z'].shape
    mult_all = np.ones(zf.shape)
    #mindist = np.ones(zf.shape) * 1000

    # process each wind-aligned row independently
    for iy in range(ny):

        # intialize other grid parameters
        x = gc['x'][iy, :]
        zp = gc['z'][iy, :]
        red_all = np.zeros(x.shape)
        nx2 = x.size
        c1 = p['okin_c1_fence']
        intercept = p['okin_initialred_fence']

        # NOTE: the loop variable below shadows the `igrid` parameter;
        # the parameter is not used after this point
        for igrid in range(nx2):

            # only look at cells with a roughness element
            if zp[igrid] > 0:
                # print(zp[igrid])
                # local parameters
                # downwind distance relative to the fence cell; sign depends
                # on the wind direction quadrant
                if udir >= 180 and udir <= 360:
                    xrel = x - x[igrid]
                else:
                    xrel = -(x - x[igrid])

                red = np.zeros(x.shape)
                mult = np.ones(x.shape)
                h = zp[igrid]

                for igrid2 in range(nx2):

                    # Okin model applies within 20 fence heights downwind
                    if xrel[igrid2] >= 0 and xrel[igrid2] / h < 20:
                        # apply okin model
                        # apply okin model
                        mult[igrid2] = intercept + (1 - intercept) * (1 - math.exp(-xrel[igrid2] * c1 / h))

                        #ifind = xrel > 0
                        #if np.size(ifind) > 0:
                        #    mindist[iy, igrid2] = np.minimum(np.min(xrel[ifind]), mindist[iy, igrid2])

                red = 1 - mult

                # fix potential issues for summation
                ix = red < 0.00001
                red[ix] = 0
                ix = red > 1
                red[ix] = 1
                ix = xrel < 0
                red[ix] = 0

                # combine all reductions between plants
                red_all = red_all + red

        # cant have more than 100% reduction
        ix = red_all > 1
        red_all[ix] = 1

        # convert to a multiple
        mult_all[iy, :] = 1 - red_all
        #mult_all[iy, igrid2] = mult_all[iy, igrid2] - mult_temp

    #avoid any boundary effects: no reduction in the outermost cells
    mult_all[0,:] = 1
    mult_all[:,0] = 1
    mult_all[-1,:] = 1
    mult_all[:,-1] = 1

    cgrid['mult_all'] = mult_all
    #cgrid['mindist'] = mindist

    return cgrid
def get_exact_grid(xmin, xmax, ymin, ymax, dx, dy):
    '''Returns a grid with given gridsizes approximately within given bounding box'''

    def _axis(vmin, vmax, dv):
        # snap the bounds outward to multiples of the grid size
        return np.arange(np.floor(vmin / dv) * dv,
                         np.ceil(vmax / dv) * dv, dv)

    x, y = np.meshgrid(_axis(xmin, xmax, dx), _axis(ymin, ymax, dy))

    return x, y
def rotate(x, y, alpha, origin=(0, 0)):
'''Rotate a matrix over given angle around given origin'''
xr = x - origin[0]
yr = y - origin[1]
a = alpha / 180. * np.pi
R = np.asmatrix([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
xy = np.concatenate((xr.reshape((-1, 1)),
yr.reshape((-1, 1))), axis=1) * R
return (np.asarray(xy[:, 0].reshape(x.shape) + origin[0]),
np.asarray(xy[:, 1].reshape(y.shape) + origin[1]))
def interpolate(x, y, z, xi, yi):
    '''Interpolate one grid to an other

    Parameters
    ----------
    x, y : numpy.ndarray
        Coordinate matrices of the source grid; assumes a rectilinear
        grid where ``x`` varies along axis 1 and ``y`` along axis 0
    z : numpy.ndarray
        Source grid values
    xi, yi : numpy.ndarray
        Coordinate matrices of the target grid

    Returns
    -------
    numpy.ndarray
        Interpolated values on the target grid; points outside the
        source grid are filled with zero
    '''

    # query points in (y, x) order, matching the interpolator axes
    xyi = np.concatenate((yi.reshape((-1, 1)),
                          xi.reshape((-1, 1))), axis=1)

    # version Bart
    inter = scipy.interpolate.RegularGridInterpolator((y[:, 0], x[0, :]), z, bounds_error=False, fill_value=0.)
    zi = inter(xyi).reshape(xi.shape)

    return zi
def fence_shear1d(s, p):
    '''Compute the 1D fence-induced shear velocity reduction

    Applies the Okin-style exponential shear recovery downwind of each
    fence cell along the single grid row and updates ``ustar``,
    ``ustars`` and ``ustarn`` accordingly.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters

    Returns
    -------
    dict
        Spatial grids
    '''
    ustar = s['ustar'].copy()
    ustars = s['ustars'].copy()
    ustarn = s['ustarn'].copy()

    # unit direction vectors of the current shear field (zero-safe)
    ets = np.zeros(s['zb'].shape)
    etn = np.zeros(s['zb'].shape)
    ix = ustar != 0
    ets[ix] = ustars[ix] / ustar[ix]
    etn[ix] = ustarn[ix] / ustar[ix]

    udir = s['udir'][0,0]+180

    x = s['x'][0,:]
    zp = s['fence_height'][0,:]

    red = np.zeros(x.shape)
    red_all = np.zeros(x.shape)
    nx = x.size
    c1 = p['okin_c1_fence']
    intercept = p['okin_initialred_fence']

    # NOTE(review): this normalization adds 360 to any udir < 360 before
    # subtracting 360 again from values > 360; net effect is a no-op for
    # 0..360 and a shift into range for negative values — confirm intent
    if udir < 360:
        udir = udir + 360

    if udir > 360:
        udir = udir - 360

    #Calculate shear reduction by looking through all cells that have plants present and looking downwind of those features
    for igrid in range(nx):

        if zp[igrid] > 0:   # only look at cells with a roughness element
            mult = np.ones(x.shape)
            h = zp[igrid] #vegetation height at the appropriate cell

            # downwind distance relative to the fence cell; sign depends on
            # the wind direction quadrant
            if udir >= 180 and udir <= 360:
                xrel = -(x - x[igrid])
            else:
                xrel = x - x[igrid]

            for igrid2 in range(nx):

                # Okin model applies within 20 fence heights downwind
                if xrel[igrid2] >= 0 and xrel[igrid2]/h < 20:

                    # apply okin model
                    mult[igrid2] = intercept + (1 - intercept) * (1 - math.exp(-xrel[igrid2] * c1 / h))

            red = 1 - mult

            # fix potential issues for summation
            ix = red < 0.00001
            red[ix] = 0
            ix = red > 1
            red[ix] = 1
            ix = xrel < 0
            red[ix] = 0

            # combine all reductions between plants
            red_all = red_all + red

    # cant have more than 100% reduction
    ix = red_all > 1
    red_all[ix] = 1

    # convert to a multiple and apply to the shear velocity magnitude
    mult_all = 1 - red_all
    ustarfence = s['ustar'][0,:] * mult_all
    # floor the result to keep the transport solver stable
    ix = ustarfence < 0.01
    ustarfence[ix] = 0.01 #some small number so transport code doesnt crash
    s['ustar'][0,:] = ustarfence
    # redistribute the magnitude over the original direction components
    s['ustars'][0,:] = s['ustar'][0,:] * ets[0,:]
    s['ustarn'][0,:] = s['ustar'][0,:] * etn[0,:]

    return s
from __future__ import absolute_import, division
import logging
from scipy import ndimage, misc
from scipy.stats import norm, mode
import numpy as np
import math
#import matplotlib.pyplot as plt
# package modules
import aeolis.wind
from aeolis.utils import *
# from aeolis.utils import *
# initialize logger
logger = logging.getLogger(__name__)
def run_ph12(s, p, t):
    ''' Calculates bed level change due to dune erosion

    Calculates bed level change due to dune erosion accoording to Palmsten and Holman (2012).

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters
    t : float
        Model time

    Returns
    -------
    dict
        Spatial grids

    '''

    # interpolate forcing to the current model time
    # NOTE(review): Ho is interpolated but not used below — verify
    Ho = np.interp(t, p['wave_file'][:, 0], p['wave_file'][:, 1])
    Tp = np.interp(t, p['wave_file'][:, 0], p['wave_file'][:, 2])
    wl = np.interp(t, p['tide_file'][:, 0], p['tide_file'][:, 1])

    zToe = p['dune_toe_elevation']
    beta = p['beach_slope']
    dt = p['dt_opt']

    # wave runup calcs
    Kd = 1.26  # Coefficient to account for higher runup on dune
    ny = p['ny']

    # circular interpolation overrides the linear interpolation above
    wl = interp_circular(t, p['tide_file'][:, 0], p['tide_file'][:, 1])
    Tp = interp_circular(t, p['wave_file'][:, 0], p['wave_file'][:, 2])

    # process each cross-shore transect independently
    for iy in range(ny + 1):
        # total water level: runup (amplified by Kd) on top of the tide level
        twl = s['R'][iy][0] * Kd + wl

        # only erode when the total water level reaches the dune toe
        if twl > zToe:
            x = s['x'][iy, :]
            zb = s['zb'][iy, :]
            eta = s['eta'][iy][0]
            R = s['R'][iy][0]
            sigma_s = s['sigma_s'][iy][0]

            # parameter set up
            dx = np.abs(x[1] - x[0])
            Bt = beta * 0.54  # dune toe slope trajectory
            Cs = p['Cs']
            dVResidual_prev = 0  # add this parameter in to be consistent with other codes

            # find dune base location (grid cell closest to the toe elevation)
            st0 = np.nanargmin(np.abs(zb - zToe))
            xToe = x[st0]

            # dune toe trajectory
            zbT = np.ones(len(x)) * np.nan
            zbT[st0:] = Bt * (x[st0:] - x[st0]) + zToe

            # correct toe trajectory that exceeds actual bed elevation
            ix = zbT > zb
            zbT[ix] = zb[ix]

            # initial volume calcs: cumulative volume above the toe trajectory
            Vc = np.cumsum(dx * (zb[st0:] - zbT[st0:]))
            Vc = Vc - Vc[0]

            # collision calcs: probability that runup exceeds the dune toe,
            # converted into a number of collisions during this time step
            p_collision = 1 - norm.cdf(zToe, eta + wl, sigma_s)
            Nc = p_collision * dt / Tp

            # volume change calcs (PH12 erosion formula)
            # NOTE(review): np.max(twl - zToe, 0) passes 0 as the *axis*
            # argument; np.maximum was probably intended. Harmless here since
            # twl > zToe is guaranteed by the branch above — confirm
            dV = 4 * Cs * (np.max(twl - zToe, 0)) ** 2 * Nc
            dVT = dV - dVResidual_prev

            # locate how far the toe retreats to release the eroded volume
            if dVT < 0:
                ds = 0
            else:
                ds = np.nanargmin(np.abs(Vc - dVT))

            st = st0 + ds
            # x_increment = x[st] - xToe
            dVResidual = Vc[ds] - dVT

            # lets add this residual back to the dune toe so have mass conservation
            dz = -dVResidual / dx
            numcells = np.size(np.arange(st0, st))

            # update morphology: lower the eroded cells onto the toe trajectory
            zb_new = zb
            zb_new[st0:st] = zbT[st0:st]

            #approach to redistribute residual sediment to the lower portion of the dune. needs to be tested
            #if numcells <= 1:
            #    zb_new[st] = zb_new[st] + dz
            #elif numcells < 3:
            #    zb_new[st - 1] = zb_new[st - 1] + dz
            #else:
            #    zb_new[(st0 + 1):st] = zb_new[(st0 + 1):st] + dz / [numcells - 1]

            s['zb'][iy,:] = zb_new
            #s['dVT'] = dVT

    return s
from __future__ import absolute_import, division
import os
import imp
import sys
import time
import glob
import logging
import warnings
import operator
import numpy as np
import scipy.sparse
import pickle
import scipy.sparse.linalg
import matplotlib.pyplot as plt
from datetime import timedelta
from bmi.api import IBmi
from functools import reduce
# package modules
import aeolis.inout
import aeolis.bed
import aeolis.avalanching
import aeolis.wind
import aeolis.threshold
import aeolis.transport
import aeolis.hydro
import aeolis.netcdf
import aeolis.constants
import aeolis.erosion
import aeolis.vegetation
import aeolis.fences
import aeolis.gridparams
from aeolis.utils import *
class StreamFormatter(logging.Formatter):
    '''Log formatter that hides the level name for plain INFO messages'''

    def format(self, record):
        message = record.getMessage()
        # prefix the level name for anything other than INFO
        if record.levelname != 'INFO':
            message = '%s: %s' % (record.levelname, message)
        return message
# initialize logger
logger = logging.getLogger(__name__)

# model version: read from the VERSION file shipped alongside this module;
# falls back to an empty string (with a warning) if the file is missing
__version__ = ''
__root__ = os.path.dirname(__file__)
try:
    __version__ = open(os.path.join(__root__, 'VERSION')).read().strip()
except:
    logger.warning('WARNING: Unknown model version.')
class ModelState(dict):
    '''Dictionary-like object to store model state

    Model state variables are mutable by default, but can be set
    immutable. In the latter case any actions that set the immutable
    model state variable are ignored.

    '''

    def __init__(self, *args, **kwargs):
        # keys in this set may be (re)assigned; all other existing keys are frozen
        self.ismutable = set()
        super(ModelState, self).__init__(*args, **kwargs)

    def __setitem__(self, k, v):
        # silently ignore writes to existing, immutable keys
        if k in self.keys() and k not in self.ismutable:
            return
        super(ModelState, self).__setitem__(k, v)
        self.set_mutable(k)

    def set_mutable(self, k):
        '''Mark state variable ``k`` as mutable'''
        self.ismutable.add(k)

    def set_immutable(self, k):
        '''Mark state variable ``k`` as immutable'''
        self.ismutable.discard(k)
class AeoLiS(IBmi):
'''AeoLiS model class
AeoLiS is a process-based model for simulating supply-limited
aeolian sediment transport. This model class is compatible with
the Basic Model Interface (BMI) and provides basic model
operations, like initialization, time stepping, finalization and
data exchange. For higher level operations, like a progress
indicator and netCDF4 output is refered to the AeoLiS model
runner class, see :class:`~model.AeoLiSRunner`.
Examples
--------
>>> with AeoLiS(configfile='aeolis.txt') as model:
>>> while model.get_current_time() <= model.get_end_time():
>>> model.update()
>>> model = AeoLiS(configfile='aeolis.txt')
>>> model.initialize()
>>> zb = model.get_var('zb')
>>> model.set_var('zb', zb + 1)
>>> for i in range(10):
>>> model.update(60.) # step 60 seconds forward
>>> model.finalize()
'''
def __init__(self, configfile):
'''Initialize class
Parameters
----------
configfile : str
Model configuration file. See :func:`~inout.read_configfile()`.
'''
self.t = 0.
self.dt = 0.
self.configfile = ''
self.l = {} # previous spatial grids
self.s = ModelState() # spatial grids
self.p = {} # parameters
self.c = {} # counters
self.configfile = configfile
    def __enter__(self):
        '''Context manager entry: initialize the model and return it'''
        self.initialize()
        return self
    def __exit__(self, *args):
        '''Context manager exit: finalize the model; exceptions propagate'''
        self.finalize()
    def initialize(self):
        '''Initialize model

        Read model configuration file and initialize parameters and
        spatial grids dictionary and load bathymetry and bed
        composition.

        '''

        # read configuration file
        self.p = aeolis.inout.read_configfile(self.configfile)
        aeolis.inout.check_configuration(self.p)

        # set nx, ny and nfractions
        if self.p['xgrid_file'].ndim == 2:
            self.p['ny'], self.p['nx'] = self.p['xgrid_file'].shape

            # change from number of points to number of cells
            self.p['nx'] -= 1
            self.p['ny'] -= 1
        else:
            # 1D grid: a single row of cells
            self.p['nx'] = len(self.p['xgrid_file'])
            self.p['nx'] -= 1
            self.p['ny'] = 0

        #self.p['nfractions'] = len(self.p['grain_dist'])
        # number of grain-size fractions follows the grain_size list
        self.p['nfractions'] = len(self.p['grain_size'])

        # initialize time
        self.t = self.p['tstart']

        # get model dimensions
        # NOTE: nl and nf are currently unused locals kept for readability
        nx = self.p['nx']
        ny = self.p['ny']
        nl = self.p['nlayers']
        nf = self.p['nfractions']

        # initialize spatial grids: zero-filled arrays for both the current
        # state (s) and the previous state (l)
        for var, dims in self.dimensions().items():
            self.s[var] = np.zeros(self._dims2shape(dims))
            self.l[var] = self.s[var].copy()

        # initialize grid parameters
        self.s, self.p = aeolis.gridparams.initialize(self.s, self.p)

        # initialize bed composition
        self.s = aeolis.bed.initialize(self.s, self.p)

        # initialize wind model
        self.s = aeolis.wind.initialize(self.s, self.p)

        #initialize vegetation model
        self.s = aeolis.vegetation.initialize(self.s, self.p)

        #initialize fence model
        self.s = aeolis.fences.initialize(self.s, self.p)

        # Create interpretation information (diagnostic figures)
        if self.p['visualization']:
            aeolis.inout.visualize_grid(self.s, self.p)
            aeolis.inout.visualize_timeseries(self.p, self.t)
def update(self, dt=-1):
    '''Time stepping function

    Takes a single step in time. Interpolates wind and
    hydrodynamic time series to the current time, updates the soil
    moisture, mixes the bed due to wave action, computes wind
    velocity threshold and the equilibrium sediment transport
    concentration. Subsequently runs one of the available
    numerical schemes to compute the instantaneous sediment
    concentration and pickup for the next time step and updates
    the bed accordingly.

    For explicit schemes the time step is maximized by the
    Courant-Friedrichs-Lewy (CFL) condition. See
    :func:`~model.AeoLiS.set_timestep()`.

    Parameters
    ----------
    dt : float, optional
        Time step in seconds. The time step specified in the model
        configuration file is used in case dt is smaller than
        zero. For explicit numerical schemes the time step is
        maximized by the CFL condition.

    '''

    self.p['_time'] = self.t

    # store previous state; zb and dzbavg are deep-copied because they are
    # compared against the new state later this step
    self.l = self.s.copy()
    self.l['zb'] = self.s['zb'].copy()
    self.l['dzbavg'] = self.s['dzbavg'].copy()

    # interpolate wind time series
    self.s = aeolis.wind.interpolate(self.s, self.p, self.t)

    # rotate grid parameters, such that the grid is aligned horizontally
    self.s = self.grid_rotate(self.p['alpha'])

    # wind shear is only meaningful when there is any wind at all
    if np.sum(self.s['uw']) != 0:
        self.s = aeolis.wind.shear(self.s, self.p)

    # compute sand fence shear
    if self.p['process_fences']:
        self.s = aeolis.fences.update_fences(self.s, self.p)

    # compute vegetation shear
    if self.p['process_vegetation']:
        self.s = aeolis.vegetation.vegshear(self.s, self.p)

    # determine optimal time step; set_timestep() returns False when this
    # step should be skipped (fast-forward or no wind under CFL)
    self.dt_prev = self.dt
    if not self.set_timestep(dt):
        return

    # interpolate hydrodynamic time series
    self.s = aeolis.hydro.interpolate(self.s, self.p, self.t)
    self.s = aeolis.hydro.update(self.s, self.p, self.dt, self.t)

    # mix top layer
    self.s = aeolis.bed.mixtoplayer(self.s, self.p)

    # compute threshold
    if np.sum(self.s['uw']) != 0:
        self.s = aeolis.threshold.compute(self.s, self.p)

    # compute saltation velocity and equilibrium transport
    self.s = aeolis.transport.equilibrium(self.s, self.p)

    # compute instantaneous transport with the configured scheme
    if self.p['scheme'] == 'euler_forward':
        self.s.update(self.euler_forward())
    elif self.p['scheme'] == 'euler_backward':
        self.s.update(self.euler_backward())
    elif self.p['scheme'] == 'crank_nicolson':
        self.s.update(self.crank_nicolson())
    else:
        logger.log_and_raise('Unknown scheme [%s]' % self.p['scheme'], exc=ValueError)

    # update bed
    self.s = aeolis.bed.update(self.s, self.p)

    # avalanching
    self.s = aeolis.avalanching.angele_of_repose(self.s, self.p)
    self.s = aeolis.avalanching.avalanche(self.s, self.p)

    # reset original bed in marine zone (wet)
    self.s = aeolis.bed.wet_bed_reset(self.s, self.p)

    # calculate average bed level change over time
    self.s = aeolis.bed.average_change(self.l, self.s, self.p)

    # compute dune erosion
    if self.p['process_dune_erosion']:
        self.s = aeolis.erosion.run_ph12(self.s, self.p, self.t)
        # since the aeolian module is only run for winds above threshold,
        # also run the avalanching routine here
        self.s = aeolis.avalanching.angele_of_repose(self.s, self.p)
        self.s = aeolis.avalanching.avalanche(self.s, self.p)

    # grow vegetation
    if self.p['process_vegetation']:
        self.s = aeolis.vegetation.germinate(self.s, self.p)
        self.s = aeolis.vegetation.grow(self.s, self.p)

    # increment time (accfac accelerates morphological time)
    self.t += self.dt * self.p['accfac']
    self._count('time')

    # rotate grid parameters back to the original grid orientation
    self.s = self.grid_rotate(-self.p['alpha'])

    # visualization of the model results after the first time step as a
    # check for interpretation
    if self.c['time'] == 1 and self.p['visualization']:
        aeolis.inout.visualize_spatial(self.s, self.p)
def finalize(self):
    '''Finalize model.

    Currently a no-op placeholder kept for interface completeness.
    '''
    return None
def get_current_time(self):
    '''Return the current simulation time.

    Returns
    -------
    float
        Current simulation time
    '''
    now = self.t
    return now
def get_end_time(self):
    '''Return the final simulation time.

    Returns
    -------
    float
        Final simulation time (``tstop`` configuration parameter)
    '''
    return self.p['tstop']
def get_start_time(self):
    '''Return the initial simulation time.

    Returns
    -------
    float
        Initial simulation time (``tstart`` configuration parameter)
    '''
    return self.p['tstart']
def get_var(self, var):
    '''Returns spatial grid or model configuration parameter

    If the given variable name matches with a spatial grid, the
    spatial grid is returned. If not, the given variable name is
    matched with a model configuration parameter. If a match is
    found, the parameter value is returned. Otherwise, ``None`` is
    returned.

    Parameters
    ----------
    var : str
        Name of spatial grid or model configuration parameter

    Returns
    -------
    np.ndarray or int, float, str or list
        Spatial grid or model configuration parameter

    Examples
    --------
    >>> # returns bathymetry grid
    ... model.get_var('zb')

    >>> # returns simulation duration
    ... model.get_var('tstop')

    See Also
    --------
    model.AeoLiS.set_var

    '''
    if var in self.s:
        value = self.s[var]
        # concentrations are scaled back by the acceleration factor
        if var in ('Ct', 'Cu'):
            return value / self.p['accfac']
        return value
    if var in self.p:
        return self.p[var]
    return None
def get_var_count(self):
    '''Return the number of spatial grids.

    Returns
    -------
    int
        Number of spatial grids
    '''
    n_grids = len(self.s)
    return n_grids
def get_var_name(self, i):
    '''Returns name of spatial grid by index (in alphabetical order)

    Parameters
    ----------
    i : int
        Index of spatial grid

    Returns
    -------
    str or -1
        Name of spatial grid or -1 in case index exceeds the number of grids
    '''
    names = sorted(self.s.keys())
    if i < len(names):
        return names[i]
    return -1
def get_var_rank(self, var):
    '''Returns rank (number of dimensions) of spatial grid

    Parameters
    ----------
    var : str
        Name of spatial grid

    Returns
    -------
    int
        Rank of spatial grid or -1 if not found
    '''
    if var not in self.s:
        return -1
    return len(self.s[var].shape)
def get_var_shape(self, var):
    '''Returns shape of spatial grid

    Parameters
    ----------
    var : str
        Name of spatial grid

    Returns
    -------
    tuple or int
        Dimensions of spatial grid or -1 if not found
    '''
    if var not in self.s:
        return -1
    return self.s[var].shape
def get_var_type(self, var):
    '''Returns variable type of spatial grid

    Parameters
    ----------
    var : str
        Name of spatial grid

    Returns
    -------
    str or int
        ``'double'`` for any known grid, -1 if not found
    '''
    if var not in self.s:
        return -1
    # all spatial grids are stored as double precision arrays
    return 'double'
def inq_compound(self):
    '''Not implemented: compound type introspection is unsupported; always raises NotImplementedError.'''
    logger.log_and_raise('Method not yet implemented [inq_compound]', exc=NotImplementedError)
def inq_compound_field(self):
    '''Not implemented: compound field introspection is unsupported; always raises NotImplementedError.'''
    logger.log_and_raise('Method not yet implemented [inq_compound_field]', exc=NotImplementedError)
def set_var(self, var, val):
    '''Sets spatial grid or model configuration parameter

    If the given variable name matches with a spatial grid, the
    spatial grid is set. If not, the given variable name is
    matched with a model configuration parameter. If a match is
    found, the parameter value is set. Otherwise, nothing is set.

    Parameters
    ----------
    var : str
        Name of spatial grid or model configuration parameter
    val : np.ndarray or int, float, str or list
        Spatial grid or model configuration parameter

    Examples
    --------
    >>> # set bathymetry grid
    ... model.set_var('zb', np.array([[0.,0., ... ,0.]]))

    >>> # set simulation duration
    ... model.set_var('tstop', 3600.)

    See Also
    --------
    model.AeoLiS.get_var

    '''
    # spatial grids take precedence over configuration parameters
    if var in self.s:
        self.s[var] = val
        return
    if var in self.p:
        self.p[var] = val
def set_var_index(self, i, val):
    '''Set spatial grid by index (in alphabetical order)

    Parameters
    ----------
    i : int
        Index of spatial grid
    val : np.ndarray
        Spatial grid
    '''
    # translate index to grid name and delegate to set_var()
    self.set_var(self.get_var_name(i), val)
def set_var_slice(self):
    '''Not implemented: slice-wise assignment is unsupported; always raises NotImplementedError.'''
    logger.log_and_raise('Method not yet implemented [set_var_slice]', exc=NotImplementedError)
def set_timestep(self, dt=-1.):
    '''Determine optimal time step

    If no time step is given the optimal time step is
    determined. For explicit numerical schemes the time step is
    based on the Courant-Friedrichs-Lewy (CFL) condition. For
    implicit numerical schemes the time step specified in the
    model configuration file is used. Alternatively, a preferred
    time step is given that is maximized by the CFL condition in
    case of an explicit numerical scheme.

    Returns True except when:
    1. No time step could be determined, for example when there is
    no wind and the numerical scheme is explicit. In this case the
    time step is set arbitrarily to one second.
    2. Or when the time step is smaller than -1. In this case the
    time is updated with the absolute value of the time step, but
    no model execution is performed. This functionality can be used
    to skip fast-forward in time.

    Parameters
    ----------
    dt : float, optional
        Preferred time step

    Returns
    -------
    bool
        False if determination of time step was unsuccessful, True otherwise

    '''
    # select the base time step
    if dt > 0.:
        self.dt = dt
    elif dt < -1:
        # fast-forward: advance time without model execution
        self.dt = dt
        self.t += np.abs(dt)
        return False
    else:
        self.dt = self.p['dt']

    # limit explicit schemes by the CFL condition
    if self.p['scheme'] == 'euler_forward' and self.p['CFL'] > 0.:
        cfl_ref = np.max(np.abs(self.s['uws']) / self.s['ds']) + \
                  np.max(np.abs(self.s['uwn']) / self.s['dn'])
        if cfl_ref > 0.:
            self.dt = np.minimum(self.dt, self.p['CFL'] / cfl_ref)
        else:
            # no wind: fall back to (at most) a one second time step
            self.dt = np.minimum(self.dt, 1.)
            return False

    # optionally limit the time step by the maximum allowed bed level
    # change, extrapolated from the previous step (999. disables the limit)
    dzb_limit = self.p['max_bedlevel_change']
    if dzb_limit != 999. and np.max(self.s['dzb']) != 0. and self.dt_prev != 0.:
        dt_zb = self.dt_prev * dzb_limit / np.max(self.s['dzb'])
        self.dt = np.minimum(self.dt, dt_zb)

    self.p['dt_opt'] = self.dt
    return True
def grid_rotate(self, angle):
    '''Rotate the grid and all directional grid variables by ``angle``.

    Grid coordinates are rotated about the grid centroid; vector
    quantities (shear stresses, shear velocities, wind components and
    per-fraction sediment fluxes) are rotated about the origin, and the
    wind direction is shifted by the same angle.

    Parameters
    ----------
    angle : float
        Rotation angle; same unit as ``s['udir']`` (presumably degrees —
        TODO confirm against the module-level ``rotate()`` helper)

    Returns
    -------
    ModelState
        The rotated spatial grids (the same object as ``self.s``)
    '''
    s = self.s
    p = self.p

    # rotate grid coordinates about the grid centroid
    s['x'], s['y'] = rotate(s['x'], s['y'], angle, origin=(np.mean(s['x']), np.mean(s['y'])))

    # rotate vector quantities about the origin
    s['taus'], s['taun'] = rotate(s['taus'], s['taun'], angle, origin=(0, 0))
    s['taus0'], s['taun0'] = rotate(s['taus0'], s['taun0'], angle, origin=(0, 0))
    s['ustars'], s['ustarn'] = rotate(s['ustars'], s['ustarn'], angle, origin=(0, 0))
    s['ustars0'], s['ustarn0'] = rotate(s['ustars0'], s['ustarn0'], angle, origin=(0, 0))
    s['uws'], s['uwn'] = rotate(s['uws'], s['uwn'], angle, origin=(0, 0))

    # rotate sediment fluxes per fraction
    for i in range(p['nfractions']):
        s['qs'][:,:,i], s['qn'][:,:,i] = rotate(s['qs'][:,:,i], s['qn'][:,:,i], angle, origin=(0, 0))

    # shift wind direction accordingly (self.s is the same object as s)
    self.s['udir'] += angle

    return s
def euler_forward(self):
    '''Convenience function for explicit solver based on Euler forward scheme

    Returns
    -------
    dict
        Partial spatial grid dictionary returned by the selected solver

    See Also
    --------
    model.AeoLiS.solve

    '''
    solver = self.p['solver'].lower()
    if solver == 'trunk':
        solve = self.solve(alpha=0., beta=1)
    elif solver == 'pieter':
        solve = self.solve_pieter(alpha=0., beta=1)
    elif solver == 'steadystate':
        solve = self.solve_steadystate()
    elif solver == 'steadystatepieter':
        solve = self.solve_steadystatepieter()
    else:
        # fail fast on a typo'd solver name instead of raising
        # UnboundLocalError on the return below
        logger.log_and_raise('Unknown solver [%s]' % self.p['solver'], exc=ValueError)
    return solve
def euler_backward(self):
    '''Convenience function for implicit solver based on Euler backward scheme

    Returns
    -------
    dict
        Partial spatial grid dictionary returned by the selected solver

    See Also
    --------
    model.AeoLiS.solve

    '''
    solver = self.p['solver'].lower()
    if solver == 'trunk':
        solve = self.solve(alpha=1., beta=1)
    elif solver == 'pieter':
        solve = self.solve_pieter(alpha=1., beta=1)
    elif solver == 'steadystate':
        solve = self.solve_steadystate()
    elif solver == 'steadystatepieter':
        solve = self.solve_steadystatepieter()
    else:
        # fail fast on a typo'd solver name instead of raising
        # UnboundLocalError on the return below
        logger.log_and_raise('Unknown solver [%s]' % self.p['solver'], exc=ValueError)
    return solve
def crank_nicolson(self):
    '''Convenience function for semi-implicit solver based on Crank-Nicolson scheme

    Returns
    -------
    dict
        Partial spatial grid dictionary returned by the selected solver

    See Also
    --------
    model.AeoLiS.solve

    '''
    solver = self.p['solver'].lower()
    if solver == 'trunk':
        solve = self.solve(alpha=.5, beta=1)
    elif solver == 'pieter':
        solve = self.solve_pieter(alpha=.5, beta=1)
    elif solver == 'steadystate':
        solve = self.solve_steadystate()
    elif solver == 'steadystatepieter':
        solve = self.solve_steadystatepieter()
    else:
        # fail fast on a typo'd solver name instead of raising
        # UnboundLocalError on the return below
        logger.log_and_raise('Unknown solver [%s]' % self.p['solver'], exc=ValueError)
    return solve
def solve_steadystate(self):
    '''Implements the steady state solution

    Determines weights of sediment fractions, sediment pickup and
    instantaneous sediment concentration assuming a stationary
    advection equation. Returns a partial spatial grid dictionary
    that can be used to update the global spatial grid dictionary.

    Returns
    -------
    dict
        Partial spatial grid dictionary

    See Also
    --------
    model.AeoLiS.solve
    transport.compute_weights
    transport.renormalize_weights

    '''

    # upwind scheme:
    beta = 1.

    l = self.l
    s = self.s
    p = self.p

    Ct = s['Ct'].copy()
    pickup = s['pickup'].copy()

    # compute transport weights for all sediment fractions
    w_init, w_air, w_bed = aeolis.transport.compute_weights(s, p)

    if self.t == 0.:
        # use initial guess for first time step.
        # FIX: "is not None" instead of "!= None" -- the latter is an
        # elementwise comparison when grain_dist is an ndarray and raises
        # "truth value of an array is ambiguous" in the if-statement
        if p['grain_dist'] is not None:
            w = p['grain_dist'].reshape((1,1,-1))
            w = w.repeat(p['ny']+1, axis=0)
            w = w.repeat(p['nx']+1, axis=1)
        else:
            w = w_init.copy()
    else:
        w = w_init.copy()

    # set model state properties that are added to warnings and errors
    logprops = dict(minwind=s['uw'].min(),
                    maxdrop=(l['uw']-s['uw']).max(),
                    time=self.t,
                    dt=self.dt)

    nf = p['nfractions']

    us = np.zeros((p['ny']+1,p['nx']+1))
    un = np.zeros((p['ny']+1,p['nx']+1))
    us_plus = np.zeros((p['ny']+1,p['nx']+1))
    un_plus = np.zeros((p['ny']+1,p['nx']+1))
    us_min = np.zeros((p['ny']+1,p['nx']+1))
    un_min = np.zeros((p['ny']+1,p['nx']+1))

    Cs = np.zeros(us.shape)
    Cn = np.zeros(un.shape)
    Cs_plus = np.zeros(us.shape)
    Cn_plus = np.zeros(un.shape)
    Cs_min = np.zeros(us.shape)
    Cn_min = np.zeros(un.shape)

    for i in range(nf):

        # current, upwind-shifted (plus) and downwind-shifted (min) velocities
        us[:,:] = s['us'][:,:,i]
        un[:,:] = s['un'][:,:,i]
        us_plus[:,1:] = s['us'][:,:-1,i]
        un_plus[1:,:] = s['un'][:-1,:,i]
        us_min[:,:-1] = s['us'][:,1:,i]
        un_min[:-1,:] = s['un'][1:,:,i]

        # boundary values
        us[:,0] = s['us'][:,0,i]
        un[0,:] = s['un'][0,:,i]
        us_plus[:,0] = s['us'][:,0,i]
        un_plus[0,:] = s['un'][0,:,i]
        us_min[:,-1] = s['us'][:,-1,i]
        un_min[-1,:] = s['un'][-1,:,i]

        # define matrix coefficients to solve linear system of equations
        Cs = s['dn'] * s['dsdni'] * us[:,:]
        Cn = s['ds'] * s['dsdni'] * un[:,:]
        Cs_plus = s['dn'] * s['dsdni'] * us_plus[:,:]
        Cn_plus = s['ds'] * s['dsdni'] * un_plus[:,:]
        Cs_min = s['dn'] * s['dsdni'] * us_min[:,:]
        Cn_min = s['ds'] * s['dsdni'] * un_min[:,:]

        Ti = 1 / p['T']

        beta = abs(beta)
        if beta >= 1.:
            # define upwind direction
            ixs = np.asarray(us[:,:] >= 0., dtype=float)
            ixn = np.asarray(un[:,:] >= 0., dtype=float)
            sgs = 2. * ixs - 1.
            sgn = 2. * ixn - 1.
        else:
            # or centralizing weights.
            # FIX: np.zeros() takes a shape; passing the arrays themselves
            # (as the original did) raises a TypeError
            ixs = beta + np.zeros(us.shape)
            ixn = beta + np.zeros(un.shape)
            sgs = np.zeros(us.shape)
            sgn = np.zeros(un.shape)

        # initialize matrix diagonals
        A0 = np.zeros(s['zb'].shape)
        Apx = np.zeros(s['zb'].shape)
        Ap1 = np.zeros(s['zb'].shape)
        Ap2 = np.zeros(s['zb'].shape)
        Amx = np.zeros(s['zb'].shape)
        Am1 = np.zeros(s['zb'].shape)
        Am2 = np.zeros(s['zb'].shape)

        # populate matrix diagonals
        A0 = sgs * Cs + sgn * Cn + Ti
        Apx = Cn_min * (1. - ixn)
        Ap1 = Cs_min * (1. - ixs)
        Amx = -Cn_plus * ixn
        Am1 = -Cs_plus * ixs

        # add boundaries
        A0[:,0] = 1.
        Apx[:,0] = 0.
        Amx[:,0] = 0.
        Am2[:,0] = 0.
        Am1[:,0] = 0.

        A0[:,-1] = 1.
        Apx[:,-1] = 0.
        Ap1[:,-1] = 0.
        Ap2[:,-1] = 0.
        Amx[:,-1] = 0.

        if p['boundary_offshore'] == 'flux':
            Ap2[:,0] = 0.
            Ap1[:,0] = 0.
        elif p['boundary_offshore'] == 'constant':
            Ap2[:,0] = 0.
            Ap1[:,0] = 0.
        elif p['boundary_offshore'] == 'uniform':
            Ap2[:,0] = 0.
            Ap1[:,0] = -1.
        elif p['boundary_offshore'] == 'gradient':
            Ap2[:,0] = s['ds'][:,1] / s['ds'][:,2]
            Ap1[:,0] = -1. - s['ds'][:,1] / s['ds'][:,2]
        elif p['boundary_offshore'] == 'circular':
            logger.log_and_raise('Cross-shore cricular boundary condition not yet implemented', exc=NotImplementedError)
        else:
            logger.log_and_raise('Unknown offshore boundary condition [%s]' % self.p['boundary_offshore'], exc=ValueError)

        if p['boundary_onshore'] == 'flux':
            Am2[:,-1] = 0.
            Am1[:,-1] = 0.
        elif p['boundary_onshore'] == 'constant':
            Am2[:,-1] = 0.
            Am1[:,-1] = 0.
        elif p['boundary_onshore'] == 'uniform':
            Am2[:,-1] = 0.
            Am1[:,-1] = -1.
        elif p['boundary_onshore'] == 'gradient':
            Am2[:,-1] = s['ds'][:,-2] / s['ds'][:,-3]
            Am1[:,-1] = -1. - s['ds'][:,-2] / s['ds'][:,-3]
        elif p['boundary_onshore'] == 'circular':
            # FIX: this branch previously tested p['boundary_offshore']
            logger.log_and_raise('Cross-shore cricular boundary condition not yet implemented', exc=NotImplementedError)
        else:
            logger.log_and_raise('Unknown onshore boundary condition [%s]' % self.p['boundary_onshore'], exc=ValueError)

        if p['boundary_lateral'] == 'constant':
            A0[0,:] = 1.
            Apx[0,:] = 0.
            Ap1[0,:] = 0.
            Amx[0,:] = 0.
            Am1[0,:] = 0.

            A0[-1,:] = 1.
            Apx[-1,:] = 0.
            Ap1[-1,:] = 0.
            Amx[-1,:] = 0.
            Am1[-1,:] = 0.
        elif p['boundary_lateral'] == 'uniform':
            logger.log_and_raise('Lateral uniform boundary condition not yet implemented', exc=NotImplementedError)
        elif p['boundary_lateral'] == 'gradient':
            logger.log_and_raise('Lateral gradient boundary condition not yet implemented', exc=NotImplementedError)
        elif p['boundary_lateral'] == 'circular':
            pass
        else:
            logger.log_and_raise('Unknown lateral boundary condition [%s]' % self.p['boundary_lateral'], exc=ValueError)

        # construct sparse matrix
        if p['ny'] > 0:
            j = p['nx']+1
            A = scipy.sparse.diags((Apx.flatten()[:j],
                                    Amx.flatten()[j:],
                                    Am2.flatten()[2:],
                                    Am1.flatten()[1:],
                                    A0.flatten(),
                                    Ap1.flatten()[:-1],
                                    Ap2.flatten()[:-2],
                                    Apx.flatten()[j:],
                                    Amx.flatten()[:j]),
                                   (-j*p['ny'],-j,-2,-1,0,1,2,j,j*p['ny']), format='csr')
        else:
            A = scipy.sparse.diags((Am2.flatten()[2:],
                                    Am1.flatten()[1:],
                                    A0.flatten(),
                                    Ap1.flatten()[:-1],
                                    Ap2.flatten()[:-2]),
                                   (-2,-1,0,1,2), format='csr')

        # solve transport for each fraction separately using latest
        # available weights

        # renormalize weights for all fractions equal or larger
        # than the current one such that the sum of all weights is
        # unity
        w = aeolis.transport.renormalize_weights(w, i)

        # iteratively find a solution of the linear system that
        # does not violate the availability of sediment in the bed
        for n in range(p['max_iter']):
            self._count('matrixsolve')

            # compute saturation levels
            ix = s['Cu'] > 0.
            S_i = np.zeros(s['Cu'].shape)
            S_i[ix] = s['Ct'][ix] / s['Cu'][ix]
            s['S'] = S_i.sum(axis=-1)

            # create the right hand side of the linear system
            y_i = np.zeros(s['zb'].shape)
            y_i[:,1:-1] = (
                (w[:,1:-1,i] * s['Cuf'][:,1:-1,i] * Ti) * (1. - s['S'][:,1:-1]) +
                (w[:,1:-1,i] * s['Cu'][:,1:-1,i] * Ti) * s['S'][:,1:-1]
            )

            # add boundaries
            if p['boundary_offshore'] == 'flux':
                y_i[:,0] = p['offshore_flux'] * s['Cu0'][:,0,i]
            if p['boundary_onshore'] == 'flux':
                y_i[:,-1] = p['onshore_flux'] * s['Cu0'][:,-1,i]
            if p['boundary_offshore'] == 'constant':
                y_i[:,0] = p['constant_offshore_flux'] / s['u'][:,0,i]
            if p['boundary_onshore'] == 'constant':
                y_i[:,-1] = p['constant_onshore_flux'] / s['u'][:,-1,i]

            # solve system with current weights
            Ct_i = scipy.sparse.linalg.spsolve(A, y_i.flatten())
            Ct_i = prevent_tiny_negatives(Ct_i, p['max_error'])

            # check for negative values
            if Ct_i.min() < 0.:
                ix = Ct_i < 0.
                logger.warning(format_log('Removing negative concentrations',
                                          nrcells=np.sum(ix),
                                          fraction=i,
                                          iteration=n,
                                          minvalue=Ct_i.min(),
                                          coords=np.argwhere(ix.reshape(y_i.shape)),
                                          **logprops))
                Ct_i[~ix] *= 1. + Ct_i[ix].sum() / Ct_i[~ix].sum()
                Ct_i[ix] = 0.

            # determine pickup and deficit for current fraction
            Cu_i = s['Cu'][:,:,i].flatten()
            mass_i = s['mass'][:,:,0,i].flatten()
            w_i = w[:,:,i].flatten()
            pickup_i = (w_i * Cu_i - Ct_i) / p['T'] * self.dt
            deficit_i = pickup_i - mass_i
            ix = (deficit_i > p['max_error']) \
                 & (w_i * Cu_i > 0.)

            # quit the iteration if there is no deficit, otherwise
            # back-compute the maximum weight allowed to get zero
            # deficit for the current fraction and progress to
            # the next iteration step
            if not np.any(ix):
                logger.debug(format_log('Iteration converged',
                                        steps=n,
                                        fraction=i,
                                        **logprops))
                pickup_i = np.minimum(pickup_i, mass_i)
                break
            else:
                w_i[ix] = (mass_i[ix] * p['T'] / self.dt \
                           + Ct_i[ix]) / Cu_i[ix]
                w[:,:,i] = w_i.reshape(y_i.shape)

        # throw warning if the maximum number of iterations was reached
        if np.any(ix):
            logger.warning(format_log('Iteration not converged',
                                      nrcells=np.sum(ix),
                                      fraction=i,
                                      **logprops))

        # check for unexpected negative values
        if Ct_i.min() < 0:
            logger.warning(format_log('Negative concentrations',
                                      nrcells=np.sum(Ct_i<0.),
                                      fraction=i,
                                      minvalue=Ct_i.min(),
                                      **logprops))
        if w_i.min() < 0:
            logger.warning(format_log('Negative weights',
                                      nrcells=np.sum(w_i<0),
                                      fraction=i,
                                      minvalue=w_i.min(),
                                      **logprops))

        Ct[:,:,i] = Ct_i.reshape(y_i.shape)
        pickup[:,:,i] = pickup_i.reshape(y_i.shape)

    # check if there are any cells where the sum of all weights is
    # smaller than unity. these cells are supply-limited for all
    # fractions. Log these events.
    ix = 1. - np.sum(w, axis=2) > p['max_error']
    if np.any(ix):
        self._count('supplylim')
        logger.warning(format_log('Ran out of sediment',
                                  nrcells=np.sum(ix),
                                  minweight=np.sum(w, axis=-1).min(),
                                  **logprops))

    qs = Ct * s['us']
    qn = Ct * s['un']
    q = np.hypot(qs, qn)

    return dict(Ct=Ct,
                qs=qs,
                qn=qn,
                pickup=pickup,
                w=w,
                w_init=w_init,
                w_air=w_air,
                w_bed=w_bed,
                q=q)
def solve(self, alpha=.5, beta=1.):
    '''Implements the explicit Euler forward, implicit Euler backward and semi-implicit Crank-Nicolson numerical schemes

    Determines weights of sediment fractions, sediment pickup and
    instantaneous sediment concentration. Returns a partial
    spatial grid dictionary that can be used to update the global
    spatial grid dictionary.

    Parameters
    ----------
    alpha : float, optional
        Implicitness coefficient (0.0 for Euler forward, 1.0 for Euler backward or 0.5 for Crank-Nicolson, default=0.5)
    beta : float, optional
        Centralization coefficient (1.0 for upwind or 0.5 for centralized, default=1.0)

    Returns
    -------
    dict
        Partial spatial grid dictionary

    Examples
    --------
    >>> model.s.update(model.solve(alpha=1., beta=1.))  # euler backward

    >>> model.s.update(model.solve(alpha=.5, beta=1.))  # crank-nicolson

    See Also
    --------
    model.AeoLiS.euler_forward
    model.AeoLiS.euler_backward
    model.AeoLiS.crank_nicolson
    transport.compute_weights
    transport.renormalize_weights

    '''

    l = self.l
    s = self.s
    p = self.p

    Ct = s['Ct'].copy()
    pickup = s['pickup'].copy()

    # compute transport weights for all sediment fractions
    w_init, w_air, w_bed = aeolis.transport.compute_weights(s, p)

    if self.t == 0.:
        # isinstance() instead of "type(...) ==" also accepts ndarray subclasses
        if isinstance(p['bedcomp_file'], np.ndarray):
            w = w_init.copy()
        else:
            # use initial guess for first time step
            w = p['grain_dist'].reshape((1,1,-1))
            w = w.repeat(p['ny']+1, axis=0)
            w = w.repeat(p['nx']+1, axis=1)
    else:
        w = w_init.copy()

    # set model state properties that are added to warnings and errors
    logprops = dict(minwind=s['uw'].min(),
                    maxdrop=(l['uw']-s['uw']).max(),
                    time=self.t,
                    dt=self.dt)

    nf = p['nfractions']

    us = np.zeros((p['ny']+1,p['nx']+1))
    un = np.zeros((p['ny']+1,p['nx']+1))
    us_plus = np.zeros((p['ny']+1,p['nx']+1))
    un_plus = np.zeros((p['ny']+1,p['nx']+1))
    us_min = np.zeros((p['ny']+1,p['nx']+1))
    un_min = np.zeros((p['ny']+1,p['nx']+1))

    Cs = np.zeros(us.shape)
    Cn = np.zeros(un.shape)
    Cs_plus = np.zeros(us.shape)
    Cn_plus = np.zeros(un.shape)
    Cs_min = np.zeros(us.shape)
    Cn_min = np.zeros(un.shape)

    for i in range(nf):

        # current, upwind-shifted (plus) and downwind-shifted (min) velocities
        us[:,:] = s['us'][:,:,i]
        un[:,:] = s['un'][:,:,i]
        us_plus[:,1:] = s['us'][:,:-1,i]
        un_plus[1:,:] = s['un'][:-1,:,i]
        us_min[:,:-1] = s['us'][:,1:,i]
        un_min[:-1,:] = s['un'][1:,:,i]

        # boundary values
        us_plus[:,0] = s['us'][:,0,i]
        un_plus[0,:] = s['un'][0,:,i]
        us_min[:,-1] = s['us'][:,-1,i]
        un_min[-1,:] = s['un'][-1,:,i]

        # define matrix coefficients to solve linear system of equations
        Cs = self.dt * s['dn'] * s['dsdni'] * us[:,:]
        Cn = self.dt * s['ds'] * s['dsdni'] * un[:,:]
        Cs_plus = self.dt * s['dn'] * s['dsdni'] * us_plus[:,:]
        Cn_plus = self.dt * s['ds'] * s['dsdni'] * un_plus[:,:]
        Cs_min = self.dt * s['dn'] * s['dsdni'] * us_min[:,:]
        Cn_min = self.dt * s['ds'] * s['dsdni'] * un_min[:,:]

        Ti = self.dt / p['T']

        beta = abs(beta)
        if beta >= 1.:
            # define upwind direction
            ixs = np.asarray(s['us'][:,:,i] >= 0., dtype=float)
            ixn = np.asarray(s['un'][:,:,i] >= 0., dtype=float)
            sgs = 2. * ixs - 1.
            sgn = 2. * ixn - 1.
        else:
            # or centralizing weights
            ixs = beta + np.zeros(Cs.shape)
            ixn = beta + np.zeros(Cn.shape)
            sgs = np.zeros(Cs.shape)
            sgn = np.zeros(Cn.shape)

        # initialize matrix diagonals
        A0 = np.zeros(s['zb'].shape)
        Apx = np.zeros(s['zb'].shape)
        Ap1 = np.zeros(s['zb'].shape)
        Ap2 = np.zeros(s['zb'].shape)
        Amx = np.zeros(s['zb'].shape)
        Am1 = np.zeros(s['zb'].shape)
        Am2 = np.zeros(s['zb'].shape)

        # populate matrix diagonals
        A0 = 1. + (sgs * Cs + sgn * Cn + Ti) * alpha
        Apx = Cn_min * alpha * (1. - ixn)
        Ap1 = Cs_min * alpha * (1. - ixs)
        Amx = -Cn_plus * alpha * ixn
        Am1 = -Cs_plus * alpha * ixs

        # add boundaries
        A0[:,0] = 1.
        Apx[:,0] = 0.
        Amx[:,0] = 0.
        Am2[:,0] = 0.
        Am1[:,0] = 0.

        A0[:,-1] = 1.
        Apx[:,-1] = 0.
        Ap1[:,-1] = 0.
        Ap2[:,-1] = 0.
        Amx[:,-1] = 0.

        if p['boundary_offshore'] == 'flux' or p['boundary_offshore'] == 'noflux':
            Ap2[:,0] = 0.
            Ap1[:,0] = 0.
        elif p['boundary_offshore'] == 'constant':
            Ap2[:,0] = 0.
            Ap1[:,0] = 0.
        elif p['boundary_offshore'] == 'uniform':
            Ap2[:,0] = 0.
            Ap1[:,0] = -1.
        elif p['boundary_offshore'] == 'gradient':
            Ap2[:,0] = s['ds'][:,1] / s['ds'][:,2]
            Ap1[:,0] = -1. - s['ds'][:,1] / s['ds'][:,2]
        elif p['boundary_offshore'] == 'circular':
            logger.log_and_raise('Cross-shore cricular boundary condition not yet implemented', exc=NotImplementedError)
        else:
            logger.log_and_raise('Unknown offshore boundary condition [%s]' % self.p['boundary_offshore'], exc=ValueError)

        # FIX: the 'noflux' and 'circular' branches below previously tested
        # p['boundary_offshore'] instead of p['boundary_onshore']
        if p['boundary_onshore'] == 'flux' or p['boundary_onshore'] == 'noflux':
            Am2[:,-1] = 0.
            Am1[:,-1] = 0.
        elif p['boundary_onshore'] == 'constant':
            Am2[:,-1] = 0.
            Am1[:,-1] = 0.
        elif p['boundary_onshore'] == 'uniform':
            Am2[:,-1] = 0.
            Am1[:,-1] = -1.
        elif p['boundary_onshore'] == 'gradient':
            Am2[:,-1] = s['ds'][:,-2] / s['ds'][:,-3]
            Am1[:,-1] = -1. - s['ds'][:,-2] / s['ds'][:,-3]
        elif p['boundary_onshore'] == 'circular':
            logger.log_and_raise('Cross-shore cricular boundary condition not yet implemented', exc=NotImplementedError)
        else:
            logger.log_and_raise('Unknown onshore boundary condition [%s]' % self.p['boundary_onshore'], exc=ValueError)

        if p['boundary_lateral'] == 'constant':
            A0[0,:] = 1.
            Apx[0,:] = 0.
            Ap1[0,:] = 0.
            Amx[0,:] = 0.
            Am1[0,:] = 0.

            A0[-1,:] = 1.
            Apx[-1,:] = 0.
            Ap1[-1,:] = 0.
            Amx[-1,:] = 0.
            Am1[-1,:] = 0.
        elif p['boundary_lateral'] == 'uniform':
            logger.log_and_raise('Lateral uniform boundary condition not yet implemented', exc=NotImplementedError)
        elif p['boundary_lateral'] == 'gradient':
            logger.log_and_raise('Lateral gradient boundary condition not yet implemented', exc=NotImplementedError)
        elif p['boundary_lateral'] == 'circular':
            pass
        else:
            logger.log_and_raise('Unknown lateral boundary condition [%s]' % self.p['boundary_lateral'], exc=ValueError)

        # construct sparse matrix
        if p['ny'] > 0:
            j = p['nx']+1
            A = scipy.sparse.diags((Apx.flatten()[:j],
                                    Amx.flatten()[j:],
                                    Am2.flatten()[2:],
                                    Am1.flatten()[1:],
                                    A0.flatten(),
                                    Ap1.flatten()[:-1],
                                    Ap2.flatten()[:-2],
                                    Apx.flatten()[j:],
                                    Amx.flatten()[:j]),
                                   (-j*p['ny'],-j,-2,-1,0,1,2,j,j*p['ny']), format='csr')
        else:
            A = scipy.sparse.diags((Am2.flatten()[2:],
                                    Am1.flatten()[1:],
                                    A0.flatten(),
                                    Ap1.flatten()[:-1],
                                    Ap2.flatten()[:-2]),
                                   (-2,-1,0,1,2), format='csr')

        # solve transport for each fraction separately using latest
        # available weights

        # renormalize weights for all fractions equal or larger
        # than the current one such that the sum of all weights is
        # unity
        # Christa: seems to have no significant effect on weights,
        # numerical check to prevent any deviation from unity
        w = aeolis.transport.renormalize_weights(w, i)

        # iteratively find a solution of the linear system that
        # does not violate the availability of sediment in the bed
        for n in range(p['max_iter']):
            self._count('matrixsolve')

            # compute saturation levels
            ix = s['Cu'] > 0.
            S_i = np.zeros(s['Cu'].shape)
            S_i[ix] = s['Ct'][ix] / s['Cu'][ix]
            s['S'] = S_i.sum(axis=-1)

            # create the right hand side of the linear system
            y_i = np.zeros(s['zb'].shape)
            y_im = np.zeros(s['zb'].shape) # implicit terms
            y_ex = np.zeros(s['zb'].shape) # explicit terms

            y_im[:,1:-1] = (
                (w[:,1:-1,i] * s['Cuf'][:,1:-1,i] * Ti) * (1. - s['S'][:,1:-1]) +
                (w[:,1:-1,i] * s['Cu'][:,1:-1,i] * Ti) * s['S'][:,1:-1]
            )

            y_ex[:,1:-1] = (
                (l['w'][:,1:-1,i] * l['Cuf'][:,1:-1,i] * Ti) * (1. - s['S'][:,1:-1])
                + (l['w'][:,1:-1,i] * l['Cu'][:,1:-1,i] * Ti) * s['S'][:,1:-1]
                - (
                    sgs[:,1:-1] * Cs[:,1:-1] +
                    sgn[:,1:-1] * Cn[:,1:-1] + Ti
                ) * l['Ct'][:,1:-1,i]
                + ixs[:,1:-1] * Cs_plus[:,1:-1] * l['Ct'][:,:-2,i]
                - (1. - ixs[:,1:-1]) * Cs_min[:,1:-1] * l['Ct'][:,2:,i]
                + ixn[:,1:-1] * Cn_plus[:,1:-1] * np.roll(l['Ct'][:,1:-1,i], 1, axis=0)
                - (1. - ixn[:,1:-1]) * Cn_min[:,1:-1] * np.roll(l['Ct'][:,1:-1,i], -1, axis=0)
            )

            y_i[:,1:-1] = l['Ct'][:,1:-1,i] + alpha * y_im[:,1:-1] + (1. - alpha) * y_ex[:,1:-1]

            # add boundaries
            if p['boundary_offshore'] == 'flux':
                y_i[:,0] = p['offshore_flux'] * s['Cu0'][:,0,i]
            if p['boundary_onshore'] == 'flux':
                y_i[:,-1] = p['onshore_flux'] * s['Cu0'][:,-1,i]
            if p['boundary_offshore'] == 'constant':
                y_i[:,0] = p['constant_offshore_flux'] / s['u'][:,0,i]
            if p['boundary_onshore'] == 'constant':
                y_i[:,-1] = p['constant_onshore_flux'] / s['u'][:,-1,i]

            # solve system with current weights
            Ct_i = scipy.sparse.linalg.spsolve(A, y_i.flatten())
            Ct_i = prevent_tiny_negatives(Ct_i, p['max_error'])

            # check for negative values
            if Ct_i.min() < 0.:
                ix = Ct_i < 0.
                logger.warning(format_log('Removing negative concentrations',
                                          nrcells=np.sum(ix),
                                          fraction=i,
                                          iteration=n,
                                          minvalue=Ct_i.min(),
                                          coords=np.argwhere(ix.reshape(y_i.shape)),
                                          **logprops))
                # redistribute the removed mass over the remaining cells;
                # guard against division by zero when all cells are negative
                if Ct_i[~ix].sum() != 0:
                    Ct_i[~ix] *= 1. + Ct_i[ix].sum() / Ct_i[~ix].sum()
                else:
                    Ct_i[~ix] = 0
                Ct_i[ix] = 0.

            # determine pickup and deficit for current fraction
            Cu_i = s['Cu'][:,:,i].flatten()
            mass_i = s['mass'][:,:,0,i].flatten()
            w_i = w[:,:,i].flatten()
            pickup_i = (w_i * Cu_i - Ct_i) / p['T'] * self.dt
            deficit_i = pickup_i - mass_i
            ix = (deficit_i > p['max_error']) \
                 & (w_i * Cu_i > 0.)

            # quit the iteration if there is no deficit, otherwise
            # back-compute the maximum weight allowed to get zero
            # deficit for the current fraction and progress to
            # the next iteration step
            if not np.any(ix):
                logger.debug(format_log('Iteration converged',
                                        steps=n,
                                        fraction=i,
                                        **logprops))
                pickup_i = np.minimum(pickup_i, mass_i)
                break
            else:
                w_i[ix] = (mass_i[ix] * p['T'] / self.dt \
                           + Ct_i[ix]) / Cu_i[ix]
                w[:,:,i] = w_i.reshape(y_i.shape)

        # throw warning if the maximum number of iterations was reached
        if np.any(ix):
            logger.warning(format_log('Iteration not converged',
                                      nrcells=np.sum(ix),
                                      fraction=i,
                                      **logprops))

        # check for unexpected negative values
        if Ct_i.min() < 0:
            logger.warning(format_log('Negative concentrations',
                                      nrcells=np.sum(Ct_i<0.),
                                      fraction=i,
                                      minvalue=Ct_i.min(),
                                      **logprops))
        if w_i.min() < 0:
            logger.warning(format_log('Negative weights',
                                      nrcells=np.sum(w_i<0),
                                      fraction=i,
                                      minvalue=w_i.min(),
                                      **logprops))

        Ct[:,:,i] = Ct_i.reshape(y_i.shape)
        pickup[:,:,i] = pickup_i.reshape(y_i.shape)

    # check if there are any cells where the sum of all weights is
    # smaller than unity. these cells are supply-limited for all
    # fractions. Log these events.
    ix = 1. - np.sum(w, axis=2) > p['max_error']
    if np.any(ix):
        self._count('supplylim')
        logger.warning(format_log('Ran out of sediment',
                                  nrcells=np.sum(ix),
                                  minweight=np.sum(w, axis=-1).min(),
                                  **logprops))

    qs = Ct * s['us']
    qn = Ct * s['un']

    return dict(Ct=Ct,
                qs=qs,
                qn=qn,
                pickup=pickup,
                w=w,
                w_init=w_init,
                w_air=w_air,
                w_bed=w_bed)
def solve_steadystatepieter(self):
beta = 1.
l = self.l
s = self.s
p = self.p
Ct = s['Ct'].copy()
qs = s['qs'].copy()
qn = s['qn'].copy()
pickup = s['pickup'].copy()
Ts = p['T']
# compute transport weights for all sediment fractions
w_init, w_air, w_bed = aeolis.transport.compute_weights(s, p)
if self.t == 0.:
# use initial guess for first time step
w = p['grain_dist'].reshape((1,1,-1))
w = w.repeat(p['ny']+1, axis=0)
w = w.repeat(p['nx']+1, axis=1)
return dict(w=w)
else:
w = w_init.copy()
# set model state properties that are added to warnings and errors
logprops = dict(minwind=s['uw'].min(),
maxdrop=(l['uw']-s['uw']).max(),
time=self.t,
dt=self.dt)
nf = p['nfractions']
ufs = np.zeros((p['ny']+1,p['nx']+2))
ufn = np.zeros((p['ny']+2,p['nx']+1))
for i in range(nf): #loop over fractions
#define velocity fluxes
ufs[:,1:-1] = 0.5*s['us'][:,:-1,i] + 0.5*s['us'][:,1:,i]
ufn[1:-1,:] = 0.5*s['un'][:-1,:,i] + 0.5*s['un'][1:,:,i]
#boundary values
ufs[:,0] = s['us'][:,0,i]
ufs[:,-1] = s['us'][:,-1,i]
if p['boundary_lateral'] == 'circular':
ufn[0,:] = 0.5*s['un'][0,:,i] + 0.5*s['un'][-1,:,i]
ufn[-1,:] = ufn[0,:]
else:
ufn[0,:] = s['un'][0,:,i]
ufn[-1,:] = s['un'][-1,:,i]
beta = abs(beta)
if beta >= 1.:
# define upwind direction
ixfs = np.asarray(ufs >= 0., dtype=float)
ixfn = np.asarray(ufn >= 0., dtype=float)
else:
# or centralizing weights
ixfs = beta + np.zeros(ufs)
ixfn = beta + np.zeros(ufn)
# initialize matrix diagonals
A0 = np.zeros(s['zb'].shape)
Apx = np.zeros(s['zb'].shape)
Ap1 = np.zeros(s['zb'].shape)
Amx = np.zeros(s['zb'].shape)
Am1 = np.zeros(s['zb'].shape)
# populate matrix diagonals
#A0 += s['dsdn'] / self.dt #time derivative
A0 += s['dsdn'] / Ts #source term
A0[:,1:] -= s['dn'][:,1:] * ufs[:,1:-1] * (1. - ixfs[:,1:-1]) #lower x-face
Am1[:,1:] -= s['dn'][:,1:] * ufs[:,1:-1] * ixfs[:,1:-1] #lower x-face
A0[:,:-1] += s['dn'][:,:-1] * ufs[:,1:-1] * ixfs[:,1:-1] #upper x-face
Ap1[:,:-1] += s['dn'][:,:-1] * ufs[:,1:-1] * (1. - ixfs[:,1:-1]) #upper x-face
A0[1:,:] -= s['ds'][1:,:] * ufn[1:-1,:] * (1. - ixfn[1:-1,:]) #lower y-face
Amx[1:,:] -= s['ds'][1:,:] * ufn[1:-1,:] * ixfn[1:-1,:] #lower y-face
A0[:-1,:] += s['ds'][:-1,:] * ufn[1:-1,:] * ixfn[1:-1,:] #upper y-face
Apx[:-1,:] += s['ds'][:-1,:] * ufn[1:-1,:] * (1. - ixfn[1:-1,:]) #upper y-face
# add boundaries
# offshore boundary (i=0)
if p['boundary_offshore'] == 'flux':
#nothing to be done
pass
elif p['boundary_offshore'] == 'constant':
#constant sediment concentration (Ct) in the air
A0[:,0] = 1.
Apx[:,0] = 0.
Amx[:,0] = 0.
Ap1[:,0] = 0.
Am1[:,0] = 0.
elif p['boundary_offshore'] == 'gradient':
#remove the flux at the inner face of the cell
A0[:,0] -= s['dn'][:,0] * ufs[:,1] * ixfs[:,1] #upper x-face
Ap1[:,0] -= s['dn'][:,0] * ufs[:,1] * (1. - ixfs[:,1]) #upper x-face
elif p['boundary_offshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown offshore boundary condition [%s]' % self.p['boundary_offshore'])
#onshore boundary (i=nx)
if p['boundary_onshore'] == 'flux':
#nothing to be done
pass
elif p['boundary_onshore'] == 'constant':
#constant sediment concentration (hC) in the air
A0[:,-1] = 1.
Apx[:,-1] = 0.
Amx[:,-1] = 0.
Ap1[:,-1] = 0.
Am1[:,-1] = 0.
elif p['boundary_onshore'] == 'gradient':
#remove the flux at the inner face of the cell
A0[:,-1] += s['dn'][:,-1] * ufs[:,-2] * (1. - ixfs[:,-2]) #lower x-face
Am1[:,-1] += s['dn'][:,-1] * ufs[:,-2] * ixfs[:,-2] #lower x-face
elif p['boundary_onshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown offshore boundary condition [%s]' % self.p['boundary_onshore'])
#lateral boundaries (j=0; j=ny)
if p['boundary_lateral'] == 'flux':
#nothing to be done
pass
elif p['boundary_lateral'] == 'constant':
#constant sediment concentration (hC) in the air
A0[0,:] = 1.
Apx[0,:] = 0.
Amx[0,:] = 0.
Ap1[0,:] = 0.
Am1[0,:] = 0.
A0[-1,:] = 1.
Apx[-1,:] = 0.
Amx[-1,:] = 0.
Ap1[-1,:] = 0.
Am1[-1,:] = 0.
elif p['boundary_lateral'] == 'gradient':
#remove the flux at the inner face of the cell
A0[0,:] -= s['ds'][0,:] * ufn[1,:] * ixfn[1,:] #upper y-face
Apx[0,:] -= s['ds'][0,:] * ufn[1,:] * (1. - ixfn[1,:]) #upper y-face
A0[-1,:] += s['ds'][-1,:] * ufn[-2,:] * (1. - ixfn[-2,:]) #lower y-face
Amx[-1,:] += s['ds'][-1,:] * ufn[-2,:] * ixfn[-2,:] #lower y-face
elif p['boundary_lateral'] == 'circular':
A0[0,:] -= s['ds'][0,:] * ufn[0,:] * (1. - ixfn[0,:]) #lower y-face
Amx[0,:] -= s['ds'][0,:] * ufn[0,:] * ixfn[0,:] #lower y-face
A0[-1,:] += s['ds'][-1,:] * ufn[-1,:] * ixfn[-1,:] #upper y-face
Apx[-1,:] += s['ds'][-1,:] * ufn[-1,:] * (1. - ixfn[-1,:]) #upper y-face
else:
raise ValueError('Unknown lateral boundary condition [%s]' % self.p['boundary_lateral'])
# construct sparse matrix
if p['ny'] > 0:
j = p['nx']+1
A = scipy.sparse.diags((Apx.flatten()[:j],
Amx.flatten()[j:],
Am1.flatten()[1:],
A0.flatten(),
Ap1.flatten()[:-1],
Apx.flatten()[j:],
Amx.flatten()[:j]),
(-j*p['ny'],-j,-1,0,1,j,j*p['ny']), format='csr')
else:
j = p['nx']+1
ny = 0
A = scipy.sparse.diags((Am1.flatten()[1:],
A0.flatten(),
Ap1.flatten()[:-1]),
(-1, 0, 1), format='csr')
# solve transport for each fraction separately using latest
# available weights
# renormalize weights for all fractions equal or larger
# than the current one such that the sum of all weights is
# unity
w = aeolis.transport.renormalize_weights(w, i)
# iteratively find a solution of the linear system that
# does not violate the availability of sediment in the bed
for n in range(p['max_iter']):
self._count('matrixsolve')
# define upwind face value
# sediment concentration
Ctxfs_i = np.zeros(ufs.shape)
Ctxfn_i = np.zeros(ufn.shape)
Ctxfs_i[:,1:-1] = ixfs[:,1:-1] * Ct[:,:-1,i] \
+ (1. - ixfs[:,1:-1]) * Ct[:,1:,i]
Ctxfn_i[1:-1,:] = ixfn[1:-1,:] * Ct[:-1,:,i] \
+ (1. - ixfn[1:-1,:]) * Ct[1:,:,i]
if p['boundary_lateral'] == 'circular':
Ctxfn_i[0,:] = ixfn[0,:] * Ct[-1,:,i] \
+ (1. - ixfn[0,:]) * Ct[0,:,i]
# calculate pickup
D_i = s['dsdn'] / Ts * Ct[:,:,i]
A_i = s['dsdn'] / Ts * s['mass'][:,:,0,i] + D_i # Availability
U_i = s['dsdn'] / Ts * w[:,:,i] * s['Cu'][:,:,i]
#deficit_i = E_i - A_i
E_i= np.minimum(U_i, A_i)
#pickup_i = E_i - D_i
# create the right hand side of the linear system
# sediment concentration
yCt_i = np.zeros(s['zb'].shape)
yCt_i += E_i - D_i #source term
yCt_i[:,1:] += s['dn'][:,1:] * ufs[:,1:-1] * Ctxfs_i[:,1:-1] #lower x-face
yCt_i[:,:-1] -= s['dn'][:,:-1] * ufs[:,1:-1] * Ctxfs_i[:,1:-1] #upper x-face
yCt_i[1:,:] += s['ds'][1:,:] * ufn[1:-1,:] * Ctxfn_i[1:-1,:] #lower y-face
yCt_i[:-1,:] -= s['ds'][:-1,:] * ufn[1:-1,:] * Ctxfn_i[1:-1,:] #upper y-face
# boundary conditions
# offshore boundary (i=0)
if p['boundary_offshore'] == 'flux':
yCt_i[:,0] += s['dn'][:,0] * ufs[:,0] * s['Cu0'][:,0,i] * p['offshore_flux']
elif p['boundary_offshore'] == 'constant':
#constant sediment concentration (Ct) in the air
yCt_i[:,0] = p['constant_offshore_flux']
elif p['boundary_offshore'] == 'gradient':
#remove the flux at the inner face of the cell
yCt_i[:,0] += s['dn'][:,1] * ufs[:,1] * Ctxfs_i[:,1]
elif p['boundary_offshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown offshore boundary condition [%s]' % self.p['boundary_offshore'])
# onshore boundary (i=nx)
if p['boundary_onshore'] == 'flux':
yCt_i[:,-1] += s['dn'][:,-1] * ufs[:,-1] * s['Cu0'][:,-1,i] * p['onshore_flux']
elif p['boundary_onshore'] == 'constant':
#constant sediment concentration (Ct) in the air
yCt_i[:,-1] = p['constant_onshore_flux']
elif p['boundary_onshore'] == 'gradient':
#remove the flux at the inner face of the cell
yCt_i[:,-1] -= s['dn'][:,-2] * ufs[:,-2] * Ctxfs_i[:,-2]
elif p['boundary_onshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown onshore boundary condition [%s]' % self.p['boundary_onshore'])
#lateral boundaries (j=0; j=ny)
if p['boundary_lateral'] == 'flux':
yCt_i[0,:] += s['ds'][0,:] * ufn[0,:] * s['Cu0'][0,:,i] * p['lateral_flux'] #lower y-face
yCt_i[-1,:] -= s['ds'][-1,:] * ufn[-1,:] * s['Cu0'][-1,:,i] * p['lateral_flux'] #upper y-face
elif p['boundary_lateral'] == 'constant':
#constant sediment concentration (hC) in the air
yCt_i[0,:] = 0.
yCt_i[-1,:] = 0.
elif p['boundary_lateral'] == 'gradient':
#remove the flux at the inner face of the cell
yCt_i[-1,:] -= s['ds'][-2,:] * ufn[-2,:] * Ctxfn_i[-2,:] #lower y-face
yCt_i[0,:] += s['ds'][1,:] * ufn[1,:] * Ctxfn_i[1,:] #upper y-face
elif p['boundary_lateral'] == 'circular':
yCt_i[0,:] += s['ds'][0,:] * ufn[0,:] * Ctxfn_i[0,:] #lower y-face
yCt_i[-1,:] -= s['ds'][-1,:] * ufn[-1,:] * Ctxfn_i[-1,:] #upper y-face
else:
raise ValueError('Unknown lateral boundary condition [%s]' % self.p['boundary_lateral'])
# print("ugs = %.*g" % (3,s['ugs'][10,10]))
# print("ugn = %.*g" % (3,s['ugn'][10,10]))
# print("%.*g" % (3,np.amax(np.absolute(y_i))))
# solve system with current weights
Ct_i = Ct[:,:,i].flatten()
Ct_i += scipy.sparse.linalg.spsolve(A, yCt_i.flatten())
Ct_i = prevent_tiny_negatives(Ct_i, p['max_error'])
# check for negative values
if Ct_i.min() < 0.:
ix = Ct_i < 0.
# logger.warn(format_log('Removing negative concentrations',
# nrcells=np.sum(ix),
# fraction=i,
# iteration=n,
# minvalue=Ct_i.min(),
# **logprops))
Ct_i[~ix] *= 1. + Ct_i[ix].sum() / Ct_i[~ix].sum()
Ct_i[ix] = 0.
# determine pickup and deficit for current fraction
Cu_i = s['Cu'][:,:,i].flatten()
mass_i = s['mass'][:,:,0,i].flatten()
w_i = w[:,:,i].flatten()
Ts_i = Ts
pickup_i = (w_i * Cu_i - Ct_i) / Ts_i * self.dt # Dit klopt niet! enkel geldig bij backward euler
deficit_i = pickup_i - mass_i
ix = (deficit_i > p['max_error']) \
& (w_i * Cu_i > 0.)
pickup[:,:,i] = pickup_i.reshape(yCt_i.shape)
Ct[:,:,i] = Ct_i.reshape(yCt_i.shape)
# quit the iteration if there is no deficit, otherwise
# back-compute the maximum weight allowed to get zero
# deficit for the current fraction and progress to
# the next iteration step
if not np.any(ix):
logger.debug(format_log('Iteration converged',
steps=n,
fraction=i,
**logprops))
pickup_i = np.minimum(pickup_i, mass_i)
break
else:
w_i[ix] = (mass_i[ix] * Ts_i / self.dt \
+ Ct_i[ix]) / Cu_i[ix]
w[:,:,i] = w_i.reshape(yCt_i.shape)
# throw warning if the maximum number of iterations was
# reached
if np.any(ix):
logger.warn(format_log('Iteration not converged',
nrcells=np.sum(ix),
fraction=i,
**logprops))
# check for unexpected negative values
if Ct_i.min() < 0:
logger.warn(format_log('Negative concentrations',
nrcells=np.sum(Ct_i<0.),
fraction=i,
minvalue=Ct_i.min(),
**logprops))
if w_i.min() < 0:
logger.warn(format_log('Negative weights',
nrcells=np.sum(w_i<0),
fraction=i,
minvalue=w_i.min(),
**logprops))
# end loop over frations
# check if there are any cells where the sum of all weights is
# smaller than unity. these cells are supply-limited for all
# fractions. Log these events.
ix = 1. - np.sum(w, axis=2) > p['max_error']
if np.any(ix):
self._count('supplylim')
# logger.warn(format_log('Ran out of sediment',
# nrcells=np.sum(ix),
# minweight=np.sum(w, axis=-1).min(),
# **logprops))
qs = Ct * s['us']
qn = Ct * s['un']
return dict(Ct=Ct,
qs=qs,
qn=qn,
pickup=pickup,
w=w,
w_init=w_init,
w_air=w_air,
w_bed=w_bed)
def solve_pieter(self, alpha=.5, beta=1.):
'''Implements the explicit Euler forward, implicit Euler backward and semi-implicit Crank-Nicolson numerical schemes
Determines weights of sediment fractions, sediment pickup and
instantaneous sediment concentration. Returns a partial
spatial grid dictionary that can be used to update the global
spatial grid dictionary.
Parameters
----------
alpha : float, optional
Implicitness coefficient (0.0 for Euler forward, 1.0 for Euler backward or 0.5 for Crank-Nicolson, default=0.5)
beta : float, optional
Centralization coefficient (1.0 for upwind or 0.5 for centralized, default=1.0)
Returns
-------
dict
Partial spatial grid dictionary
Examples
--------
>>> model.s.update(model.solve(alpha=1., beta=1.) # euler backward
>>> model.s.update(model.solve(alpha=.5, beta=1.) # crank-nicolson
See Also
--------
model.AeoLiS.euler_forward
model.AeoLiS.euler_backward
model.AeoLiS.crank_nicolson
transport.compute_weights
transport.renormalize_weights
'''
l = self.l
s = self.s
p = self.p
Ct = s['Ct'].copy()
qs = s['qs'].copy()
qn = s['qn'].copy()
pickup = s['pickup'].copy()
Ts = p['T']
# compute transport weights for all sediment fractions
w_init, w_air, w_bed = aeolis.transport.compute_weights(s, p)
if self.t == 0.:
# use initial guess for first time step
w = p['grain_dist'].reshape((1,1,-1))
w = w.repeat(p['ny']+1, axis=0)
w = w.repeat(p['nx']+1, axis=1)
return dict(w=w)
else:
w = w_init.copy()
# set model state properties that are added to warnings and errors
logprops = dict(minwind=s['uw'].min(),
maxdrop=(l['uw']-s['uw']).max(),
time=self.t,
dt=self.dt)
nf = p['nfractions']
ufs = np.zeros((p['ny']+1,p['nx']+2))
ufn = np.zeros((p['ny']+2,p['nx']+1))
for i in range(nf): #loop over fractions
#define velocity fluxes
ufs[:,1:-1] = 0.5*s['us'][:,:-1,i] + 0.5*s['us'][:,1:,i]
ufn[1:-1,:] = 0.5*s['un'][:-1,:,i] + 0.5*s['un'][1:,:,i]
#boundary values
ufs[:,0] = s['us'][:,0,i]
ufs[:,-1] = s['us'][:,-1,i]
if p['boundary_lateral'] == 'circular':
ufn[0,:] = 0.5*s['un'][0,:,i] + 0.5*s['un'][-1,:,i]
ufn[-1,:] = ufn[0,:]
else:
ufn[0,:] = s['un'][0,:,i]
ufn[-1,:] = s['un'][-1,:,i]
beta = abs(beta)
if beta >= 1.:
# define upwind direction
ixfs = np.asarray(ufs >= 0., dtype=float)
ixfn = np.asarray(ufn >= 0., dtype=float)
else:
# or centralizing weights
ixfs = beta + np.zeros(ufs)
ixfn = beta + np.zeros(ufn)
# initialize matrix diagonals
A0 = np.zeros(s['zb'].shape)
Apx = np.zeros(s['zb'].shape)
Ap1 = np.zeros(s['zb'].shape)
Amx = np.zeros(s['zb'].shape)
Am1 = np.zeros(s['zb'].shape)
# populate matrix diagonals
A0 += s['dsdn'] / self.dt #time derivative
A0 += s['dsdn'] / Ts * alpha #source term
A0[:,1:] -= s['dn'][:,1:] * ufs[:,1:-1] * (1. - ixfs[:,1:-1]) * alpha #lower x-face
Am1[:,1:] -= s['dn'][:,1:] * ufs[:,1:-1] * ixfs[:,1:-1] * alpha #lower x-face
A0[:,:-1] += s['dn'][:,:-1] * ufs[:,1:-1] * ixfs[:,1:-1] * alpha #upper x-face
Ap1[:,:-1] += s['dn'][:,:-1] * ufs[:,1:-1] * (1. - ixfs[:,1:-1]) * alpha #upper x-face
A0[1:,:] -= s['ds'][1:,:] * ufn[1:-1,:] * (1. - ixfn[1:-1,:]) * alpha #lower y-face
Amx[1:,:] -= s['ds'][1:,:] * ufn[1:-1,:] * ixfn[1:-1,:] * alpha #lower y-face
A0[:-1,:] += s['ds'][:-1,:] * ufn[1:-1,:] * ixfn[1:-1,:] * alpha #upper y-face
Apx[:-1,:] += s['ds'][:-1,:] * ufn[1:-1,:] * (1. - ixfn[1:-1,:]) * alpha #upper y-face
# add boundaries
# offshore boundary (i=0)
if p['boundary_offshore'] == 'flux':
#nothing to be done
pass
elif p['boundary_offshore'] == 'constant':
#constant sediment concentration (Ct) in the air
A0[:,0] = 1.
Apx[:,0] = 0.
Amx[:,0] = 0.
Ap1[:,0] = 0.
Am1[:,0] = 0.
elif p['boundary_offshore'] == 'gradient':
#remove the flux at the inner face of the cell
A0[:,0] -= s['dn'][:,0] * ufs[:,1] * ixfs[:,1] * alpha #upper x-face
Ap1[:,0] -= s['dn'][:,0] * ufs[:,1] * (1. - ixfs[:,1]) * alpha #upper x-face
elif p['boundary_offshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown offshore boundary condition [%s]' % self.p['boundary_offshore'])
#onshore boundary (i=nx)
if p['boundary_onshore'] == 'flux':
#nothing to be done
pass
elif p['boundary_onshore'] == 'constant':
#constant sediment concentration (hC) in the air
A0[:,-1] = 1.
Apx[:,-1] = 0.
Amx[:,-1] = 0.
Ap1[:,-1] = 0.
Am1[:,-1] = 0.
elif p['boundary_onshore'] == 'gradient':
#remove the flux at the inner face of the cell
A0[:,-1] += s['dn'][:,-1] * ufs[:,-2] * (1. - ixfs[:,-2]) * alpha #lower x-face
Am1[:,-1] += s['dn'][:,-1] * ufs[:,-2] * ixfs[:,-2] * alpha #lower x-face
elif p['boundary_onshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown offshore boundary condition [%s]' % self.p['boundary_onshore'])
#lateral boundaries (j=0; j=ny)
if p['boundary_lateral'] == 'flux':
#nothing to be done
pass
elif p['boundary_lateral'] == 'constant':
#constant sediment concentration (hC) in the air
A0[0,:] = 1.
Apx[0,:] = 0.
Amx[0,:] = 0.
Ap1[0,:] = 0.
Am1[0,:] = 0.
A0[-1,:] = 1.
Apx[-1,:] = 0.
Amx[-1,:] = 0.
Ap1[-1,:] = 0.
Am1[-1,:] = 0.
elif p['boundary_lateral'] == 'gradient':
#remove the flux at the inner face of the cell
A0[0,:] -= s['ds'][0,:] * ufn[1,:] * ixfn[1,:] * alpha #upper y-face
Apx[0,:] -= s['ds'][0,:] * ufn[1,:] * (1. - ixfn[1,:]) * alpha #upper y-face
A0[-1,:] += s['ds'][-1,:] * ufn[-2,:] * (1. - ixfn[-2,:]) * alpha #lower y-face
Amx[-1,:] += s['ds'][-1,:] * ufn[-2,:] * ixfn[-2,:] * alpha #lower y-face
elif p['boundary_lateral'] == 'circular':
A0[0,:] -= s['ds'][0,:] * ufn[0,:] * (1. - ixfn[0,:]) * alpha #lower y-face
Amx[0,:] -= s['ds'][0,:] * ufn[0,:] * ixfn[0,:] * alpha #lower y-face
A0[-1,:] += s['ds'][-1,:] * ufn[-1,:] * ixfn[-1,:] * alpha #upper y-face
Apx[-1,:] += s['ds'][-1,:] * ufn[-1,:] * (1. - ixfn[-1,:]) * alpha #upper y-face
else:
raise ValueError('Unknown lateral boundary condition [%s]' % self.p['boundary_lateral'])
# construct sparse matrix
if p['ny'] > 0:
j = p['nx']+1
A = scipy.sparse.diags((Apx.flatten()[:j],
Amx.flatten()[j:],
Am1.flatten()[1:],
A0.flatten(),
Ap1.flatten()[:-1],
Apx.flatten()[j:],
Amx.flatten()[:j]),
(-j*p['ny'],-j,-1,0,1,j,j*p['ny']), format='csr')
else:
A = scipy.sparse.diags((Am1.flatten()[1:],
A0.flatten(),
Ap1.flatten()[:-1]),
(-1,0,1), format='csr')
# solve transport for each fraction separately using latest
# available weights
# renormalize weights for all fractions equal or larger
# than the current one such that the sum of all weights is
# unity
w = aeolis.transport.renormalize_weights(w, i)
# iteratively find a solution of the linear system that
# does not violate the availability of sediment in the bed
for n in range(p['max_iter']):
self._count('matrixsolve')
# print("iteration nr = %d" % n)
# define upwind face value
# sediment concentration
Ctxfs_i = np.zeros(ufs.shape)
Ctxfn_i = np.zeros(ufn.shape)
Ctxfs_i[:,1:-1] = ixfs[:,1:-1] * ( alpha * Ct[:,:-1,i] \
+ (1. - alpha ) * l['Ct'][:,:-1,i] ) \
+ (1. - ixfs[:,1:-1]) * ( alpha * Ct[:,1:,i] \
+ (1. - alpha ) * l['Ct'][:,1:,i] )
Ctxfn_i[1:-1,:] = ixfn[1:-1,:] * (alpha * Ct[:-1,:,i] \
+ (1. - alpha ) * l['Ct'][:-1,:,i] ) \
+ (1. - ixfn[1:-1,:]) * ( alpha * Ct[1:,:,i] \
+ (1. - alpha ) * l['Ct'][1:,:,i] )
if p['boundary_lateral'] == 'circular':
Ctxfn_i[0,:] = ixfn[0,:] * (alpha * Ct[-1,:,i] \
+ (1. - alpha ) * l['Ct'][-1,:,i] ) \
+ (1. - ixfn[0,:]) * ( alpha * Ct[0,:,i] \
+ (1. - alpha ) * l['Ct'][0,:,i] )
Ctxfn_i[-1,:] = Ctxfn_i[0,:]
# calculate pickup
D_i = s['dsdn'] / Ts * ( alpha * Ct[:,:,i] \
+ (1. - alpha ) * l['Ct'][:,:,i] )
A_i = s['dsdn'] / Ts * s['mass'][:,:,0,i] + D_i # Availability
U_i = s['dsdn'] / Ts * ( w[:,:,i] * alpha * s['Cu'][:,:,i] \
+ (1. - alpha ) * l['w'][:,:,i] * l['Cu'][:,:,i] )
#deficit_i = E_i - A_i
E_i= np.minimum(U_i, A_i)
#pickup_i = E_i - D_i
# create the right hand side of the linear system
# sediment concentration
yCt_i = np.zeros(s['zb'].shape)
yCt_i -= s['dsdn'] / self.dt * ( Ct[:,:,i] \
- l['Ct'][:,:,i] ) #time derivative
yCt_i += E_i - D_i #source term
yCt_i[:,1:] += s['dn'][:,1:] * ufs[:,1:-1] * Ctxfs_i[:,1:-1] #lower x-face
yCt_i[:,:-1] -= s['dn'][:,:-1] * ufs[:,1:-1] * Ctxfs_i[:,1:-1] #upper x-face
yCt_i[1:,:] += s['ds'][1:,:] * ufn[1:-1,:] * Ctxfn_i[1:-1,:] #lower y-face
yCt_i[:-1,:] -= s['ds'][:-1,:] * ufn[1:-1,:] * Ctxfn_i[1:-1,:] #upper y-face
# boundary conditions
# offshore boundary (i=0)
if p['boundary_offshore'] == 'flux':
yCt_i[:,0] += s['dn'][:,0] * ufs[:,0] * s['Cu0'][:,0,i] * p['offshore_flux']
elif p['boundary_offshore'] == 'constant':
#constant sediment concentration (Ct) in the air (for now = 0)
yCt_i[:,0] = 0.
elif p['boundary_offshore'] == 'gradient':
#remove the flux at the inner face of the cell
yCt_i[:,0] += s['dn'][:,1] * ufs[:,1] * Ctxfs_i[:,1] #upper x-face
elif p['boundary_offshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown offshore boundary condition [%s]' % self.p['boundary_offshore'])
# onshore boundary (i=nx)
if p['boundary_onshore'] == 'flux':
yCt_i[:,-1] += s['dn'][:,-1] * ufs[:,-1] * s['Cu0'][:,-1,i] * p['onshore_flux']
elif p['boundary_onshore'] == 'constant':
#constant sediment concentration (Ct) in the air (for now = 0)
yCt_i[:,-1] = 0.
elif p['boundary_onshore'] == 'gradient':
#remove the flux at the inner face of the cell
yCt_i[:,-1] -= s['dn'][:,-2] * ufs[:,-2] * Ctxfs_i[:,-2] #lower x-face
elif p['boundary_onshore'] == 'circular':
raise NotImplementedError('Cross-shore cricular boundary condition not yet implemented')
else:
raise ValueError('Unknown onshore boundary condition [%s]' % self.p['boundary_onshore'])
#lateral boundaries (j=0; j=ny)
if p['boundary_lateral'] == 'flux':
yCt_i[0,:] += s['ds'][0,:] * ufn[0,:] * s['Cu0'][0,:,i] * p['lateral_flux'] #lower y-face
yCt_i[-1,:] -= s['ds'][-1,:] * ufn[-1,:] * s['Cu0'][-1,:,i] * p['lateral_flux'] #upper y-face
elif p['boundary_lateral'] == 'constant':
#constant sediment concentration (hC) in the air
yCt_i[0,:] = 0.
yCt_i[-1,:] = 0.
elif p['boundary_lateral'] == 'gradient':
#remove the flux at the inner face of the cell
yCt_i[-1,:] -= s['ds'][-2,:] * ufn[-2,:] * Ctxfn_i[-2,:] #lower y-face
yCt_i[0,:] += s['ds'][1,:] * ufn[1,:] * Ctxfn_i[1,:] #upper y-face
elif p['boundary_lateral'] == 'circular':
yCt_i[0,:] += s['ds'][0,:] * ufn[0,:] * Ctxfn_i[0,:] #lower y-face
yCt_i[-1,:] -= s['ds'][-1,:] * ufn[-1,:] * Ctxfn_i[-1,:] #upper y-face
else:
raise ValueError('Unknown lateral boundary condition [%s]' % self.p['boundary_lateral'])
# print("ugs = %.*g" % (3,s['ugs'][10,10]))
# print("ugn = %.*g" % (3,s['ugn'][10,10]))
# print("%.*g" % (3,np.amax(np.absolute(y_i))))
# solve system with current weights
Ct_i = Ct[:,:,i].flatten()
Ct_i += scipy.sparse.linalg.spsolve(A, yCt_i.flatten())
Ct_i = prevent_tiny_negatives(Ct_i, p['max_error'])
# check for negative values
if Ct_i.min() < 0.:
ix = Ct_i < 0.
# logger.warn(format_log('Removing negative concentrations',
# nrcells=np.sum(ix),
# fraction=i,
# iteration=n,
# minvalue=Ct_i.min(),
# **logprops))
Ct_i[~ix] *= 1. + Ct_i[ix].sum() / Ct_i[~ix].sum()
Ct_i[ix] = 0.
# determine pickup and deficit for current fraction
Cu_i = s['Cu'][:,:,i].flatten()
mass_i = s['mass'][:,:,0,i].flatten()
w_i = w[:,:,i].flatten()
Ts_i = Ts
pickup_i = (w_i * Cu_i - Ct_i) / Ts_i * self.dt # Dit klopt niet! enkel geldig bij backward euler
deficit_i = pickup_i - mass_i
ix = (deficit_i > p['max_error']) \
& (w_i * Cu_i > 0.)
pickup[:,:,i] = pickup_i.reshape(yCt_i.shape)
Ct[:,:,i] = Ct_i.reshape(yCt_i.shape)
# quit the iteration if there is no deficit, otherwise
# back-compute the maximum weight allowed to get zero
# deficit for the current fraction and progress to
# the next iteration step
if not np.any(ix):
logger.debug(format_log('Iteration converged',
steps=n,
fraction=i,
**logprops))
pickup_i = np.minimum(pickup_i, mass_i)
break
else:
w_i[ix] = (mass_i[ix] * Ts_i / self.dt \
+ Ct_i[ix]) / Cu_i[ix]
w[:,:,i] = w_i.reshape(yCt_i.shape)
# throw warning if the maximum number of iterations was
# reached
if np.any(ix):
logger.warn(format_log('Iteration not converged',
nrcells=np.sum(ix),
fraction=i,
**logprops))
# check for unexpected negative values
if Ct_i.min() < 0:
logger.warn(format_log('Negative concentrations',
nrcells=np.sum(Ct_i<0.),
fraction=i,
minvalue=Ct_i.min(),
**logprops))
if w_i.min() < 0:
logger.warn(format_log('Negative weights',
nrcells=np.sum(w_i<0),
fraction=i,
minvalue=w_i.min(),
**logprops))
# end loop over frations
# check if there are any cells where the sum of all weights is
# smaller than unity. these cells are supply-limited for all
# fractions. Log these events.
ix = 1. - np.sum(w, axis=2) > p['max_error']
if np.any(ix):
self._count('supplylim')
# logger.warn(format_log('Ran out of sediment',
# nrcells=np.sum(ix),
# minweight=np.sum(w, axis=-1).min(),
# **logprops))
qs = Ct * s['us']
qn = Ct * s['un']
qs = Ct * s['us']
qn = Ct * s['un']
q = np.hypot(qs, qn)
return dict(Ct=Ct,
qs=qs,
qn=qn,
pickup=pickup,
w=w,
w_init=w_init,
w_air=w_air,
w_bed=w_bed,
q=q)
def get_count(self, name):
'''Get counter value
Parameters
----------
name : str
Name of counter
'''
if name in self.c:
return self.c[name]
else:
return 0
def _count(self, name, n=1):
'''Increase counter
Parameters
----------
name : str
Name of counter
n : int, optional
Increment of counter (default: 1)
'''
if name not in self.c:
self.c[name] = 0
self.c[name] += n
def _dims2shape(self, dims):
'''Converts named dimensions to numbered shape
Supports only dimension names that can be found in the model
parameters dictionary. The dimensions ``nx`` and ``ny`` are
increased by one, so they match the size of the spatial grids
rather than the number of spatial cells in the model.
Parameters
----------
dims : iterable
Iterable with strings specifying dimension names
Returns
-------
tuple
Shape of spatial grid
'''
shape = []
for dim in dims:
shape.append(self.p[dim])
if dim in ['nx', 'ny']:
shape[-1] += 1
return tuple(shape)
@staticmethod
def dimensions(var=None):
'''Static method that returns named dimensions of all spatial grids
Parameters
----------
var : str, optional
Name of spatial grid
Returns
-------
tuple or dict
Tuple with named dimensions of requested spatial grid or
dictionary with all named dimensions of all spatial
grids. Returns nothing if requested spatial grid is not
defined.
'''
dims = {s:d
for d, states in aeolis.constants.MODEL_STATE.items()
for s in states}
if var is not None:
if var in dims:
return dims[var]
else:
return None
else:
return dims
class AeoLiSRunner(AeoLiS):
'''AeoLiS model runner class
This runner class is a convenience class for the BMI-compatible
AeoLiS model class (:class:`~model.AeoLiS()`). It implements a
time loop, a progress indicator and netCDF4 output. It also
provides the definition of a callback function that can be used to
interact with the AeoLiS model during runtime.
The command-line function ``aeolis`` is available that uses this
class to start an AeoLiS model run.
Examples
--------
>>> # run with default settings
... AeoLiSRunner().run()
>>> AeoLiSRunner(configfile='aeolis.txt').run()
>>> model = AeoLiSRunner(configfile='aeolis.txt')
>>> model.run(callback=lambda model: model.set_var('zb', zb))
>>> model.run(callback='bar.py:add_bar')
See Also
--------
console.aeolis
'''
    def __init__(self, configfile='aeolis.txt'):
        '''Initialize class

        Reads model configuration file without parsing all referenced
        files for the progress indicator and netCDF output. If no
        configuration file is given, the default settings are used.

        Parameters
        ----------
        configfile : str, optional
            Model configuration file. See :func:`~inout.read_configfile()`.
        '''

        super(AeoLiSRunner, self).__init__(configfile=configfile)

        self.t0 = None       # wall-clock time at start of run (set in run())
        self.tout = 0.       # output bookkeeping time
        self.tlog = 0.       # log bookkeeping time
        self.plog = -1.      # progress bookkeeping fraction
        self.trestart = 0.   # restart-file bookkeeping time

        self.n = 0 # time step counter
        self.o = {} # output stats

        self.changed = False # whether config must be rewritten by write_params()
        self.cwd = None      # original working directory, restored after run()

        self.set_configfile(configfile)
        if os.path.exists(self.configfile):
            # read configuration now; referenced data files are parsed later
            self.p = aeolis.inout.read_configfile(self.configfile, parse_files=False)
            self.changed = False
        elif self.configfile.upper() == 'DEFAULT':
            # no file given: run with built-in defaults and write them out
            self.changed = True
            self.configfile = os.path.abspath('aeolis.txt')
            self.p = aeolis.constants.DEFAULT_CONFIG

            # add default profile and time series
            self.p.update(dict(nx = 99,
                               ny = 0,
                               xgrid_file = np.arange(0.,100.,1.),
                               bed_file = np.linspace(-5.,5.,100.),
                               wind_file = np.asarray([[0.,10.,0.],
                                                       [3601.,10.,0.]])))
        else:
            logger.log_and_raise('Configuration file not found [%s]' % self.configfile, exc=IOError)
def run(self, callback=None, restartfile=None):
'''Start model time loop
Changes current working directory to the model directory,
prints model configuration parameters and progress indicator
to the screen, writes netCDF4 output and calls a callback
function upon request.
Parameters
----------
callback : str or function
The callback function is called at the start of every
single time step and takes the AeoLiS model object as
input. The callback function can be used to interact with
the model during simulation (e.g. update the bed with new
measurements). See for syntax
:func:`~model.AeoLiSRunner.parse_callback()`.
restartfile : str
Path to previously written restartfile. The model state is
loaded from this file after initialization of the model.
See Also
--------
model.AeoLiSRunner.parse_callback
'''
# http://www.patorjk.com/software/taag/
# font: Colossal
if (logger.hasHandlers()):
logger.handlers.clear()
logger.setLevel(logging.DEBUG)
# initialize file logger
filehandler = logging.FileHandler('%s.log' % os.path.splitext(self.configfile)[0], mode='w')
filehandler.setLevel(logging.INFO)
filehandler.setFormatter(logging.Formatter('%(asctime)-15s %(name)-8s %(levelname)-8s %(message)s'))
logger.addHandler(filehandler)
# initialize console logger
streamhandler = logging.StreamHandler()
streamhandler.setLevel(20)
streamhandler.setFormatter(StreamFormatter())
logger.addHandler(streamhandler)
logger.info('**********************************************************')
logger.info(' ')
logger.info(' d8888 888 d8b .d8888b. ')
logger.info(' d88888 888 Y8P d88P Y88b ')
logger.info(' d88P888 888 Y88b. ')
logger.info(' d88P 888 .d88b. .d88b. 888 888 "Y888b. ')
logger.info(' d88P 888 d8P Y8b d88""88b 888 888 "Y88b. ')
logger.info(' d88P 888 88888888 888 888 888 888 "888 ')
logger.info(' d8888888888 Y8b. Y88..88P 888 888 Y88b d88P ')
logger.info(' d88P 888 "Y8888 "Y88P" 88888888 888 "Y8888P" ')
logger.info(' ')
logger.info(' Version: %-45s' % __version__)
# logger.info(' Git hash: %-45s' % __gitversion__) # commenting for now until we implement pyproject.toml
logger.info(' ')
# set working directory
fpath, fname = os.path.split(self.configfile)
if fpath != os.getcwd():
self.cwd = os.getcwd()
os.chdir(fpath)
logger.info('Changed working directory to: %s\n', fpath)
# print settings
self.print_params()
# write settings
self.write_params()
# parse callback
if callback is not None:
callback = self.parse_callback(callback)
else:
callback = self.parse_callback(self.p['callback'])
if callback is not None:
logger.info('Applying callback function: %s()\n', callback.__name__)
# initialize model
self.initialize()
self.load_hotstartfiles()
# load restartfile
if self.load_restartfile(restartfile):
logger.info('Loaded model state from restart file: %s\n', restartfile)
# start model loop
self.t0 = time.time()
self.output_write()
while self.t <= self.p['tstop']:
if callback is not None:
callback(self)
self.update()
self.output_write()
self.print_progress()
# finalize model
self.finalize()
self.print_stats()
if self.cwd is not None:
os.chdir(self.cwd)
logging.shutdown()
logging.shutdown()
def set_configfile(self, configfile):
'''Set model configuration file name'''
self.changed = False
if os.path.exists(configfile):
self.configfile = os.path.abspath(configfile)
else:
self.configfile = configfile
def set_params(self, **kwargs):
'''Set model configuration parameters'''
if len(kwargs) > 0:
self.changed = True
self.p.update(kwargs)
def get_statistic(self, var, stat='avg'):
'''Return statistic of spatial grid
Parameters
----------
var : str
Name of spatial grid
stat : str
Name of statistic (avg, sum, var, min or max)
Returns
-------
numpy.ndarray
Statistic of spatial grid
'''
if stat in ['min', 'max', 'sum']:
return self.o[var][stat]
elif stat == 'avg':
if self.n > 0:
return self.o[var]['sum'] / self.n
else:
return np.zeros(self.o[var]['sum'].shape)
elif stat == 'var':
if self.n > 1:
return (self.o[var]['var'] - self.o[var]['sum']**2 / self.n) \
/ (self.n - 1)
else:
return np.zeros(self.o[var]['var'].shape)
else:
return None
def get_var(self, var, clear=True):
'''Returns spatial grid, statistic or model configuration parameter
Overloads the :func:`~model.AeoLiS.get_var()` function and
extends it with the functionality to return statistics on
spatial grids by adding a postfix to the variable name
(e.g. Ct_avg). Supported statistics are avg, sum, var, min and
max.
Parameters
----------
var : str
Name of spatial grid or model configuration
parameter. Spatial grid name can be extended with a
postfix to request a statistic (_avg, _sum, _var, _min or
_max).
clear : bool
Clear output statistics afterwards.
Returns
-------
np.ndarray or int, float, str or list
Spatial grid, statistic or model configuration parameter
Examples
--------
>>> # returns average sediment concentration
... model.get_var('Ct_avg')
>>> # returns variance in wave height
... model.get_var('Hs_var')
See Also
--------
model.AeoLiS.get_var
'''
self.clear = clear
if '_' in var:
var, stat = var.split('_')
if var in self.o:
return self.get_statistic(var, stat)
# TODO: delete in future releases
if '.' in var:
warnings.warn('The use of "%s" is deprecated, use '
'"%s" instead.' % (var, var.replace('.','_')), DeprecationWarning)
var, stat = var.split('.')
if var in self.o:
return self.get_statistic(var, stat)
return super(AeoLiSRunner, self).get_var(var)
    def initialize(self):
        '''Initialize model

        Overloads the :func:`~model.AeoLiS.initialize()` function, but
        also initializes output statistics.
        '''

        super(AeoLiSRunner, self).initialize()
        # set up netCDF output file and statistics matrices
        self.output_init()
    def update(self, dt=-1):
        '''Time stepping function

        Overloads the :func:`~model.AeoLiS.update()` function,
        but also updates output statistics and clears output
        statistics upon request.

        Parameters
        ----------
        dt : float, optional
            Time step in seconds.
        '''

        # clear statistics when a previous get_var() call requested it
        # NOTE(review): the `self.dt < -1` clause looks suspicious
        # (dt defaults to -1 and is otherwise a positive time step) —
        # confirm the intended condition
        if self.clear or self.dt < -1:
            self.output_clear()
            self.clear = False

        super(AeoLiSRunner, self).update(dt=dt)
        # accumulate min/max/var/sum statistics for this time step
        self.output_update()
def write_params(self):
'''Write updated model configuration to configuration file
Creates a backup in case the model configration file already
exists.
See Also
--------
inout.backup
'''
if self.changed:
aeolis.inout.backup(self.configfile)
aeolis.inout.write_configfile(self.configfile, self.p)
self.changed = False
def output_init(self):
'''Initialize netCDF4 output file and output statistics dictionary'''
self.p['output_vars'] = makeiterable(self.p['output_vars'])
self.p['output_types'] = makeiterable(self.p['output_types'])
# determine unique combinations of variables and types
self.p['_output_vars'] = {}
for var in self.p['output_vars']:
if '_' in var:
var0, ext = var.split('_')
# TODO: delete in future release
elif '.' in var:
warnings.warn('The use of "%s" is deprecated, use '
'"%s" instead.' % (var, var.replace('.','_')), DeprecationWarning)
var0, ext = var.split('.')
else:
var0, ext = var, None
if var0 not in self.p['_output_vars']:
self.p['_output_vars'][var0] = []
if ext not in self.p['_output_vars'][var0]:
self.p['_output_vars'][var0].append(ext)
for ext in self.p['output_types']:
if ext not in self.p['_output_vars'][var0]:
self.p['_output_vars'][var0].append(ext)
aeolis.netcdf.initialize(self.p['output_file'],
self.p['_output_vars'],
self.s,
self.p,
self.dimensions())
self.output_clear()
def output_clear(self):
'''Clears output statistics dictionary
Creates a matrix for minimum, maximum, variance and summed
values for each output variable and sets the time step counter
to zero.
'''
for k in self.p['_output_vars'].keys():
s = self.get_var_shape(k)
self.o[k] = dict(min=np.zeros(s) + np.inf,
max=np.zeros(s) - np.inf,
var=np.zeros(s),
sum=np.zeros(s))
self.n = 0
def output_update(self):
'''Updates output statistics dictionary
Updates matrices with minimum, maximum, variance and summed
values for each output variable with current spatial grid
values and increases time step counter with one.
'''
for k, exts in self.p['_output_vars'].items():
v = self.get_var(k, clear=False).copy()
if 'min' in exts:
self.o[k]['min'] = np.minimum(self.o[k]['min'], v)
if 'max' in exts:
self.o[k]['max'] = np.maximum(self.o[k]['max'], v)
if 'sum' in exts or 'avg' in exts or 'var' in exts:
self.o[k]['sum'] = self.o[k]['sum'] + v
if 'var' in exts:
self.o[k]['var'] = self.o[k]['var'] + v**2
#also update the q variable here
self.s['q'] = np.hypot(self.s['qs'], self.s['qn'])
self.n += 1
    def output_write(self):
        '''Appends output to netCDF4 output file

        If the time since the last output is equal or larger than the
        set output interval, append current output to the netCDF4
        output file. Computes the average and variance values based on
        available output statistics and clear output statistics
        dictionary.

        '''

        if self.t - self.tout >= self.p['output_times'] or self.t == 0.:
            
            variables = {}
            variables['time'] = self.t
            for k, exts in self.p['_output_vars'].items():
                for ext in exts:
                    # `None` requests the raw grid; any other extension
                    # is a statistic written under the '<var>_<ext>' name
                    if ext is None:
                        variables[k] = self.get_var(k, clear=False).copy()
                    else:
                        variables['%s_%s' % (k, ext)] = self.get_statistic(k, ext)

            aeolis.netcdf.append(self.p['output_file'], variables)

            # reset the accumulators so the next interval starts clean
            self.output_clear()
            self.tout = self.t
            
        # periodically dump a restart file if restart output is enabled
        if self.p['restart'] and self.t - self.trestart >= self.p['restart']:
            self.dump_restartfile()
            self.trestart = self.t
def load_hotstartfiles(self):
'''Load model state from hotstart files
Hotstart files are plain text representations of model state
variables that can be used to hotstart the (partial) model
state. Hotstart files should have the name of the model state
variable it contains and have the extension
`.hotstart`. Hotstart files differ from restart files in that
restart files contain entire model states and are pickled
Python objects.
See Also
--------
model.AeoLiSRunner.load_restartfile
'''
for fname in glob.glob('*.hotstart'):
var = os.path.splitext(fname)[0]
if var in self.s.keys():
shp = self.s[var].shape
self.s[var] = np.loadtxt(fname).reshape(shp)
self.s.set_immutable(var)
logger.info('Loaded "%s" from hotstart file.' % var)
else:
logger.warning('Unrecognized hotstart file [%s]' % fname)
def load_restartfile(self, restartfile):
'''Load model state from restart file
Parameters
----------
restartfile : str
Path to previously written restartfile.
'''
if restartfile:
if os.path.exists(restartfile):
with open(restartfile, 'r') as fp:
state = pickle.load(fp)
self.t = state['t']
self.p = state['p']
self.s = state['s']
self.l = state['l']
self.c = state['c']
self.trestart = self.t
return True
else:
logger.log_and_raise('Restart file not found [%s]' % restartfile, exc=IOError)
return False
def dump_restartfile(self):
'''Dump model state to restart file'''
restartfile = '%s.r%010d' % (os.path.splitext(self.p['output_file'])[0], int(self.t))
with open(restartfile, 'w') as fp:
pickle.dump({'t':self.t,
'p':self.p,
's':self.s,
'l':self.l,
'c':self.c}, fp)
logger.info('Written restart file [%s]' % restartfile)
def parse_callback(self, callback):
'''Parses callback definition and returns function
The callback function can be specified in two formats:
- As a native Python function
- As a string refering to a Python script and function,
separated by a colon (e.g. ``example/callback.py:function``)
Parameters
----------
callback : str or function
Callback definition
Returns
-------
function
Python callback function
'''
if isinstance(callback, str):
if ':' in callback:
fname, func = callback.split(':')
if os.path.exists(fname):
mod = imp.load_source('callback', fname)
if hasattr(mod, func):
return getattr(mod, func)
elif hasattr(callback, '__call__'):
return callback
elif callback is None:
return callback
logger.warning('Invalid callback definition [%s]', callback)
return None
    def print_progress(self, fraction=.01, min_interval=1., max_interval=60.):
        '''Print progress to screen

        Parameters
        ----------
        fraction : float, optional
            Fraction of simulation at which to print progress (default: 1%)
        min_interval : float, optional
            Minimum time in seconds between subsequent progress prints (default: 1s)
        max_interval : float, optional
            Maximum time in seconds between subsequent progress prints (default: 60s)

        '''

        # fraction of the simulation period completed so far
        p = (self.t-self.p['tstart']) / (self.p['tstop']-self.p['tstart'])
        # completed fraction rounded up to the next reporting threshold
        pr = np.ceil(p/fraction)*fraction

        t = time.time()
        interval = t - self.tlog

        # first time step: print the header and start collecting time steps
        if self.get_count('time') == 1:
            logger.info('   Time elapsed / Total time / Time remaining / Average Timestep')
            self.dt_array = []
        self.dt_array.append(self.dt)

        # NOTE(review): when p == 0 (t == tstart) the divisions by p below
        # raise ZeroDivisionError if this branch is entered -- confirm that
        # the first progress line is only printed after the first step.
        if (np.mod(p, fraction) < .01 and self.plog != pr) or interval > max_interval:
            t1 = timedelta(0, round(t-self.t0))
            t2 = timedelta(0, round((t-self.t0)/p))
            t3 = timedelta(0, round((t-self.t0)*(1.-p)/p))
            # average model time step since the previous progress line
            dt_avg = np.average(self.dt_array)
            logger.info('%05.1f%%   %12s / %10s / %14s / %0.1f' % (p * 100., t1, t2, t3, dt_avg))
            self.tlog = time.time()
            self.plog = pr
            self.dt_array = []
    def print_params(self):
        '''Print model configuration parameters to screen'''

        # column width follows the longest parameter name
        maxl = np.max([len(par) for par in self.p.keys()])
        fmt1 = ' %%-%ds = %%s' % maxl
        fmt2 = ' %%-%ds %%s' % maxl

        logger.info('**********************************************************')
        logger.info('PARAMETER SETTINGS                                        ')
        logger.info('**********************************************************')

        for par, val in sorted(self.p.items()):
            if isiterable(val):
                if par.endswith('_file'):
                    # file contents are not printed; show the file name instead
                    logger.info(fmt1 % (par, '%s.txt' % par.replace('_file', '')))
                elif len(val) > 0:
                    # first item on the parameter line, the rest on
                    # continuation lines without the parameter name
                    logger.info(fmt1 % (par, aeolis.inout.print_value(val[0])))
                    for v in val[1:]:
                        logger.info(fmt2 % ('', aeolis.inout.print_value(v)))
                else:
                    logger.info(fmt1 % (par, ''))
            else:
                logger.info(fmt1 % (par, aeolis.inout.print_value(val)))

        logger.info('**********************************************************')
        logger.info('')
def print_stats(self):
'''Print model run statistics to screen'''
n_time = self.get_count('time')
n_matrixsolve = self.get_count('matrixsolve')
n_supplylim = self.get_count('supplylim')
logger.info('')
logger.info('**********************************************************')
fmt = '%-20s : %s'
logger.info(fmt % ('# time steps', aeolis.inout.print_value(n_time)))
logger.info(fmt % ('# matrix solves', aeolis.inout.print_value(n_matrixsolve)))
logger.info(fmt % ('# supply lim', aeolis.inout.print_value(n_supplylim)))
logger.info(fmt % ('avg. solves per step',
aeolis.inout.print_value(float(n_matrixsolve) / n_time)))
logger.info(fmt % ('avg. time step',
aeolis.inout.print_value(float(self.p['tstop']) / n_time)))
logger.info('**********************************************************')
logger.info('')
class WindGenerator():
    '''Wind velocity time series generator

    Generates a random wind velocity time series with given mean and
    maximum wind speed, duration and time resolution. The wind
    velocity time series is generated using a Markov Chain Monte Carlo
    (MCMC) approach based on a Weibull distribution. The wind time
    series can be written to an AeoLiS-compatible wind input file
    assuming a constant wind direction of zero degrees.

    The command-line function ``aeolis-wind`` is available that uses
    this class to generate AeoLiS wind input files.

    Examples
    --------
    >>> wind = WindGenerator(mean_speed=10.).generate(duration=24*3600.)
    >>> wind.write_time_series('wind.txt')
    >>> wind.plot()
    >>> wind.hist()

    See Also
    --------
    console.wind

    '''

    # source:
    # http://www.lutralutra.co.uk/2012/07/02/simulating-a-wind-speed-time-series-in-python/

    def __init__(self,
                 mean_speed=9.0,
                 max_speed=30.0,
                 dt=60.,
                 n_states=30,
                 shape=2.,
                 scale=2.):

        self.mean_speed = mean_speed
        self.max_speed = max_speed
        self.n_states = n_states

        self.t = 0.
        self.dt = dt

        # setup matrix
        n_rows = n_columns = n_states
        self.bin_size = float(max_speed) / n_states

        # weibull parameters
        weib_shape = shape
        weib_scale = scale * float(mean_speed) / np.sqrt(np.pi)

        # wind speed bins (bin centers)
        self.bins = np.arange(self.bin_size / 2.0,
                              float(max_speed) + self.bin_size / 2.0,
                              self.bin_size)

        # distribution of probabilities, normalised
        fdpWind = self.weibullpdf(self.bins, weib_scale, weib_shape)
        fdpWind = fdpWind / sum(fdpWind)

        # decreasing function: transitions between distant states are
        # exponentially less likely
        G = np.empty((n_rows, n_columns,))
        for x in range(n_rows):
            for y in range(n_columns):
                G[x][y] = 2.0**float(-abs(x - y))

        # initial value of the P matrix
        P0 = np.diag(fdpWind)

        # initital value of the p vector
        p0 = fdpWind

        # fixed-point iteration until the stationary distribution of the
        # chain matches the target Weibull distribution
        P, p = P0, p0
        rmse = np.inf
        while rmse > 1e-10:
            pp = p
            r = self.matmult4(P, self.matmult4(G, p))
            r = r / sum(r)
            p = p + 0.5 * (p0 - r)
            P = np.diag(p)

            rmse = np.sqrt(np.mean((p - pp)**2))

        N = np.diag([1.0 / i for i in self.matmult4(G, p)])
        MTM = self.matmult4(N, self.matmult4(G, P))

        # cumulative transition matrix used for sampling the next state
        self.MTMcum = np.cumsum(MTM, 1)


    def __getitem__(self, s):
        return np.asarray(self.wind_speeds[s])


    def generate(self, duration=3600.):
        '''Generate a wind speed time series of the given duration [s]'''

        # initialise series
        self.state = 0
        self.states = []
        self.wind_speeds = []
        self.randoms1 = []
        self.randoms2 = []

        self.update()
        self.t = 0.

        while self.t < duration:
            self.update()

        return self


    def update(self):
        '''Draw the next wind speed sample and advance time by ``dt``'''

        r1 = np.random.uniform(0, 1)
        r2 = np.random.uniform(0, 1)

        self.randoms1.append(r1)
        self.randoms2.append(r2)

        # sample the next state from the cumulative transition matrix
        self.state = next(j for j, v in enumerate(self.MTMcum[self.state]) if v > r1)
        self.states.append(self.state)

        # jitter the speed uniformly within the selected bin
        u = np.maximum(0., self.bins[self.state] - 0.5 + r2 * self.bin_size)
        self.wind_speeds.append(u)

        self.t += self.dt


    def get_time_series(self):
        '''Return time [s] and wind speed [m/s] arrays'''

        u = np.asarray(self.wind_speeds)
        t = np.arange(len(u)) * self.dt

        return t, u


    def write_time_series(self, fname):
        '''Write the series to an AeoLiS wind input file

        Columns are time [s], wind speed [m/s] and wind direction
        (constant zero).

        '''

        t, u = self.get_time_series()

        # np.column_stack replaces the deprecated np.asmatrix construction
        M = np.column_stack((t, u, np.zeros(len(t))))

        np.savetxt(fname, M)


    def plot(self):
        '''Plot the generated wind speed time series'''

        t, u = self.get_time_series()

        fig, axs = plt.subplots(figsize=(10, 4))
        axs.plot(t, u, '-k')
        axs.set_ylabel('wind speed [m/s]')
        axs.set_xlabel('time [s]')
        axs.set_xlim((0, np.max(t)))
        axs.grid()

        return fig, axs


    def hist(self):
        '''Plot a histogram of the generated wind speeds'''

        fig, axs = plt.subplots(figsize=(10, 4))
        # `normed` was removed from matplotlib; `density` is the
        # equivalent replacement
        axs.hist(self.wind_speeds, bins=self.bins, density=True, color='k')
        axs.set_xlabel('wind speed [m/s]')
        axs.set_ylabel('occurence [-]')
        axs.grid()

        return fig, axs


    @staticmethod
    def weibullpdf(data, scale, shape):
        '''Evaluate the Weibull probability density at each value in ``data``'''
        return [(shape / scale)
                * ((x / scale)**(shape - 1))
                * np.exp(-1 * (x / scale)**shape)
                for x in data]


    @staticmethod
    def matmult4(m, v):
        '''Row-wise product of ``m`` with a vector (or matrix) ``v``

        Equivalent to the original reduce/operator implementation, without
        requiring ``functools.reduce``.

        '''
        return [np.dot(r, v) for r in m]
from __future__ import absolute_import, division
import logging
import numpy as np
from matplotlib import pyplot as plt
from numba import njit
from scipy.interpolate import NearestNDInterpolator
# package modules
from aeolis.utils import *
# initialize module-level logger; handlers/levels are configured by the
# application that imports this module
logger = logging.getLogger(__name__)
def interpolate(s, p, t):
    '''Interpolate hydrodynamic and meteorological conditions to current time step

    Interpolates the hydrodynamic and meteorological time series to
    the current time step, if available. Meteorological parameters are
    stored as dictionary rather than a single value.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters
    t : float
        Current time

    Returns
    -------
    dict
        Spatial grids

    '''

    if p['process_tide']:

        # Check if SWL or zs are not provided by some external model
        # In that case, skip initialization
        if ('zs' not in p['external_vars']):

            if p['tide_file'] is not None:
                s['SWL'][:,:] = interp_circular(t,
                                                p['tide_file'][:,0],
                                                p['tide_file'][:,1])
            else:
                s['SWL'][:,:] = 0.

            # apply complex mask
            s['SWL'] = apply_mask(s['SWL'], s['tide_mask'])

        # External model input:
        elif ('zs' in p['external_vars']):
            s['SWL'] = s['zs'][:]
            s['SWL'] = apply_mask(s['SWL'], s['tide_mask'])

            # The dry points have to be filtered out, to prevent issues with run-up calculation later
            iwet = s['zs'] - s['zb'] > 2. * p['eps']
            # np.nan is the spelling that survives NumPy 2.0 (np.NaN was removed)
            s['SWL'][~iwet] = np.nan

            # fill the dry (NaN) points from the nearest wet neighbour
            mask = np.where(~np.isnan(s['SWL']))
            interp = NearestNDInterpolator(np.transpose(mask), s['SWL'][mask])
            s['SWL'] = interp(*np.indices(s['SWL'].shape))

            logger.warning('!Be carefull, according to current implementation of importing waterlevel from Flexible Mesh, SWL is equal to DSWL = zs!')
    else:
        s['SWL'] = s['zb'] * 0.

    # Check if Hs or Tp are not provided by some external model
    # In that case, skip initialization
    if ('Hs' not in p['external_vars']) and ('Tp' not in p['external_vars']):
        if p['process_wave'] and p['wave_file'] is not None:

            # First compute wave height, than run-up + set-up and finally wave height including set-up for mixing
            # determine water depth
            h = np.maximum(0., s['SWL'] - s['zb'])

            s['Hs'][:,:] = interp_circular(t,
                                           p['wave_file'][:,0],
                                           p['wave_file'][:,1])

            s['Tp'][:,:] = interp_circular(t,
                                           p['wave_file'][:,0],
                                           p['wave_file'][:,2])

            # depth-limit the wave height (breaker parameter gamma)
            s['Hs'] = np.minimum(h * p['gamma'], s['Hs'])

            # apply complex mask
            s['Hs'] = apply_mask(s['Hs'], s['wave_mask'])
            s['Tp'] = apply_mask(s['Tp'], s['wave_mask'])

        else:
            s['Hs'] = s['zb'] * 0.
            s['Tp'] = s['zb'] * 0.

    # apply complex mask (also for external model input)
    else:
        s['Hs'] = apply_mask(s['Hs'], s['wave_mask'])
        s['Tp'] = apply_mask(s['Tp'], s['wave_mask'])

    if p['process_runup']:
        ny = p['ny']

        if ('Hs' not in p['external_vars']):
            # do this computation seperately on every y for now so alongshore
            # variable wave runup can be added in the future
            for iy in range(ny + 1):
                hs = s['Hs'][iy][0]
                tp = s['Tp'][iy][0]
                wl = s['SWL'][iy][0]

                eta, sigma_s, R = calc_runup_stockdon(hs, tp, p['beach_slope'])
                s['R'][iy][:] = R
                s['eta'][iy][:] = eta
                s['sigma_s'][iy][:] = sigma_s

                if hasattr(s['runup_mask'], "__len__"):
                    s['eta'][iy][:] = apply_mask(s['eta'][iy][:], s['runup_mask'][iy][:])
                    s['R'][iy][:] = apply_mask(s['R'][iy][:], s['runup_mask'][iy][:])

                s['TWL'][iy][:] = s['SWL'][iy][:] + s['R'][iy][:]
                s['DSWL'][iy][:] = s['SWL'][iy][:] + s['eta'][iy][:] # Was s['zs'] before

        if ('Hs' in p['external_vars']):
            eta, sigma_s, R = calc_runup_stockdon(s['Hs'], s['Tp'], p['beach_slope'])
            s['R'][:] = R
            if hasattr(s['runup_mask'], "__len__"):
                s['eta'] = apply_mask(s['eta'], s['runup_mask'])
                s['R'] = apply_mask(s['R'], s['runup_mask'])

            s['TWL'][:] = s['SWL'][:] + s['R'][:]
            s['DSWL'][:] = s['SWL'][:] # + s['eta'][:] # DSWL is actually provided by FM (?)

    if p['process_wave'] and p['wave_file'] is not None:

        # wave height including set-up, used for mixing
        h_mix = np.maximum(0., s['TWL'] - s['zb'])

        s['Hsmix'][:,:] = interp_circular(t,
                                          p['wave_file'][:,0],
                                          p['wave_file'][:,1])

        s['Hsmix'] = np.minimum(h_mix * p['gamma'], s['Hsmix'])

        # apply complex mask
        s['Hsmix'] = apply_mask(s['Hsmix'], s['wave_mask'])

    if p['process_moist'] and p['method_moist_process'].lower() == 'surf_moisture' and p['meteo_file'] is not None:

        m = interp_array(t,
                         p['meteo_file'][:,0],
                         p['meteo_file'][:,1:], circular=True)

        # Meteorological parameters (Symbols according to KNMI, units according to the Penman's equation)
        # T: Temperature, Degrees Celsius
        # Q : Global radiation, MJ/m2/d
        # RH : Precipitation, mm/h
        # P : Atmospheric pressure, kPa
        # U: Relative humidity, %
        s['meteo'] = dict(zip(('T','Q','RH','P','U') , m))

    # Ensure compatibility with XBeach: zs >= zb
    s['zs'] = s['SWL'].copy()
    ix = (s['zb'] > s['zs'])
    s['zs'][ix] = s['zb'][ix]

    return s
def update(s, p, dt, t):
    '''Update soil moisture content

    Updates soil moisture content in all cells. The soil moisure
    content is computed either with the infiltration-method or
    surface_moist method. The infiltration method accounts for surface moisture
    as a function of runup and the subsequent infiltration and evaporation.
    The surface_moist method takes into account the effect of wave runup,
    precipitation, evaporation, infiltration, and capillary rise from the
    groundwater table.

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters
    dt : float
        Current time step
    t : float
        Current time

    Returns
    -------
    dict
        Spatial grids

    '''

    # Groundwater level Boussinesq (1D CS-transects)
    if p['process_groundwater']:

        #Initialize GW levels
        if t == p['tstart']:
            s['gw'][:,:] = p['in_gw']
            s['gw_prev'] = s['gw']
            s['wetting'] = s['gw'] > s['gw_prev']

        #Specify wetting or drying conditions in previous timestep
        s['wetting'] = s['gw'] > s['gw_prev']
        #Save groundwater from previous timestep
        s['gw_prev'] = s['gw']

        #Decrease timestep for GW computations
        # NOTE(review): if tfac_gw > dt this truncates to 0 and the division
        # below raises ZeroDivisionError -- confirm expected parameter ranges
        dt_gw = int(dt / p['tfac_gw'])

        for i in range(int(dt / dt_gw)):
            t_gw = t + i * dt_gw
            interpolate(s, p, t_gw)

            #Define index of shoreline location
            shl_ix = np.argmax(s['zb'] > s['DSWL'], axis=1) - 1
            #Define index of runup limit
            runup_ix = np.argmax(s['zb'] > s['TWL'], axis=1) - 1

            # Landward boundary condition
            if p['boundary_gw'].lower() == 'no_flow':
                #Define landward boundary dgw/dx=0
                bound = 0
            elif p['boundary_gw'].lower() == 'static':
                #Define landward boundary
                bound = 1
            else:
                # the original message lacked a %s placeholder, which makes
                # the %-formatting itself raise TypeError
                logger.log_and_raise('Unknown landward groundwater boundary condition [%s]' % p['boundary_gw'], exc=ValueError)

            #Runge-Kutta timestepping
            f1 = Boussinesq(s['gw'], s['DSWL'], s['ds'], p['GW_stat'], p['K_gw'], p['ne_gw'], p['D_gw'], shl_ix, bound, s['zb'], p['process_seepage_face'])
            f2 = Boussinesq(s['gw'] + dt_gw / 2 * f1, s['DSWL'], s['ds'], p['GW_stat'], p['K_gw'], p['ne_gw'], p['D_gw'], shl_ix, bound, s['zb'], p['process_seepage_face'])
            f3 = Boussinesq(s['gw'] + dt_gw / 2 * f2, s['DSWL'], s['ds'], p['GW_stat'], p['K_gw'], p['ne_gw'], p['D_gw'], shl_ix, bound, s['zb'], p['process_seepage_face'])
            f4 = Boussinesq(s['gw'] + dt_gw * f3, s['DSWL'], s['ds'], p['GW_stat'], p['K_gw'], p['ne_gw'], p['D_gw'], shl_ix, bound, s['zb'], p['process_seepage_face'])

            #Update groundwater level
            s['gw'] = s['gw'] + dt_gw / 6 * (f1 + 2 * f2 + 2 * f3 + f4)

            #Add infiltration from wave runup according to Nielsen (1990)
            if p['process_wave']:
                #Compute f(x) = distribution of infiltrated water
                fx = np.zeros(s['gw'].shape)
                fx_ix = np.zeros_like(shl_ix)
                runup_overheight_distr(fx, fx_ix, shl_ix, runup_ix, s['x'])

                # Update groundwater level with overheight due to runup
                s['gw'] = s['gw'] + p['Cl_gw'] * fx

            # Apply GW complex mask
            s['gw'] = apply_mask(s['gw'], s['gw_mask'])

            # Do not allow GW levels above ground level
            s['gw'] = np.minimum(s['gw'], s['zb'])

            # Define cells below setup level
            ixg = s['zb'] < s['DSWL']
            # Set gw level to setup level in cells below setup level
            s['gw'][ixg] = s['DSWL'][ixg]

    # Compute surface moisture with infiltration method using Darcy
    if p['process_moist']:
        if p['method_moist_process'].lower() == 'infiltration':
            F1 = -np.log(.5) / p['Tdry']
            ix = s['TWL'] - s['zb'] > p['eps']
            s['moist'][ ix] = p['porosity']
            s['moist'][~ix] *= np.exp(-F1 * dt)
            s['moist'][:,:] = np.maximum(0., np.minimum(p['porosity'],\
                                                        s['moist'][:,:]))

        # Compute surface moisture accounting for runup, capillary rise and precipitation/evaporation
        elif p['method_moist_process'].lower() == 'surf_moisture':
            # NOTE(review): `is None` only catches an unset flag; a flag set
            # to False also means the groundwater level is not computed --
            # confirm whether a falsy check was intended
            if p['process_groundwater'] is None:
                logger.log_and_raise('process_groundwater is not activated, the groundwater level is not computed within the program but set constant at 0 m', exc=ValueError)

            #Infiltration
            F1 = -np.log(.5) / p['Tdry']
            s['moist'] += np.minimum(0, (s['moist']-p['fc'])*(np.exp(-F1*dt)-1))

            #If the cell is flooded (runup) in this timestep, assume satiation
            ix = s['TWL'] > s['zb']
            s['moist'][ix] = p['satd_moist']

            #Update surface moisture with respect to evaporation, condensation, and precipitation
            met = s['meteo']
            evo = evaporation(s, p, met)
            evo = evo / 24. / 3600. / 1000. # convert evaporation from mm/day to m/s
            pcp = met['RH'] / 3600. / 1000. # convert precipitation from mm/hr to m/s
            s['moist'][~ix] = np.maximum(s['moist'][~ix] + (pcp - evo[~ix]) * dt / p['thick_moist'], p['resw_moist'])
            s['moist'][~ix] = np.minimum(s['moist'][~ix], p['satd_moist'])

            #Compute surface moisture due to capillary processes (van Genuchten and Mualem II)
            #Compute distance from gw table to the soil surface
            h = np.maximum(0, (s['zb'] - s['gw']) * 100) #h in cm to match convention of alfa (cm-1)

            if p['process_scanning']:
                #Initialize value of surface moisture due to capillary rise
                if t == 0:
                    s['moist_swr'] = p['resw_moist'] + (p['satw_moist'] - p['resw_moist']) \
                                    / (1 + abs(p['alfaw_moist'] * h) ** p['nw_moist']) ** p['mw_moist']
                    s['h_delta'][:,:] = np.maximum(0, (s['zb'] - s['gw'])*100)
                    # start on the main curves: the original used '==' here,
                    # a comparison that silently discarded the reset
                    s['scan_w'][:,:] = False
                    s['scan_d'][:,:] = False
                else:
                    #Compute h_delta
                    s['h_delta'] = hdelta(s['wetting'], s['scan_w'], s['gw'], s['gw_prev'], s['scan_d'], s['h_delta'], s['zb'], s['scan_w_moist'], s['w_h'], p['satd_moist'], s['d_h'], p['satw_moist'], p['alfaw_moist'], p['resw_moist'], p['mw_moist'], p['nw_moist'])

                #Compute moisture of h for the wetting curve
                s['w_h'] = p['resw_moist'] + (p['satw_moist'] - p['resw_moist']) \
                            / (1 + abs(p['alfaw_moist'] * h) ** p['nw_moist']) ** p['mw_moist']

                #Compute moisture of h_delta for the wetting curve
                s['w_hdelta'] = p['resw_moist'] + (p['satw_moist'] - p['resw_moist']) \
                                / (1 + abs(p['alfaw_moist'] * s['h_delta']) ** p['nw_moist']) ** p['mw_moist']

                #Compute moisture of h for the drying curve
                s['d_h'] = p['resd_moist'] + (p['satd_moist'] - p['resd_moist']) \
                            / (1 + abs(p['alfad_moist'] * h) ** p['nd_moist']) ** p['md_moist']

                #Compute moisture of h_delta for the drying curve
                s['d_hdelta'] = p['resd_moist'] + (p['satd_moist'] - p['resd_moist']) \
                                / (1 + abs(p['alfad_moist'] * s['h_delta']) ** p['nd_moist']) ** p['md_moist']

                #Compute moisture content with the wetting scanning curve
                s['scan_w_moist'] = np.maximum(np.minimum(s['w_h'] + (p['satw_moist'] - s['w_h']) / np.maximum(p['satw_moist'] - s['w_hdelta'], 0.0001) \
                                    * (s['d_hdelta'] - s['w_hdelta']), s['d_h']), s['w_h'])

                #Compute moisture content with the drying scanning curve
                s['scan_d_moist'] = np.maximum(np.minimum(s['w_h'] + (s['w_hdelta'] - s['w_h']) / np.maximum(p['satd_moist'] - s['w_h'], 0.0001) \
                                    * (s['d_h'] - s['w_h']), s['d_h']), s['w_h'])

                #Select SWR curve to compute moisture content due to capillary processes
                s['moist_swr'], s['scan_d'], s['scan_w'] = SWR_curve(s['wetting'], s['gw'], s['gw_prev'], s['scan_w'], s['moist_swr'], s['w_h'], s['scan_d'], s['scan_w_moist'], s['d_h'], s['scan_d_moist'])

            else:
                # no hysteresis: follow the main wetting or drying curve
                ixw = s['wetting'] == True
                s['moist_swr'][ixw] = p['resw_moist'] + (p['satw_moist'] - p['resw_moist']) \
                                        / (1 + abs(p['alfaw_moist'] * h[ixw]) ** p['nw_moist']) ** p['mw_moist']
                s['moist_swr'][~ixw] = p['resd_moist'] + (p['satd_moist'] - p['resd_moist']) \
                                        / (1 + abs(p['alfad_moist'] * h[~ixw]) ** p['nd_moist']) ** p['md_moist']

            #Update surface moisture with respect to capillary processes
            s['moist'] = np.minimum(np.maximum(s['moist'], s['moist_swr']), p['satd_moist'])

        else:
            logger.log_and_raise('Unknown moisture process formulation [%s]' % p['method_moist_process'], exc=ValueError)

    # salinitation
    if p['process_salt']:
        met = s['meteo']
        F2 = -np.log(.5) / p['Tsalt']
        # NOTE(review): `ix` is the flooded-cell mask computed in the moisture
        # section above; if process_moist is disabled this name is unbound and
        # raises NameError -- confirm whether salt requires moisture processes
        s['salt'][ ix,0] = 1.
        s['salt'][~ix,0] *= np.exp(-F2 * dt)
        pcp = met['RH'] / 3600. / 1000. # convert precipitation from mm/hr to m/s
        s['salt'][:,:,0] = np.minimum(1., s['salt'][:,:,0] + pcp * dt / p['layer_thickness'])

    return s
@njit
def Boussinesq (GW, DSWL, ds, GW_stat, K_gw, ne_gw, D_gw, shl_ix, bound, zb, process_seepage_face):
    '''Compute the groundwater level change rate from the Boussinesq equation

    Parameters
    ----------
    GW : np.ndarray
        Groundwater level per cross-shore transect [m] (modified in place
        to enforce boundary conditions)
    DSWL : np.ndarray
        Dynamic still water level (setup) [m]
    ds : np.ndarray
        Cross-shore grid cell size [m]
    GW_stat : float
        Static groundwater level for the landward boundary [m]
    K_gw : float
        Hydraulic conductivity [m/s]
    ne_gw : float
        Effective porosity [-]
    D_gw : float
        Aquifer depth [m]
    shl_ix : np.ndarray
        Shoreline (first dry cell) index per alongshore row
    bound : int
        Landward boundary type: 0 = no-flow (dgw/dx = 0), 1 = static level
    zb : np.ndarray
        Bed level [m]
    process_seepage_face : bool
        Whether to clip groundwater levels to the bed within the seepage face

    Returns
    -------
    np.ndarray
        Groundwater level change rate dGW/dt [m/s]

    '''

    #Define seaward boundary gw=setup
    for i in range (len(GW[:,0])):
        GW[i,shl_ix[i]] = DSWL[i,shl_ix[i]]
        # boundary extends one cell seaward of the shoreline; the original
        # indexed `shl_ix[i-1]` (the previous row's shoreline) instead of
        # `shl_ix[i]-1` (the previous cell in this row)
        GW[i,shl_ix[i]-1] = DSWL[i,shl_ix[i]-1]

    if bound == 0:
        #Define landward boundary dgw/dx=0
        GW[:,-1] = GW[:,-4]
        GW[:,-2] = GW[:,-4]
        GW[:,-3] = GW[:,-4]

    elif bound == 1:
        #Define landward boundary
        GW[:,-1] = GW_stat
        GW[:,-2] = GW_stat
        GW[:,-3] = GW_stat

    #Set GW levels to ground level within seepage face
    if process_seepage_face:
        ixs = np.argmin(GW + 0.05 >= zb, axis=1)
        for i in range(len(ixs)):
            if shl_ix[i] < ixs[i] - 1:
                GW[i,shl_ix[i]:ixs[i]-1] = zb[i,shl_ix[i]:ixs[i]-1]

    #Compute groundwater level change dGW/dt (Boussinesq equation)
    dGW = np.zeros(GW.shape)
    a = np.zeros(GW.shape)
    b = np.zeros(GW.shape)
    c = np.zeros(GW.shape)
    for i in range(len(a[:,0])):
        if shl_ix[i] < len(a[0,:]) - 3:
            # a: second spatial derivative (linear diffusion term)
            # b, c: non-linear flux term and its divergence
            a[i,shl_ix[i]:-2] = (GW[i,shl_ix[i]+1:-1] - 2 * GW[i,shl_ix[i]:-2] + GW[i,shl_ix[i]-1:-3]) / ds[i,shl_ix[i]:-2] ** 2
            b[i,shl_ix[i]:-2] = (GW[i,shl_ix[i]:-2] * (GW[i,shl_ix[i]+1:-1] + GW[i,shl_ix[i]-1:-3])) / ds[i,shl_ix[i]:-2]
            c[i,shl_ix[i]+1:-3] = (b[i,shl_ix[i]+2:-2] - b[i,shl_ix[i]:-4]) / ds[i,shl_ix[i]+1:-3]
            dGW[i,shl_ix[i]+1:-3] = K_gw / ne_gw * (D_gw * a[i,shl_ix[i]+1:-3] + c[i,shl_ix[i]+1:-3])

    return dGW
@njit
def runup_overheight_distr(fx, fx_ix,shl_ix,runup_ix, x):
    '''Distribute the runup-induced groundwater overheight over the runup zone

    Fills ``fx`` with a triangular distribution between the shoreline and the
    runup limit: rising linearly from the shoreline to a peak at two thirds of
    the runup distance, then falling linearly towards the runup limit.

    Parameters
    ----------
    fx : np.ndarray
        Output array receiving the distribution (modified in place)
    fx_ix : np.ndarray
        Integer work array receiving the peak index per row (modified in place)
    shl_ix : np.ndarray
        Shoreline index per alongshore row
    runup_ix : np.ndarray
        Runup-limit index per alongshore row
    x : np.ndarray
        Cross-shore coordinate [m]

    Returns
    -------
    np.ndarray
        The filled distribution ``fx``

    '''
    for i in range(len(fx[:,0])):
        #Define index of peak f(x)
        # float expression is truncated on assignment into the integer array
        fx_ix[i] = (shl_ix[i]) + (2/3 * (runup_ix[i] - shl_ix[i]))
        #Compute f(X)
        fx[i,shl_ix[i]:fx_ix[i]] = (x[i,shl_ix[i]:fx_ix[i]] - x[i,shl_ix[i]]) / (2 / 3 * (x[i,runup_ix[i]] - x[i,shl_ix[i]]))
        fx[i,fx_ix[i]+1:runup_ix[i]] = 3 - (x[i,fx_ix[i]+1:runup_ix[i]]- x[i,shl_ix[i]]) / (1 / 3 * (x[i,runup_ix[i]] - x[i,shl_ix[i]]))
        fx[i,fx_ix[i]]=1

    return fx
@njit
def hdelta(wetting, scan_w, gw, gw_prev, scan_d, h_delta, zb, scan_w_moist, w_h, satd_moist, d_h, satw_moist, alfaw_moist, resw_moist, mw_moist, nw_moist):
    '''Update the matric head at the last wetting/drying reversal (h_delta)

    Tracks, per grid cell, the suction head at which the soil last switched
    between wetting and drying, which anchors the scanning curves of the
    hysteretic soil water retention model.

    Returns
    -------
    np.ndarray
        Updated ``h_delta`` array [cm]

    '''
    for i in range(len(wetting[:,0])):
        for j in range(len(wetting[0,:])):
            #Compute h delta on the main drying and wetting curve
            if scan_w[i,j] == False and wetting[i,j] == True and gw[i,j] < gw_prev[i,j] or scan_d[i,j] == False and wetting[i,j] == False and gw[i,j] > gw_prev[i,j]:
                h_delta[i,j] = np.maximum(0, (zb[i,j] - gw[i,j])*100)

            #Compute h_delta if there is a reversal on the wetting scanning curve
            if scan_w[i,j] == True and wetting[i,j] == True and gw[i,j] < gw_prev[i,j]:
                #Solve hdelta from drying scanning curve for which moist(h) on drying scanning curve equals moist(h) on wetting scanning curve
                #intermediate solution:
                w_hdelta_int = np.minimum((scan_w_moist[i,j] - w_h[i,j]) * (satd_moist - w_h[i,j]) / (d_h[i,j] - w_h[i,j]) + w_h[i,j], satw_moist)
                #Solve hdelta from wetting curve
                h_delta[i,j] = np.maximum( 1 / alfaw_moist * (((satw_moist - resw_moist) \
                               / np.maximum((w_hdelta_int - resw_moist), 0.00001)) ** (1 / mw_moist) - 1) ** (1 / nw_moist), 0)

            #Compute h_delta if there is a reversal on the drying scanning curve
            if scan_d[i,j] == True and wetting[i,j] == False and gw[i,j] > gw_prev[i,j]:
                # NOTE(review): this branch references resd_moist, alfad_moist,
                # nd_moist, md_moist and scan_d_moist, none of which are
                # parameters of this function, and uses w_h without [i,j]
                # indexing; as written it cannot compile under numba and needs
                # those values passed in -- confirm the intended signature
                #Solve hdelta from wetting scanning curve for which moist(h) on wetting scanning curve equals moist(h) on drying scanning curve
                #Simple iteration method
                hdelta_it = 0 #initialize hdelta
                F_hdelta = 1
                while F_hdelta > 0.01:
                    hdelta_it = hdelta_it + 0.01
                    w_hdelta = (resw_moist + (satw_moist - resw_moist) / (1 + np.abs(alfaw_moist * hdelta_it) ** nw_moist) ** mw_moist)
                    d_hdelta = (resd_moist + (satd_moist - resd_moist) / (1 + np.abs(alfad_moist * hdelta_it) ** nd_moist) ** md_moist)
                    F_hdelta = w_h + (satw_moist - w_h) / np.maximum(satw_moist - w_hdelta, 0.0001) * (d_hdelta - w_hdelta) - scan_d_moist

                # store the iterated value in the output array; the original
                # assigned to (and returned) the function object `hdelta`
                # instead of the array `h_delta`
                h_delta[i,j] = hdelta_it

    return h_delta
@njit
def SWR_curve(wetting,gw,gw_prev,scan_w,moist_swr,w_h,scan_d,scan_w_moist,d_h,scan_d_moist):
    '''Select the soil water retention (SWR) curve per cell and update moisture

    For every grid cell, chooses whether the capillary moisture content
    follows the main wetting curve, the main drying curve, or a scanning
    curve between them, based on the groundwater trend (rising/falling) and
    the curve followed in the previous time step (hysteresis).

    Parameters
    ----------
    wetting : np.ndarray
        Boolean mask: cell was wetting in the previous time step
    gw, gw_prev : np.ndarray
        Current and previous groundwater levels [m]
    scan_w, scan_d : np.ndarray
        Boolean masks: cell follows a wetting/drying scanning curve
    moist_swr : np.ndarray
        Moisture content due to capillary rise (updated in place)
    w_h, d_h : np.ndarray
        Moisture on the main wetting/drying curve at the current head
    scan_w_moist, scan_d_moist : np.ndarray
        Moisture on the wetting/drying scanning curve at the current head

    Returns
    -------
    tuple of np.ndarray
        Updated ``moist_swr``, ``scan_d`` and ``scan_w``

    '''
    for i in range(len(wetting[:,0])):
        for j in range(len(wetting[0,:])):
            #Wetting conditions main curve
            if gw[i,j] >= gw_prev[i,j] and wetting[i,j] == True and scan_w[i,j] == False:
                moist_swr[i,j]=w_h[i,j]
                scan_w[i,j] = False
                scan_d[i,j] = False
            #wetting conditions, timestep of reversal - move onto wetting scanning curve
            elif gw[i,j] >= gw_prev[i,j] and wetting[i,j] == False:
                moist_swr[i,j] = scan_w_moist[i,j]
                # remain on the scanning curve until it merges with the main curve
                scan_w[i,j] = scan_w_moist[i,j] > w_h[i,j]
                scan_d[i,j] = False
            #wetting conditions - followed a wetting scanning curve in previous timestep - continue following scanning curve unless main curve is reached
            elif gw[i,j] >= gw_prev[i,j] and wetting[i,j] == True and scan_w[i,j] == True:
                moist_swr[i,j] = scan_w_moist[i,j]
                scan_w[i,j] = scan_w_moist[i,j] > w_h[i,j]
                scan_d[i,j] = False
            #Drying conditions main curve
            elif gw[i,j] < gw_prev[i,j] and wetting[i,j] == False and scan_d[i,j] == False:
                moist_swr[i,j]=d_h[i,j]
                scan_d[i,j] = False
                scan_w[i,j] = False
            #Drying conditions, timestep of reversal - move onto a drying scanning curve
            elif gw[i,j] < gw_prev[i,j] and wetting[i,j] == True:
                moist_swr[i,j] = scan_d_moist[i,j]
                scan_d[i,j] = scan_d_moist[i,j] < d_h[i,j]
                scan_w[i,j] = False
            #Drying conditions - followed a drying scanning curve in previous timestep - continue following scanning curve unless main curve is reached
            elif gw[i,j] < gw_prev[i,j] and wetting[i,j] == False and scan_d[i,j] == True:
                moist_swr[i,j] = scan_d_moist[i,j]
                scan_d[i,j] = scan_d_moist[i,j] < d_h[i,j]
                scan_w[i,j] = False

    return moist_swr, scan_d, scan_w
def evaporation(s,p,met):
    '''Compute evaporation according to the Penman equation (Shuttleworth, 1993)

    Parameters
    ----------
    s : dict
        Spatial grids
    p : dict
        Model configuration parameters
    met : dict
        meteorologial parameters
        T: Temperature, degrees Celsius
        Q : Global radiation, MJ/m2/d
        P : Atmospheric pressure, kPa
        U: Relative humidity, %

    Returns
    -------
    float
        Evaporation (mm/day)

    '''

    latent = 2.26                                                # latent heat of vaporization of water (MJ/kg)
    slope = vaporation_pressure_slope(met['T'])                  # [kPa/K]
    vp_deficit = saturation_pressure(met['T']) * (1. - met['U'] / 100)  # vapor pressure deficit [kPa]
    psychro = (p['cpair'] * met['P']) / (.622 * latent)          # psychrometric constant [kPa/K]
    wind2m = .174 / np.log10(p['z'] / 2.) * s['uw']              # wind speed at 2 m height [m/s]

    return (slope * met['Q'] + 6.43 * psychro * vp_deficit * (1. + 0.86 * wind2m)) \
        / (latent * (slope + psychro))
def vaporation_pressure_slope(T):
    '''Compute vaporation pressure slope based on air temperature

    Parameters
    ----------
    T : float
        Air temperature in degrees Celcius

    Returns
    -------
    float
        Vaporation pressure slope

    '''

    # Tetens, 1930; Murray, 1967
    es = saturation_pressure(T)
    return 4098. * es / (T + 237.3)**2  # [kPa/K]
def saturation_pressure(T):
    '''Compute saturation pressure based on air temperature, Tetens equation

    Parameters
    ----------
    T : float
        Air temperature in degrees Celcius

    Returns
    -------
    float
        Saturation pressure

    '''

    # Tetens equation, result in kPa
    exponent = 17.27 * T / (T + 237.3)
    return 0.6108 * np.exp(exponent)
def calc_runup_stockdon(Ho, Tp, beta):
    """
    Calculate runup according to /Stockdon et al 2006.

    Works on both array-like and scalar ``Ho``/``Tp``. Returns the setup
    ``eta``, the swash ``sigma_s`` and the runup ``R``.
    """
    if hasattr(Ho, "__len__"):
        shape = np.shape(Ho)
        R = np.zeros(shape)
        sigma_s = np.zeros(shape)
        eta = np.zeros(shape)
        # deep-water wavelength and Iribarren (surf-similarity) number
        Lo = 9.81 * Tp * Tp / (2 * np.pi)
        iribarren = beta / (Ho / Lo) ** (0.5)
        # dissipative conditions (Iribarren number below 0.3)
        dis = (Ho > 0) * (iribarren < 0.3)
        R[dis] = 0.043 * np.sqrt(Ho[dis] * Lo[dis])
        sigma_s[dis] = 0.046 * np.sqrt(Ho[dis] * Lo[dis]) /2
        eta[dis] = R[dis] - sigma_s[dis]
        # non-dissipative conditions (Iribarren number above 0.3)
        ref = (Ho > 0) * (iribarren > 0.3)
        nsigma = 2 # nsigma=1 for R16% and nsigma=2 for R2%
        eta[ref] = 0.35 * beta * np.sqrt(Ho[ref] * Lo[ref])
        sigma_s[ref] = np.sqrt(Ho[ref] * Lo[ref] * (0.563 * (beta * beta) + 0.0004)) * nsigma / 2 / 2
        R[ref] = 1.1 * (eta[ref] + sigma_s[ref])
    else:
        # default for missing / non-positive forcing
        eta = sigma_s = R = 0
        if Ho > 0 and Tp > 0 and beta > 0:
            Lo = 9.81 * Tp * Tp / (2 * np.pi) #wavelength
            iribarren = beta / (Ho / Lo) ** (0.5) #irribarren number
            if iribarren < 0.3:
                # dissipative conditions
                R = 0.043 * np.sqrt(Ho * Lo)
                sigma_s = 0.046 * np.sqrt(Ho * Lo) /2
                eta = R - sigma_s
            else:
                # non-dissipative conditions
                nsigma = 2 # nsigma=1 for R16% and nsigma=2 for R2%
                eta = 0.35 * beta * np.sqrt(Ho * Lo)
                sigma_s = np.sqrt(Ho * Lo * (0.563 * (beta * beta) + 0.0004)) * nsigma / 2 / 2
                R = 1.1 * (eta + sigma_s)
    return eta, sigma_s, R
Aeon
====
Measures how often designated functions, methods, or pieces of code are
executed and what their runtime is. Optionally prints a nice report to
the screen, although the raw data is available for further processing as
well.
Outline
-------
1. Mark parts of the code that should be monitored with the provided
context manager or decorators.
2. Tell your program to output the report or provide you the data when
it's done.
3. Run your program.
4. \?\?\?\?\?
5. Profit.
Basic Usage
-----------
How to designate code that should be monitored.
A free-standing piece of code.
.. code:: python
from aeon import timer
with timer('my measurement'):
# do stuff here...
# to assign the measurement to a specific group
with timer('my measurement', 'general frobnication'):
# do stuff here
A function.
.. code:: python
from aeon import timer
@timer
def my_function():
pass
A method.
.. code:: python
from aeon import timer
class Foo(object):
@timer.method
def bar(self):
pass
How to see the report.
.. code:: python
from aeon import timer
print timer.report()
print timer # equivalent
Further features
----------------
You can instantiate your own timer if you want to, in case you want to
use several in parallel.
.. code:: python
from aeon import Timer
    my_timer = Timer()
with my_timer('my_measurement'):
pass
# or
with my_timer('my_measurement', 'my_group'):
pass
@my_timer
def foo():
pass
class Foo(object):
@my_timer.method
def bar(self):
pass
The timer object can be queried for specific measurements or the data
with which it generates the report.
Also, nothing prevents you from using the Measurement class on its own:
.. code:: python
from aeon import Measurement
m = Measurement()
for i in xrange(100):
m.start()
# stuff happens here
m.stop()
assert m.calls == 100
print m.total_runtime, m.time_per_call
Installation
------------
Installation is easy as:
.. code:: bash
$ sudo pip install aeon
Rationale
---------
The code has originally been used in a computational physics project
where the typical runtime distribution is very dependent on the problem
at hand. It has proven itself useful for giving a feel for where time is
spent during computation and quickly showing when parts of code went on
a riot. In fact, in that project, it is enabled in production since the
overhead is low.
What sets it apart is the possibility to monitor only specific parts of
the code and optionally have these parts logically grouped (by default,
it will use the class or module names).
There are better alternatives for proper benchmarking, like cProfile.
| Aeon | /Aeon-2.0.2.tar.gz/Aeon-2.0.2/README.txt | README.txt |
import warnings
from collections import defaultdict
from functools import update_wrapper
from operator import attrgetter
from os import path
from sys import modules
from time import time
from .measurement_store import MeasurementStore
class Timer(object):
    """
    Collects named runtime measurements and renders reports about them.

    A Timer can be used as a function decorator (``@timer``), as a method
    decorator (``@timer.method``) or as a context manager
    (``with timer('name', 'group'):``).
    """
    default_group = "default"

    def __init__(self):
        self.measurements = MeasurementStore()
        # stack of (name, group) tuples pushed by __call__, popped by __exit__
        self._context = []

    def __call__(self, func_or_name, group=default_group):
        """
        Decorate a function or prepare a context-manager measurement.

        If `func_or_name` is callable, it is wrapped so that every call is
        timed under the function's name, grouped by its module's basename.
        Otherwise `func_or_name` is taken as the measurement name and the
        timer itself is returned for use in a ``with`` statement.
        """
        if callable(func_or_name):
            func = func_or_name
            name = func.__name__
            filename = modules[func.__module__].__file__
            module = path.splitext(path.basename(filename))[0]

            def decorated_func(*args, **kwargs):
                with self(name, module):
                    ret = func(*args, **kwargs)
                return ret

            update_wrapper(decorated_func, func)
            return decorated_func
        else:
            name = func_or_name
            self._context.append((name, group))
            if len(self._context) > 1:
                warnings.warn("You are nesting measurements in {}::{}.".format(name, group))
            return self

    def __enter__(self):
        """
        Start the measurement prepared by calling the timer with a name.
        """
        if not self._context:
            # Fixed: previously raised the undefined name `ArgumentError`,
            # which itself failed with a NameError at runtime.
            raise ValueError("Please use aeon's contextmanager with "
                             "the measurement name (and optionally group) as "
                             "argument.")
        self.measurements.start(*self._context[-1])

    def __exit__(self, type, value, traceback):
        """
        Stop the measurement started by the matching __enter__.
        """
        self.measurements.stop(*self._context.pop())

    def method(self, met):
        """
        Decorator for methods that are to be included in the report.

        Basic usage:

            from time import sleep
            from aeon import timer

            class Foo(object):
                @timer.method
                def bar(self):
                    sleep(1)

            print(timer)
        """
        name = met.__name__

        def decorated_method(theirself, *args, **kwargs):
            # group measurements by the class the method is bound to
            group = theirself.__class__.__name__
            with self(name, group):
                ret = met(theirself, *args, **kwargs)
            return ret

        update_wrapper(decorated_method, met)
        return decorated_method

    def start(self, name, group=default_group):
        """
        Start measurement with `name` and `group`.

        Measurement is automatically created if it doesn't exist already.
        """
        self.measurements.start(name, group)

    def stop(self, name, group=default_group):
        """
        Stop measurement with `name` and `group`.
        """
        self.measurements.stop(name, group)

    def stop_last(self):
        """
        Stop the measurement that was started last.

        Helps avoiding repetitive typing of `name` and `group` when dealing
        with a sequence of measurements.
        """
        self.measurements.stop_last()

    def start_next(self, name, group=default_group):
        """
        Stop the last measurement to start a new one with `name` and `group`.

        Helps avoiding repetitive typing of `name` and `group` when dealing
        with a sequence of measurements.
        """
        self.measurements.start_next(name, group)

    def total_runtime(self):
        """
        Returns the sum of the runtime of all measurements.
        """
        return sum([m.total_runtime for m in self.measurements.all()])

    def total_walltime(self):
        """
        Returns the time that has elapsed since the timer was created in seconds.
        """
        return time() - self.measurements.created

    def calls(self, name, group=default_group):
        """
        Returns the number of calls to the object of `group` with `name`.
        """
        return self.measurements.get(name, group).calls

    def time(self, name, group=default_group):
        """
        Returns the total runtime of the measurement of `group` with `name`.
        """
        return self.measurements.get(name, group).total_runtime

    def time_per_call(self, name, group=default_group):
        """
        Returns the average runtime for one execution of the measurement
        of `group` with `name`.
        """
        return self.measurements.get(name, group).time_per_call()

    def report(self, max_items=10):
        """
        Returns a report of the executed measurements.

        This includes the timings by group, as well as the `max_items`
        measurements with the most total runtime (by default 10).
        """
        msg = "Timings: Showing the up to {} slowest items.\n\n".format(max_items)
        separator = "+--------------------+------------------------------+--------+------------+--------------+\n"
        msg += separator
        msg += "| {:18} | {:28} | {:>6} | {:>10} | {:>12} |\n".format("class/module", "name", "calls", "total (s)", "per call (s)")
        msg += separator
        msg_row = "| {:18} | {:28} | {:>6} | {:>10.3g} | {:>12.3g} |\n"

        shown = 0
        for m in sorted(
                self.measurements.all(),
                key=attrgetter('total_runtime'),
                reverse=True):
            msg += msg_row.format(m.group, m.name, m.calls, m.total_runtime, m.time_per_call())
            shown += 1
            if shown >= max_items:
                break
        msg += separator + "\n"

        msg += "Timings grouped by class or module.\n\n"
        separator = "+--------------------+----------+------+\n"
        msg += separator
        msg += "| {:18} | {:>8} | {:>4} |\n".format('class/module', 'time (s)', '%')
        msg += separator
        for group, tot_t, share in self.grouped_measurements():
            msg += "| {:18} | {:>8.3g} | {:>4.3g} |\n".format(group, tot_t, share)
        msg += separator + "\n"

        seconds = self.total_walltime()
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        msg += "Total wall time %d:%02d:%02d." % (h, m, s)
        return msg

    def grouped_measurements(self):
        """
        Returns a list of tuples (group, runtime, share), sorted by decreasing runtime.

        It also describes either how much time went unaccounted for, or if
        there are multiple measurements at the same time at some point.
        """
        grouped_timings = defaultdict(float)
        for m in self.measurements.all():
            grouped_timings[m.group] += m.total_runtime

        recorded_time = self.total_runtime()
        wall_time = self.total_walltime()

        # .items() works on both Python 2 and 3 (iteritems() is Python-2 only)
        grouped_timings = [(group, tot_t, 100 * tot_t / wall_time) for group, tot_t in grouped_timings.items()]

        diff = abs(recorded_time - wall_time)
        rel_diff = 100 * (1 - recorded_time / wall_time)
        rel_diff_desc = "redundant" if recorded_time > wall_time else "untimed"
        grouped_timings.append((rel_diff_desc, diff, rel_diff))

        grouped_timings = sorted(
            grouped_timings,
            key=lambda gt: gt[1],
            reverse=True)

        return grouped_timings

    def __str__(self):
        return self.report()
from time import time
from .errors import UnknownMeasurement, NoMeasurementRunning
from .measurement import Measurement
class MeasurementStore(object):
    """
    Manages a series of measurements, keyed by measurement name and group.
    """
    def __init__(self):
        self.reset()

    def reset(self):
        """
        Reset the internal data.
        """
        self.running_measurement = None  # the measurement started last, if any
        self.store = {}  # maps "group::name" -> Measurement
        self.created = time()  # creation timestamp used for wall-time accounting

    def _key(self, name, group=""):
        """
        Returns the key with which a measurement is stored in the measurements dict.
        """
        return group + "::" + name

    def _put(self, measurement):
        """
        Store the `measurement` object in the measurements dict.
        """
        key = self._key(measurement.name, measurement.group)
        self.store[key] = measurement

    def all(self):
        """
        Return an iterator over all measurements.
        """
        # iter(...values()) works on Python 2 and 3 alike; the previous
        # itervalues() call exists only on Python 2.
        return iter(self.store.values())

    def exists(self, name, group=""):
        """
        Returns True if a measurement with `name` of `group` exists.
        """
        key = self._key(name, group)
        return key in self.store

    def get(self, name, group=""):
        """
        Returns the measurement with `name` in `group` or raise an exception.

        Raises:
            UnknownMeasurement: if no such measurement exists; the message
                lists the known measurement keys.
        """
        key = self._key(name, group)
        try:
            return self.store[key]
        except KeyError:
            # List the known keys in the exception message rather than
            # printing to stdout (the old Python-2 `print` statement was a
            # syntax error on Python 3 and noisy for library code).
            raise UnknownMeasurement(
                "Can't find measurement '{}' of group '{}'. Known "
                "measurements (in format group::name): {}.".format(
                    name, group, sorted(self.store.keys())))

    def start(self, name, group=""):
        """
        Start a measurement with `name` and `group`.

        Will create a new measurement if it doesn't exist already.
        """
        if self.exists(name, group):
            measurement = self.get(name, group)
        else:
            measurement = Measurement(name, group)
            self._put(measurement)
        measurement.start()
        self.running_measurement = measurement

    def stop(self, name, group=""):
        """
        Stop the measurement `name` of `group`.
        """
        measurement = self.get(name, group)
        measurement.stop()
        self.running_measurement = None

    def stop_last(self):
        """
        Stop the last started measurement.

        This method exists to avoid repeating the measurement name to
        stop the measurement started last when starting measurements by
        hand using `start`.
        """
        if self.running_measurement is None:
            raise NoMeasurementRunning("There is no measurement to stop.")
        self.running_measurement.stop()
        self.running_measurement = None

    def start_next(self, name, group=""):
        """
        Stops the last measurement to start a new one with `name` and `group`.
        """
        if self.running_measurement is None:
            raise NoMeasurementRunning("There is no measurement to stop.")
        self.stop_last()
        self.start(name, group)
AeroEvap
========
|Documentation Status| |Downloads per month| |PyPI version|
Python version of aerodynamic mass-transfer approach for open-water evaporation
Documentation
-------------
`ReadTheDocs <https://aeroevap.readthedocs.io/>`__
Installation
------------
Dependencies are only Python 3, NumPy, and pandas.
You may install the dependencies using the provided conda virtual environment (recommended), the environment file can be downloaded `here <https://raw.githubusercontent.com/WSWUP/AeroEvap/master/environment.yml>`__ and installed and activated by
.. code-block:: bash
conda env create -f environment.yml
conda activate aeroevap
Once activated install with PIP:
.. code-block:: bash
pip install aeroevap
.. |Documentation Status| image:: https://readthedocs.org/projects/aeroevap/badge/?version=latest
:target: https://aeroevap.readthedocs.io/en/latest/?badge=latest
.. |Downloads per month| image:: https://img.shields.io/pypi/dm/aeroevap.svg
:target: https://pypi.python.org/pypi/AeroEvap/
.. |PyPI version| image:: https://img.shields.io/pypi/v/aeroevap.svg
:target: https://pypi.python.org/pypi/AeroEvap/
| AeroEvap | /AeroEvap-0.0.2.post2.tar.gz/AeroEvap-0.0.2.post2/README.rst | README.rst |
import cmath as cm
import numpy as np
import math as m
import pandas as pd
import multiprocessing as mp
class Aero(object):
    """
    Manages meteorological time series input/output for aerodynamic
    mass-transfer evaporation calculation and contains methods for batch and
    single calculations.

    An :obj:`Aero` object allows the aerodynamic mass-transfer evaporation
    estimation to be calculated from meteorological data that is stored in a
    :obj:`pandas.DataFrame` with a date or datetime-like index. The
    :attr:`Aero.df` can be assigned on initialization or later, it can also be
    reassigned at anytime.

    The :meth:`Aero.single_calc` static method calculates evaporation for a
    single measurement set and can be used without creating an :obj:`Aero`
    object, e.g. in another module. For calculating evaporation for a time
    series of input meteorological data use the :meth:`Aero.run` method which
    uses multiple processors (if they are available).
    """
    def __init__(self, df=None):
        if df is not None and not isinstance(df, pd.DataFrame):
            raise TypeError("Must assign a pandas.DataFrame object")
        self._df = df

    def run(self, sensor_height, timestep, variable_names=None, nproc=None):
        """
        Run aerodynamic mass-transfer evaporation routine on time series data
        that contains necessary input in-place and in parallel.

        Arguments:
            sensor_height (float): height of sensor in meters.
            timestep (float or int): sensor sampling frequency in seconds.

        Keyword Arguments:
            variable_names (None or dict): default None. Dictionary with user
                variable names as keys and variable names needed for
                :mod:`aeroevap` as values. If None, the needed input variables
                must be named correctly in the :attr:`Aero.df` dataframe: 'WS',
                'P', 'T_air', 'T_skin', and 'RH' for windspeed, air pressure,
                air temperature, skin temperature, and relative humidity
                respectively.
            nproc (None or int): default None. If none use half of the available
                cores for parallel calculations.

        Returns:
            None

        Hint:
            A :obj:`pandas.DataFrame` must be assigned to the :attr:`Aero.df`
            instance property before calling :meth:`Aero.run`. If the names of
            the required meteorological variables in the dataframe are not
            named correctly you may pass a dictionary to the ``variable_names``
            argument which maps your names to those used by ``AeroEvap``. For
            example if your surface temperature column is named 'surface_temp'
            then

            >>> variable_names = {'surface_temp' : 'T_skin'}

        """
        if not isinstance(self._df, pd.DataFrame):
            print(
                'ERROR: no pandas.DataFrame assigned to Aero.df, please '
                'assign first.'
            )
            return
        if variable_names is not None:
            df = self._df.rename(columns=variable_names)
        else:
            df = self._df
        df['date'] = df.index
        df['SH'] = sensor_height
        df['dt'] = timestep
        input_vars = ['date', 'WS', 'P', 'T_air', 'T_skin', 'RH', 'SH', 'dt']
        if not set(input_vars).issubset(df.columns):
            # fixed message typo: "on or more" -> "one or more"
            print(
                'ERROR: missing one or more needed columns for calculation:\n'
                '{}'.format(', '.join(input_vars))
            )
            return
        # run each input using n processors
        inputs = df[input_vars].values.tolist()
        if not nproc:
            nproc = mp.cpu_count() // 2 # use half cores
        pool = mp.Pool(processes=nproc)
        results = pool.map(_calc, inputs)
        pool.close()
        pool.join()
        results_df = pd.concat(results)
        output_vars = ['E', 'Ce', 'VPD', 'stability']
        self._df[output_vars] = results_df[output_vars]
        # drop the temporary helper columns added above
        for el in ['date', 'SH', 'dt']:
            if el in self._df:
                self._df.drop(el, axis=1, inplace=True)

    @property
    def df(self):
        """
        :obj:`pandas.DataFrame` containing input time series meteorological data
        and calculated variables after running :meth:`Aero.run`.
        """
        if isinstance(self._df, pd.DataFrame):
            return self._df

    @df.setter
    def df(self, df):
        if not isinstance(df, pd.DataFrame):
            raise TypeError("Must assign a pandas.DataFrame object")
        self._df = df

    @staticmethod
    def single_calc(datetime, wind, pressure, T_air, T_skin, RH,
            sensor_height, timestep):
        """
        Estimates open water evaporation using the aerodynamic mass transfer
        approach for a single time period.

        Arguments:
            datetime (datetime.datetime or str): date-time of measurements for
                error logging.
            wind (float): windspeed in m/s
            pressure (float): air pressure in mbar
            T_air (float): air temperature in C
            T_skin (float): skin temperature (water surface) in C
            RH (float): relative humidity (0-100)
            sensor_height (float): sensor height in m
            timestep (int or float): measurement frequency in seconds

        Returns (tuple):
            evaporation (mm/timestep), bulk transfer coefficient (Ce), vapor pressure deficit (kPa), and MOST stability (z/L)
        """
        check=np.array(
            [wind, pressure, T_air, T_skin, RH, sensor_height, timestep]
        )
        if np.isnan(check).any():
            #print('One or more variables missing on {}'.format(datetime))
            return np.nan, np.nan, np.nan, np.nan
        ###########################################################
        #Constants
        K=0.41 #von Karman constant
        g=9.81 #gravity (m/s^2)
        a=0.0123 #Charnock constant
        ############################################################
        #Calculate meterological variables
        #Sensort height (m)
        z=sensor_height
        #Convert from Celcius to Kelvin
        T_air=T_air+273.15
        T_skin=T_skin+273.15
        #Potential temperatures (air and skin) Kelvin
        T_air_pot=(T_air)*(1000./pressure)**(0.286)
        T_skin_pot=(T_skin)*(1000./pressure)**(0.286)
        #Atmospheric vapor pressure (kPa) (2m)
        e_air=(RH/100)*(0.6108*m.exp(((17.27*(T_air-273.15))/((T_air-273.15)+237.3))))
        #Atmospheric specific humidity (kg/kg) (2m)
        q_air=0.62*e_air/(pressure/10-0.38*e_air)
        #Saturated Water-Surface vapor pressure (kPa) (0m)
        e_sat=0.6108*m.exp(((17.27*(T_skin-273.15))/((T_skin-273.15)+237.3)))
        #Saturated specific humidity at water surface (kg/kg) (0m)
        q_sat=0.62*e_sat/(pressure/10-0.38*e_sat)
        #Vapor Pressure Deficit (kPa)
        VPD=e_sat-e_air
        #Density of air (kg/m^3)
        density_air=(pressure/10*1000)/((T_air)*286.9*(1+0.61*q_air))
        #Kinematic viscocity
        #Estimated using data from Montgomery, 1947 in Verburg, 2010
        v=(4.94*10**-8*(T_air-273.15)+1.7185*10**-5)/density_air
        #Virtual Temperature
        Tv=T_air*(1+q_air*0.61)
        ###############################################################
        # Bulk Transfer Coefficient Iteration, Ce
        #Stable Condition (z/L > 0)
        #Initial Values for Iteration
        #Stability Function (Momentum)
        Sm=0
        #Stability Function (Temperature)
        St=0
        #Stability Function (Vapor)
        Sq=0
        #Friction velocity
        Us=0
        #Roughness Length of Momentem
        zo=.00010
        #Friction Velocity of Momentum
        u_f=(K*(wind-Us))/(cm.log(z/zo)-Sm)
        #Roughness Length of Vapor
        zoq=7.4*zo*cm.exp(-2.25*(zo*u_f/v)**.25)
        #Roughness Legnth of Temperature
        zot=7.4*zo*cm.exp(-2.25*(zo*u_f/v)**.25)
        #Scaling Potential Temperature
        t_fv=(K*(T_air_pot-T_skin_pot))/(cm.log(z/zot)-St)
        #Scaling Humidity
        q_f=np.divide(K*(q_air-q_sat), cm.log(z/zoq)-Sq)
        #Monin-Obhukov Length
        L=np.divide(Tv*u_f**2, K*g*t_fv)
        try:
            # avoid crash with bad values causing log of 0 or neg values
            for _ in range(199):
                #Friction Velocity of Momentum
                u_f=np.divide((K*(wind-u_f)),(cm.log(z/zo)-Sm))
                #Scaling Potential Temperature
                t_fv=np.divide((K*(T_air_pot-T_skin_pot)),(cm.log(z/zot)-St))
                #Scaling Humidity
                q_f=np.divide((K*(q_air-q_sat)),(cm.log(z/zoq)-Sq))
                #Stability Function of Momentum
                Sm=np.float64(-5.2*(z))/L
                #Stability Function of Vapor
                Sq=np.divide(np.float64(-5.2*(z)),L)
                #Roughness Length of Momemtum
                zc=np.divide(a*u_f**2,g)
                zs=np.divide(0.11*v,u_f)
                zo=zc+zs
                #Roughness Length of Vapor
                zoq=7.4*zo*cm.exp(-2.25*(zo*u_f/v)**.25)
                #Monin-Obhukov Length
                L=np.divide((Tv*u_f**2),(K*g*t_fv))
        except:
            print('Could not converge on {} for stable conditions'.format(
                    datetime
                )
            )
            # fixed: was returning 3 values for a 4-value contract, which
            # broke downstream unpacking into E, Ce, VPD, stability
            return np.nan, np.nan, np.nan, np.nan

        stability_s = z/L
        if ~np.isreal(L):
            Ce_s=np.nan
        else:
            if np.real(stability_s) > 0:
                Ce_s=np.real(K**2/((cm.log(z/zo)-Sm)*(cm.log(z/zoq)-Sq)))
            else:
                Ce_s=np.nan

        ###############################################################
        #Unstable Conditions (z/L< 0)
        #Initial Values for Iteration
        #Stability Function (Momentum)
        Sm=0
        #Stability Function (Temperature)
        St=0
        #Stability Function (Vapor)
        Sq=0
        #Friction velocity
        Us=0
        #Roughness Length of Momentem
        zo=.00010
        #Friction Velocity of Momentum
        u_f=(K*(wind-Us))/(m.log(z/zo)-Sm)
        #Roughness Length of Vapor
        zoq=7.4*zo*m.exp(-2.25*(zo*u_f/v)**.25)
        #Roughness Legnth of Temperature
        zot=7.4*zo*m.exp(-2.25*(zo*u_f/v)**.25)
        #Scaling Potential Temperature
        t_fv=(K*(T_air_pot-T_skin_pot))/(m.log(z/zot)-St)
        #Scaling Humidity
        q_f=(K*(q_air-q_sat))/(m.log(z/zoq)-Sq)
        #Monin-Obhukov Length
        L=np.divide((Tv*u_f**2),(K*g*t_fv))
        try:
            for _ in range(199):
                #Friction Velocity of Momentum
                u_f=np.divide((K*(wind-u_f)),(cm.log(z/zo)-Sm))
                #Scaling Temperature
                t_fv=np.divide((K*(T_air_pot-T_skin_pot)),(cm.log(z/zot)-St))
                #Scaling Humidity
                q_f=np.divide((K*(q_air-q_sat)),(cm.log(z/zoq)-Sq))
                #Input for Stability function calculations
                # (renamed from `x`, which shadowed the old loop counter)
                chi=(1-16*(z/L))**.25
                #Stability Function of Momentum
                Sm=2*cm.log((1+chi)/2)+cm.log((1+chi**2)/2)-2*cm.atan(chi)+(m.pi/2)
                #Stability Function of Vapor
                Sq=2*cm.log((1+chi**2)/2)
                #Roughness Length of Momemtum
                zc=np.divide(a*u_f**2,g)
                zs=np.divide(0.11*v,u_f)
                zo=zc+zs
                #Roughness Length of Vapor
                zoq=7.4*zo*cm.exp(-2.25*(zo*u_f/v)**.25)
                #Monin-Obhukov Length
                L=np.divide((Tv*u_f**2),(K*g*t_fv))
        except:
            # best-effort: proceed with last iterate; Ce_u is screened below
            print('Could not converge on {} for unstable conditions'.format(
                    datetime
                )
            )

        stability_u = z/L
        if ~np.isreal(L):
            Ce_u=np.nan
        else:
            if np.real(stability_u) < 0:
                Ce_u=np.real(K**2/((cm.log(z/zo)-Sm)*(cm.log(z/zoq)-Sq)))
            else:
                Ce_u=np.nan

        #################################################################
        #Neutral Conditions, z/L=0
        #Initial Conditions
        zo=.00010
        try:
            # avoid crash with bad values causing log of 0 or neg values
            for _ in range(199):
                #Friction Velocity of Momentum
                u_f=np.divide((K*wind),(np.emath.log(z/zo)))
                #Roughness Length of Momemtum
                zc=np.divide((a*u_f**2),g)
                zs=np.divide(0.11*v,u_f)
                zo=zc+zs
                #Roughness Length of Vapor
                zoq=7.4*zo*m.exp(-2.25*(zo*u_f/v)**.25)
        except:
            print('Could not converge on {} for neutral conditions'.format(
                    datetime
                )
            )

        Ce_n=np.divide(
            (K**2),
            ((np.emath.log(np.divide(z,zo)))*(np.emath.log(np.divide(z,zoq))))
        )
        ################################################################
        #Assign correct Ce value (stable, unstable, or neutral)
        if cm.isfinite(Ce_s):
            Ce=Ce_s
            stability = stability_s
        else:
            if cm.isfinite(Ce_u):
                Ce=Ce_u
                stability = stability_u
            else:
                Ce=Ce_n
                stability = 0
        ################################################################
        #Calculated evaporation in mm/timestep
        E=density_air*Ce*(q_sat-q_air)*wind*timestep
        return E, Ce, VPD, np.real(stability)
def _calc(input_list):
    """
    Helper function for parallel calculations; must be a top-level function
    so that multiprocessing can pickle it.

    ``input_list`` ordering:
    [date, WS, P, T_air, T_skin, RH, SH, dt]
    """
    date = input_list[0]
    # positional args 1..7 map onto single_calc's wind..timestep parameters
    row = Aero.single_calc(date, *input_list[1:8])
    return pd.DataFrame(
        index=[date],
        columns=['E', 'Ce', 'VPD', 'stability'],
        data=[row],
    )
# Tutorial
Basic usage with an example dataset. Note, this tutorial uses the matplotlib graphing module which is *not* a dependency of ``AeroEvap``, be sure to install it to your environment before running this tutorial if you want the plots to display correctly.
```
import pandas as pd
import numpy as np
from aeroevap import Aero
from IPython.display import IFrame
import matplotlib.pyplot as plt
%matplotlib inline
```
## Example data
This example uses buoy data from a location near Savannah, GA (NOAA station ID is 41008). The buoy is maintained by the National Data Buoy Center (NDBC); more buoy information is shown in the embedded page below. The meteorological data used in this example is hosted by NOAA and downloaded directly and formatted for a month of data.
```
IFrame(src='https://www.ndbc.noaa.gov/station_page.php?station=41008', width=700, height=500)
```
The line below downloads the time series of current year buoy standard meterological data directly from the NDBC.
Input units:
| WDIR | WSPD | GST | WVHT | DPD | APD | MWD | PRES | ATMP | WTMP | DEWP | VIS | TIDE |
|:-------|:-------|:------|:-------|:------|:------|:------|:-------|:-------|:-------|:-------|:------|:-------|
| degT | m/s | m/s | m | sec | sec | deg | hPa | degC | degC | degC | nmi | ft |
```
# get standard meterological data from National Data Buoy Center
met_df = pd.read_csv(
'https://www.ndbc.noaa.gov/data/l_stdmet/41008.txt',
delim_whitespace=True, skiprows=[1], na_values=[999.0]
)
```
Make a datetime index and clean up the dataframe.
```
met_df.index = pd.to_datetime(
dict(
year=met_df['#YY'],
month=met_df.MM,
day=met_df.DD,
hour=met_df.hh,
minute=met_df.mm
)
)
met_df.index.name = 'date'
met_df.drop(['#YY','MM','DD','hh','mm'], axis=1, inplace=True)
met_df.head()
```
Because the input dataset does not include relative humidity we can estimate it using an approximation to the Clausius–Clapeyron relation using air and dewpoint temperatures. Relative humidity is needed in the aerodynamic mass-transfer evaporation calculations.
```
# vapor pressure and saturation vapor pressure using Clausius–Clapeyron relation
met_df['e'] = 0.611 * np.exp( 5423 * ((1/273) - (1/(met_df.DEWP+273.15))) )
met_df['es'] = 0.611 * np.exp( 5423 * ((1/273) - (1/(met_df.ATMP+273.15))) )
# calculate relative humitidy
met_df['RH'] = 100 * (met_df.e/met_df.es)
plt.figure(figsize=(8,4))
met_df.RH.plot()
plt.ylabel('estimated relative humitidy')
```
In this case we do *not* need to convert air pressure to millibars because 1 hPa = 1 mbar.
# Create an ``Aero`` object
The ``Aero`` object allows for loading a ``pandas.DataFrame`` containing meteorological data required for calculating aerodynamic mass-transfer open water evaporation in parallel. The object can be initialized from a ``pandas.DataFrame`` or the ``pandas.DataFrame`` can be assigned later, e.g.
```
Aero_empty = Aero()
Aero_with_df = Aero(met_df)
Aero_empty.df is None
# the df property can be assigned after initialization:
Aero_empty.df = met_df
# the data has been added
Aero_empty.df.head()
# this will not work, df needs to be a dataframe
Aero_empty.df = 'high five'
```
**Tip:** the ``df`` is a property of the ``Aero`` class which means it can be assigned or reassigned if, for example, you wanted to run the evaporation calculations on a modified version of input meterological time series without creating a new ``Aero`` instance.
## Input variables and units
The meterological variables needed for running the aerodynamic mass-transfer estimation of evaporation are the following:
| variable | units | naming |
|-------------------|-------|--------|
| wind speed | m/s | WS |
| air pressure | mbar | P |
| air temperature | C | T_air |
| skin temperature | C | T_skin |
| relative humidity | 0-100 | RH |
where the "naming" column refers to the internal names expected by the ``Aero.run`` method, i.e. the column headers in the dataframe should either be named accordingly or a dictionary that maps your column names to those internal names can be passed (see examples below).
To run the evaporation calculation you will also need the anemometer height in meters and the temporal sampling frequency of the data in seconds.
## Run calculation on time series
```
# make a naming dict to match up columns with Aero variable names
names = {
'WSPD' : 'WS',
'ATMP' : 'T_air',
'WTMP' : 'T_skin',
'PRES' : 'P'
}
```
Now we are ready to run the aerodynamic mass-transer evaporation on all the time series in our dataframe. Lastly, the sensor height of the anemometer and temporal sampling frequency of the data needs to be supplied.
This example assumes there are 8 physical and logical processors available for parallelization; if not specified, the ``Aero.run`` routine will try to use half of the available processors.
```
np.seterr('ignore')
# create a new Aero object and calculate evaporation on all rows
A = Aero(met_df)
A.run(sensor_height=4, timestep=600, variable_names=names)
```
After the calculations are complete four variables will be added to the ``Aero.df`` dataframe: 'E', 'Ce', 'VPD', and 'stability' which are evaporation in mm/timestep, bulk transfer coefficient, vapor pressure deficit (kPa), and the Monin-Obhukov Similarity Theory stability parameter (z/L).
```
A.df[['E', 'Ce', 'VPD', 'stability']].head()
```
View the calculated evaporation,
```
plt.figure(figsize=(8,4))
A.df.E.plot()
plt.ylabel('evaporation mm/10 min')
```
The calculated open-water evaporation is shown below after creating a daily sum.
```
import matplotlib.pyplot as plt
plt.figure(figsize=(8,4))
A.df.E.resample('D').sum().plot()
plt.ylabel('evaporation mm/day')
```
And the wind speed relation versus the calculated evaporation.
```
plt.figure(figsize=(8,4))
plt.scatter(A.df.WSPD.resample('D').mean(), A.df.E.resample('D').sum())
plt.ylabel('evaporation mm/day')
plt.xlabel('mean daily wind speed m/s')
```
We can use the Monin-Obhukov Similarity Theory stability parameter (z/L) for relating the wind speed to the bulk transfer coefficient as well by classifying them by unstable, stable, and neutral conditions.
```
stable = np.real(A.df.stability) > 0
unstable = np.real(A.df.stability) < 0
neutral = np.real(A.df.stability) == 0
plt.figure(figsize=(8,6))
plt.scatter(A.df.WSPD[stable], A.df.Ce[stable], marker='x', color='blue', label='stable')
plt.scatter(A.df.WSPD[unstable], A.df.Ce[unstable], marker='x', color='red', label='unstable')
plt.scatter(A.df.WSPD[neutral], A.df.Ce[neutral], marker='o', color='black', label='neutral')
plt.ylim(0,0.006)
plt.ylabel(r'$C_e$', fontsize=12)
plt.xlabel('Wind speed m/s')
plt.legend()
```
## Single calculation
The ``Aero`` class also provides a method ``Aero.single_calc`` that can be used on a single set of meterological data to calculate the instantaneous open-water evaporation. It requires the same inputs as ``Aero.run`` however the inputs are scalars as opposed to time series. For example using the first timestamp of our example buoy data we can calculate E, Ce, VPD, and stability:
```
datetime = '2019-08-01 00:00:00'
wind = 3.3
pressure = 1021.2
T_air = 18.1
T_skin = 18.4
RH = 80.26
sensor_height = 4
timestep = 600
E, Ce, VPD, stability = Aero.single_calc(
datetime,
wind,
pressure,
T_air,
T_skin,
RH,
sensor_height,
timestep
)
E, Ce, VPD, stability
```
| AeroEvap | /AeroEvap-0.0.2.post2.tar.gz/AeroEvap-0.0.2.post2/notebooks/tutorial.ipynb | tutorial.ipynb |
# Tutorial
Basic usage with an example dataset. Note, this tutorial uses the matplotlib graphing module which is *not* a dependency of ``AeroEvap``, be sure to install it to your environment before running this tutorial if you want the plots to display correctly.
```
import pandas as pd
import numpy as np
from aeroevap import Aero
from IPython.display import IFrame
import matplotlib.pyplot as plt
%matplotlib inline
```
## Example data
This example uses buoy data from a location near Savannah, GA (NOAA station ID is 41008). The buoy is maintained by the National Data Buoy Center (NDBC); more buoy information is shown in the embedded page below. The meteorological data used in this example is hosted by NOAA and is downloaded directly and formatted for a month of data.
```
IFrame(src='https://www.ndbc.noaa.gov/station_page.php?station=41008', width=700, height=500)
```
The line below downloads the time series of current-year buoy standard meteorological data directly from the NDBC.
Input units:
| WDIR | WSPD | GST | WVHT | DPD | APD | MWD | PRES | ATMP | WTMP | DEWP | VIS | TIDE |
|:-------|:-------|:------|:-------|:------|:------|:------|:-------|:-------|:-------|:-------|:------|:-------|
| degT | m/s | m/s | m | sec | sec | deg | hPa | degC | degC | degC | nmi | ft |
```
# get standard meterological data from National Data Buoy Center
met_df = pd.read_csv(
'https://www.ndbc.noaa.gov/data/l_stdmet/41008.txt',
delim_whitespace=True, skiprows=[1], na_values=[999.0]
)
```
Make a datetime index and clean up the dataframe.
```
met_df.index = pd.to_datetime(
dict(
year=met_df['#YY'],
month=met_df.MM,
day=met_df.DD,
hour=met_df.hh,
minute=met_df.mm
)
)
met_df.index.name = 'date'
met_df.drop(['#YY','MM','DD','hh','mm'], axis=1, inplace=True)
met_df.head()
```
Because the input dataset does not include relative humidity, we can estimate it using an approximation to the Clausius–Clapeyron relation using air and dewpoint temperatures. Relative humidity is needed in the aerodynamic mass-transfer evaporation calculations.
```
# vapor pressure and saturation vapor pressure using Clausius–Clapeyron relation
met_df['e'] = 0.611 * np.exp( 5423 * ((1/273) - (1/(met_df.DEWP+273.15))) )
met_df['es'] = 0.611 * np.exp( 5423 * ((1/273) - (1/(met_df.ATMP+273.15))) )
# calculate relative humitidy
met_df['RH'] = 100 * (met_df.e/met_df.es)
plt.figure(figsize=(8,4))
met_df.RH.plot()
plt.ylabel('estimated relative humitidy')
```
In this case we do *not* need to convert air pressure to millibars because 1 hPa = 1 mbar.
# Create an ``Aero`` object
The ``Aero`` object allows for loading a ``pandas.DataFrame`` containing meteorological data required for calculating aerodynamic mass-transfer open-water evaporation in parallel. The object can be initialized from a ``pandas.DataFrame`` or the ``pandas.DataFrame`` can be assigned later, e.g.
```
Aero_empty = Aero()
Aero_with_df = Aero(met_df)
Aero_empty.df is None
# the df property can be assigned after initialization:
Aero_empty.df = met_df
# the data has been added
Aero_empty.df.head()
# this will not work, df needs to be a dataframe
Aero_empty.df = 'high five'
```
**Tip:** the ``df`` is a property of the ``Aero`` class which means it can be assigned or reassigned if, for example, you wanted to run the evaporation calculations on a modified version of input meterological time series without creating a new ``Aero`` instance.
## Input variables and units
The meteorological variables needed for running the aerodynamic mass-transfer estimation of evaporation are the following:
| variable | units | naming |
|-------------------|-------|--------|
| wind speed | m/s | WS |
| air pressure | mbar | P |
| air temperature | C | T_air |
| skin temperature | C | T_skin |
| relative humidity | 0-100 | RH |
where the "naming" column refers to the internal names expected by the ``Aero.run`` method, i.e. the column headers in the dataframe should either be named accordingly or a dictionary that maps your column names to those internal names can be passed (see examples below).
To run the evaporation calculation you will also need the anemometer height in meters and the temporal sampling frequency of the data in seconds.
## Run calculation on time series
```
# make a naming dict to match up columns with Aero variable names
names = {
'WSPD' : 'WS',
'ATMP' : 'T_air',
'WTMP' : 'T_skin',
'PRES' : 'P'
}
```
Now we are ready to run the aerodynamic mass-transfer evaporation on all the time series in our dataframe. Lastly, the sensor height of the anemometer and the temporal sampling frequency of the data need to be supplied.
This example assumes there are 8 physical and logical processors available for parallelization; if not specified, the ``Aero.run`` routine will try to use half of the available processors.
```
np.seterr('ignore')
# create a new Aero object and calculate evaporation on all rows
A = Aero(met_df)
A.run(sensor_height=4, timestep=600, variable_names=names)
```
After the calculations are complete four variables will be added to the ``Aero.df`` dataframe: 'E', 'Ce', 'VPD', and 'stability', which are evaporation in mm/timestep, the bulk transfer coefficient, the vapor pressure deficit (kPa), and the Monin-Obukhov Similarity Theory stability parameter (z/L).
```
A.df[['E', 'Ce', 'VPD', 'stability']].head()
```
View the calculated evaporation,
```
plt.figure(figsize=(8,4))
A.df.E.plot()
plt.ylabel('evaporation mm/10 min')
```
The calculated open-water evaporation is shown below after creating a daily sum.
```
import matplotlib.pyplot as plt
plt.figure(figsize=(8,4))
A.df.E.resample('D').sum().plot()
plt.ylabel('evaporation mm/day')
```
And the wind speed relation versus the calculated evaporation.
```
plt.figure(figsize=(8,4))
plt.scatter(A.df.WSPD.resample('D').mean(), A.df.E.resample('D').sum())
plt.ylabel('evaporation mm/day')
plt.xlabel('mean daily wind speed m/s')
```
We can use the Monin-Obukhov Similarity Theory stability parameter (z/L) for relating the wind speed to the bulk transfer coefficient as well by classifying them by unstable, stable, and neutral conditions.
```
stable = np.real(A.df.stability) > 0
unstable = np.real(A.df.stability) < 0
neutral = np.real(A.df.stability) == 0
plt.figure(figsize=(8,6))
plt.scatter(A.df.WSPD[stable], A.df.Ce[stable], marker='x', color='blue', label='stable')
plt.scatter(A.df.WSPD[unstable], A.df.Ce[unstable], marker='x', color='red', label='unstable')
plt.scatter(A.df.WSPD[neutral], A.df.Ce[neutral], marker='o', color='black', label='neutral')
plt.ylim(0,0.006)
plt.ylabel(r'$C_e$', fontsize=12)
plt.xlabel('Wind speed m/s')
plt.legend()
```
## Single calculation
The ``Aero`` class also provides a method ``Aero.single_calc`` that can be used on a single set of meteorological data to calculate the instantaneous open-water evaporation. It requires the same inputs as ``Aero.run``; however, the inputs are scalars as opposed to time series. For example, using the first timestamp of our example buoy data we can calculate E, Ce, VPD, and stability:
```
datetime = '2019-08-01 00:00:00'
wind = 3.3
pressure = 1021.2
T_air = 18.1
T_skin = 18.4
RH = 80.26
sensor_height = 4
timestep = 600
E, Ce, VPD, stability = Aero.single_calc(
datetime,
wind,
pressure,
T_air,
T_skin,
RH,
sensor_height,
timestep
)
E, Ce, VPD, stability
```
| AeroEvap | /AeroEvap-0.0.2.post2.tar.gz/AeroEvap-0.0.2.post2/notebooks/.ipynb_checkpoints/tutorial-checkpoint.ipynb | tutorial-checkpoint.ipynb |
import os
class LogFile(object):
    """
    An object representing a CSV log file to store data.

    The file is created as ``<path>/<base_name>_NN.csv``, where NN is the
    first two-digit index (00-99) that does not already exist in ``path``.

    :param base_name: Basic file name to be created i.e. tuna_01, tuna_02, etc.
    :param path: Path to file, recommended in /home/user/... or /media/usb_stick/...
    """

    def __init__(self, base_name, path):
        self.base_name = str(base_name)
        # Full path of the log file; all subsequent writes append to it.
        self.name = self.make_file(path)

    def make_file(self, path):
        """
        Return the first free file path of the form <path>/<base_name>_NN.csv.

        Scans indices 00 through 99 and stops at the first candidate that does
        not exist yet. If all 100 candidates exist, the _99 path is returned
        (and will be appended to).

        :param path: Directory in which the log file should live.
        :return: Full path of the chosen log file (not yet created on disk).
        """
        path_name = None
        for i in range(100):
            path_name = os.path.join(path, "%s_%02d.csv" % (self.base_name, i))
            if not os.path.exists(path_name):
                break
        return path_name

    def write_data_log(self, time, pres, lat, lon, alt, vz, temp, hum, counts,
                       tof, period, c_sum, glitch, l_tof, rej_rat):
        """
        Append one CSV data row to the log file.

        List-valued arguments (e.g. the histogram ``counts``) are flattened
        into the row: the brackets that str() puts around lists are stripped,
        so each list element becomes its own CSV column.
        """
        data_array = [time, pres, lat, lon, alt, vz, temp, hum, counts, tof,
                      period, c_sum, glitch, l_tof, rej_rat]
        row = ",".join(str(i) for i in data_array).replace("]", "").replace("[", "")
        with open(self.name, "a+") as log:
            log.write(row)
            log.write('\n')
            log.flush()

    def make_headers(self, date, time, epoch, info_str, bbs, gsc, ucass_id):
        """
        Write the header block: start date/time/epoch, an instrument info
        string, the bin boundaries with calibration info, and the data column
        names.

        :param bbs: Sequence of 16 bin boundaries (ub0..ub15).
        :param gsc: Gain scaling coefficient.
        :param ucass_id: UCASS unit ID number.
        """
        bb_str = ",".join(str(i) for i in bbs).replace("]", "").replace("[", "")
        with open(self.name, "a+") as log:
            log.write("%s,%s,%s\n" % (date, time, epoch))
            log.write("%s\n" % info_str)
            log.write("ub0,ub1,ub2,ub3,ub4,ub5,ub6,ub7,ub8,ub9,ub10,ub11,ub12,ub13,ub14,ub15,ub16,GSC,ID\n")
            log.write("%s,%s,%s\n" % (bb_str, gsc, ucass_id))
            log.write("time,pres,lat,lon,alt,vz,temp,hum,b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12,b13,b14,b15,b16,b1ToF,b3ToF"
                      ",b5ToF,b7ToF,period,CSum,glitch,longToF,RejRat\n")
            log.flush()
from pymavlink import mavutil
import time
class MavlinkConnection(object):
    """
    An object to represent a connection with an FC (flight controller) via the
    MAVLINK protocol.

    Construction blocks until the FC answers a ping and a heartbeat is seen,
    then requests all telemetry streams at 1 Hz.
    """
    def __init__(self, port, baudrate):
        # Date/time strings, filled in later by get_date_time()
        self.start_date = []
        self.start_time = []
        # Flags recording which of the three expected message types have been seen
        self.all_data_received = 0
        self.got_system_time = 0
        self.got_global_position_int = 0
        self.got_scaled_pressure = 0
        self.master = mavutil.mavlink_connection(port, baud=baudrate)
        self.wait_for_connection()
        self.master.wait_heartbeat()
        # Most recent telemetry values. NOTE(review): raw MAVLink fields are
        # stored as-is; MAVLink convention is lat/lon in degE7, alt in mm and
        # vz in cm/s, so the _m/_ms/_hPa suffixes may be optimistic -- confirm
        # against the FC before relying on units.
        self.lat = 0
        self.lon = 0
        self.alt_m = 0
        self.vz_ms = 0
        self.press_hPa = 0
        self.epoch_time = 0  # UNIX time from FC, in microseconds
        self.boot_time = 0   # FC time since boot, in milliseconds
        # Ask the FC to stream all telemetry at 1 Hz
        self.master.mav.request_data_stream_send(self.master.target_system, self.master.target_component,
                                                 mavutil.mavlink.MAV_DATA_STREAM_ALL, 1, 1)

    def wait_for_connection(self):
        """Ping the FC every 0.5 s until any MAVLink message is received."""
        msg = None
        while not msg:
            self.master.mav.ping_send(time.time(), 0, 0, 0)
            msg = self.master.recv_match()
            time.sleep(0.5)

    def data_packet_handler(self):
        """Wait for the next MAVLink message and cache its payload.

        Depending on the message type, updates the position, pressure or
        system-time attributes and sets the corresponding got_* flag.
        """
        wait = True
        msg = []
        while wait:
            # NOTE(review): non-blocking receive in a tight loop -- this spins
            # the CPU until a message arrives.
            msg = self.master.recv_match(blocking=False)
            if msg:
                break
        msg_type = msg.get_type()
        if msg_type == "GLOBAL_POSITION_INT":
            self.got_global_position_int = 1
            self.lat = msg.lat
            self.lon = msg.lon
            self.alt_m = msg.alt
            self.vz_ms = msg.vz
        if msg_type == "SCALED_PRESSURE":
            self.got_scaled_pressure = 1
            self.press_hPa = msg.press_abs
        if msg_type == "SYSTEM_TIME":
            self.got_system_time = 1
            self.boot_time = msg.time_boot_ms
            self.epoch_time = msg.time_unix_usec

    def fill_info_buffer(self):
        """Process incoming messages until position, pressure and system time
        have all been seen (all_data_received = 1), or give up after 60
        messages (all_data_received = 0)."""
        timeout = 0
        while True:
            timeout = timeout+1
            self.data_packet_handler()
            # Product of the three flags is 1 only when all three are set
            check = self.got_system_time * self.got_scaled_pressure * self.got_global_position_int
            if check == 1:
                self.all_data_received = 1
                # Reset the flags so the next call requires fresh messages
                self.got_global_position_int = 0
                self.got_scaled_pressure = 0
                self.got_system_time = 0
                break
            elif timeout == 60:
                self.all_data_received = 0
                break
            else:
                time.sleep(0.01)

    def get_date_time(self):
        """Block until the FC reports a nonzero UNIX time, then store it as a
        'YYYY-MM-DD' string in start_date and an 'HH:MM:SS' string in
        start_time (local time of this machine)."""
        while True:
            self.fill_info_buffer()
            if self.epoch_time != 0:
                break
        epoch_sec = self.epoch_time/1000000  # time_unix_usec is in microseconds
        date_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(epoch_sec)))
        date_time = date_time.split()
        self.start_date = date_time[0]
        self.start_time = date_time[1]
from time import sleep
from gpiozero import DigitalOutputDevice
import spidev
import struct
class UCASS(object):
    """
    A UCASS Object to be used as an SPI slave, uses SPI 0 by default.

    Communicates with the UCASS optical particle counter over SPI (mode 1,
    500 kHz), using a GPIO pin as chip select (driven low to address the unit).

    :param cs_gpio: Which GPIO pin is the chip select (slave select) for this UCASS unit
    """
    def __init__(self, cs_gpio):
        self.spi = spidev.SpiDev()
        self.spi.open(0, 0)  # SPI bus 0, device 0
        self.spi.mode = 1
        self.spi.max_speed_hz = 500000
        # Chip select idles high; off() asserts it (low) for a transfer
        self.cs = DigitalOutputDevice(cs_gpio, initial_value=True)
        self.cs.on()
        self.info_string = ""
        self.info_string = ""
        self.bbs = []    # 16 upper bin boundaries (int16), from read_config_vars()
        self.hist = []   # 16 particle counts per size bin, from read_histogram_data()
        self.mtof = []   # mean time-of-flight bytes (bins 1, 3, 5, 7)
        self.period = 0
        self.gsc = 0     # gain scaling coefficient
        self.id = 0      # unit ID byte
        self.checksum = 0
        self.reject_glitch = 0
        self.reject_ratio = 0
        self.reject_ltof = 0

    def command_byte(self, command):
        """Assert chip select and send a command byte list.

        CS is left asserted so the caller can clock out the reply bytes; the
        caller is responsible for calling self.cs.on() when done.
        """
        self.cs.off()
        a = self.spi.xfer(command)
        sleep(0.01)  # give the UCASS time to prepare its reply
        return a

    def read_info_string(self):
        """Read the 60-character instrument info string (command 0x3F)."""
        self.info_string = ""
        a = self.command_byte([0x3F])
        for i in range(60):
            sleep(0.00001)
            # 0x06 appears to clock out the next reply byte -- confirm against
            # the UCASS firmware protocol documentation
            buf = self.spi.xfer([0x06])[0]
            self.info_string += chr(buf)
        self.cs.on()

    def read_config_vars(self):
        """Read the 38-byte configuration block (command 0x3C): 16 int16 bin
        boundaries, the gain scaling coefficient (float) and the unit ID."""
        self.command_byte([0x3C])
        self.bbs = []
        raw = []
        for i in range(38):
            sleep(0.00001)
            buf = self.spi.xfer([0x06])[0]
            raw.append(buf)
        self.cs.on()
        for i in range(16):
            # Bytes are little-endian pairs: lsb first, then msb
            self.bbs.append(byte_to_int16(raw[i*2], raw[i*2+1]))
        self.gsc = byte_to_float(raw[32], raw[33], raw[34], raw[35])
        # raw[36] is skipped -- presumably padding or reserved per the protocol
        self.id = raw[37]

    def read_histogram_data(self):
        """Read the 43-byte histogram block (command 0x30): 16 int16 bin
        counts, 4 mean-ToF bytes, sampling period, checksum and the three
        reject counters."""
        self.command_byte([0x30])
        self.hist = []
        self.mtof = []
        raw = []
        index = 0
        for i in range(43):
            sleep(0.00001)
            buf = self.spi.xfer([0x06])[0]
            raw.append(buf)
        for i in range(16):
            self.hist.append(byte_to_int16(raw[i*2], raw[i*2+1]))
            index = index+2
        for i in range(4):
            # index continues from 32: the four bytes after the histogram
            self.mtof.append(raw[index])
            index = index+1
        self.period = byte_to_int16(raw[36], raw[37])
        self.checksum = byte_to_int16(raw[38], raw[39])
        self.reject_glitch = raw[40]
        self.reject_ltof = raw[41]
        self.reject_ratio = raw[42]
        self.cs.on()
def byte_to_int16(lsb, msb):
    """Reassemble a little-endian byte pair into an unsigned 16-bit integer."""
    return struct.unpack('<H', bytes((lsb, msb)))[0]
def byte_to_float(b1, b2, b3, b4):
    """Reassemble four little-endian bytes into the IEEE-754 float they encode.

    :param b1: least significant byte
    :param b4: most significant byte
    :return: the decoded value as a plain Python float
    """
    arr = bytearray([b1, b2, b3, b4])
    # struct.unpack returns a 1-tuple; index it so callers get a float.
    # (Previously the tuple itself was returned, so e.g. UCASS.gsc ended up
    # as a tuple rather than a number.)
    return struct.unpack('<f', arr)[0]
# [AeroSandbox](https://peterdsharpe.github.io/AeroSandbox/) :airplane:
by [Peter Sharpe](https://peterdsharpe.github.io) (<pds [at] mit [dot] edu>)
[](https://pepy.tech/project/aerosandbox)
[](https://pepy.tech/project/aerosandbox)
[](https://github.com/peterdsharpe/AeroSandbox/actions/workflows/run-pytest.yml)
[](https://pypi.python.org/pypi/aerosandbox)
[](https://aerosandbox.readthedocs.io/en/master/?badge=master)
[](https://opensource.org/licenses/MIT)
**AeroSandbox is a Python package that helps you design and optimize aircraft and other engineered systems.**
At its heart, AeroSandbox is an optimization suite that combines the ease-of-use of [familiar NumPy syntax](aerosandbox/numpy) with the power of [modern automatic differentiation](./tutorial/10%20-%20Miscellaneous/03%20-%20Resources%20on%20Automatic%20Differentiation.md).
This automatic differentiation dramatically improves optimization performance on large problems: **design problems with tens of thousands of decision variables solve in seconds on a laptop**. AeroSandbox also comes with dozens of end-to-end-differentiable aerospace physics models, allowing you to **simultaneously optimize an aircraft's aerodynamics, structures, propulsion, mission trajectory, stability, and more.**
Keeping AeroSandbox easy to learn and use is a top priority. *Complexity is optional* - you can use AeroSandbox's built-in physics models where helpful, or you can drop in arbitrary custom physics models of your own.
```
pip install aerosandbox[full]
```
-----
### What can I do with AeroSandbox?
Use AeroSandbox to design and optimize entire aircraft:
<table>
<tr>
<td width="50%" valign="top">
<p align="center">
<a href="https://github.com/peterdsharpe/Feather-RC-Glider"><i>Feather</i> (an ultra-lightweight 1-meter-class RC motor glider)</a>
</p>
<img src="https://raw.githubusercontent.com/peterdsharpe/Feather-RC-Glider/master/CAD/feather.png" alt="Feather first page">
</td>
<td width="50%" valign="top">
<p align="center">
<a href="https://github.com/peterdsharpe/solar-seaplane-preliminary-sizing"><i>SEAWAY-Mini</i> (a solar-electric, 13' wingspan seaplane)</a>
</p>
<img src="https://raw.githubusercontent.com/peterdsharpe/solar-seaplane-preliminary-sizing/main/CAD/renders/seaway_mini_packet_Page_1.png" alt="Seaway-Mini first page">
</td>
</tr>
</table>
Use AeroSandbox to support real-world aircraft development programs, all the way from your very first sketch to your first-flight and beyond:
<table>
<tr>
<td width="50%" valign="top">
<p align="center">
<a href="https://github.com/peterdsharpe/DawnDesignTool">Initial concept sketches + sizing of <i>Dawn</i> (a solar-electric airplane for climate science research) in AeroSandbox, Spring 2020</a>
</p>
<img src="./media/images/dawn1-first-sketch.png" alt="Dawn initial design">
</td>
<td width="50%" valign="top">
<p align="center">
<a href="https://youtu.be/CyTzx9UCvyo"><i>Dawn</i> (later renamed <i>SACOS</i>) in first flight, Fall 2022</a>
</p>
<p align="center"><a href="https://www.electra.aero/news/sacos-first-flight">(A massive build effort with excellent engineering and coordination by Electra.aero!)</a></p>
<img src="./media/images/SACOS%20First%20Flight%20Zoomed.jpg" alt="SACOS first flight">
</td>
</tr>
</table>
Use AeroSandbox to explore counterintuitive, complicated design tradeoffs, all at the earliest stages of conceptual design *where these insights make the most difference*:
<table>
<tr>
<td width="33%" valign="top">
<p align="center">
<a href="https://github.com/peterdsharpe/DawnDesignTool">Exploring how big a solar airplane needs to be to fly, as a function of seasonality and latitude</a>
</p>
<img src="https://github.com/peterdsharpe/DawnDesignTool/raw/master/docs/30kg_payload.svg" alt="Dawn seasonality latitude tradespace">
</td>
<td width="33%" valign="top">
<p align="center">
<a href="https://www.popularmechanics.com/military/aviation/a13938789/mit-developing-mach-08-rocket-drone-for-the-air-force/">Exploring how the mission range of <i>Firefly</i>, a Mach 0.8 rocket drone, changes if we add an altitude limit, simultaneously optimizing aircraft design and trajectories</a>
</p>
<img src="./media/images/firefly-range-ceiling-trade.png" alt="Firefly range ceiling trade">
</td>
<td width="33%" valign="top">
<p align="center">
<a href="https://github.com/peterdsharpe/transport-aircraft">Exploring how many LH2 aircraft classes an airline fleet needs to cover the market, considering off-design performance</a>
</p>
<img src="https://github.com/peterdsharpe/transport-aircraft/raw/master/figures/lh2_market_segmentation_2.svg" alt="LH2 Market Coverage">
</td>
</tr>
</table>
Use AeroSandbox as a pure aerodynamics toolkit:
<table>
<tr>
<td width="50%" valign="top">
<p align="center">
VLM simulation of a glider, aileron deflections of +-30°
</p>
<img src="./media/images/vlm3_with_control_surfaces.png" alt="VLM simulation">
</td>
<td width="50%" valign="top">
<p align="center">
Aerodynamic shape optimization of a wing planform, using an arbitrary objective and constraints
</p>
<img src="./media/images/wing_optimization.png" alt="Wing optimization">
</td>
</tr>
</table>
Among many other disciplines:
<table>
<tr>
<td width="50%" valign="top">
<p align="center">
Structural optimization of a composite tube spar
</p>
<img src="./media/images/beam-optimization.png" alt="Beam optimization">
</td>
<td width="50%" valign="top">
<p align="center">
Electric motor analysis for propeller matching
</p>
<img src="./media/images/motor_perf.png" alt="Motor performance">
</td>
</tr>
<tr>
<td>
<p align="center" valign="top">
<a href="https://github.com/peterdsharpe/transport-aircraft">Tools to analyze unconventional propulsion (e.g., LH2)</a>
</p>
<img src="https://github.com/peterdsharpe/transport-aircraft/raw/master/figures/three_view_annotated.svg" alt="LH2 airplane three-view">
</td>
<td>
<p align="center" valign="top">
Detailed weights estimation for aircraft ranging from micro-UAVs to airliners
</p>
<img src="https://github.com/peterdsharpe/transport-aircraft/raw/master/figures/mass_budget.png" alt="Mass Budget">
</td>
</tr>
</table>
Easily interface AeroSandbox with all your favorite tools:
<table>
<tr>
<td width="33%" valign="top">
<p align="center">
Other conceptual design tools (AVL, XFLR5, XFoil, ASWING, MSES, etc.)
</p>
<img src="./media/images/airfoil_contours.png" alt="XFoil">
</td>
<td width="33%" valign="top">
<p align="center">
CAD tools via STEP export (SolidWorks, Fusion 360, etc.)
</p>
<p align="center">
(STL, OBJ, etc. supported too)
</p>
<img src="https://github.com/peterdsharpe/solar-seaplane-preliminary-sizing/raw/main/CAD/renders/raytrace-lowres.jpg" alt="CAD">
</td>
<td width="33%" valign="top">
<p align="center">
User-provided models + code (for custom aerodynamics, structures, propulsion, or anything else - e.g., for optimizing flight through a probabilistic wind field, shown below)
</p>
<img src="./media/images/wind_speeds_model.png" alt="Wind speed">
</td>
</tr>
</table>
Or, throw all the airplane-design-specific code out entirely, and use AeroSandbox purely as an optimization solver or as a solver for nonlinear systems of equations (or ODEs, or PDEs):
<table>
<tr>
<td width="50%" valign="top">
<p align="center">
<a href="https://github.com/peterdsharpe/AeroSandbox/blob/develop/tutorial/01%20-%20Optimization%20and%20Math/01%20-%202D%20Rosenbrock.ipynb">Optimize the 2D Rosenbrock function</a>
</p>
<img src="./media/images/optimization.png" alt="Optimization">
</td>
<td width="50%" valign="top">
<p align="center">
<a href="https://github.com/peterdsharpe/AeroSandbox/tree/develop/tutorial/03%20-%20Trajectory%20Optimization%20and%20Optimal%20Control/01%20-%20Solving%20ODEs%20with%20AeroSandbox">Specify the Falkner Skan ODE (nonlinear, 3rd-order BVP) and let AeroSandbox automatically take care of the discretization, solution, and even inverse solving.</a>
</p>
<img src="./media/images/falkner-skan.png" alt="FS ODE">
</td>
</tr>
</table>
And much, much more. Best of all, combine these tools arbitrarily without any loss in optimization speed and without any tedious derivative math, all thanks to AeroSandbox's end-to-end automatic-differentiability.
## Getting Started
### Installation
In short:
* `pip install aerosandbox[full]` for a complete install.
* `pip install aerosandbox` for a lightweight (headless) installation with minimal dependencies. All optimization, numerics, and physics models are included, but optional visualization dependencies are skipped.
For more installation details (e.g., if you're new to Python), [see here](./INSTALLATION.md).
### Tutorials, Examples, and Documentation
To get started, [check out the tutorials folder here](./tutorial/)! All tutorials are viewable in-browser, or you can open them as Jupyter notebooks by cloning this repository.
For a more detailed and theory-heavy introduction to AeroSandbox, [please see this thesis](./tutorial/sharpe-pds-sm-AeroAstro-2021-thesis.pdf).
For a yet-more-detailed developer-level description of AeroSandbox modules, [please see the developer README](aerosandbox/README.md).
For fully-detailed API documentation, see [the documentation website](https://aerosandbox.readthedocs.io/en/master/).
You can print documentation and examples for any AeroSandbox object by using the built-in `help()` function (e.g., `help(asb.Airplane)`). AeroSandbox code is also documented *extensively* in the source and contains hundreds of unit test examples, so examining the source code can also be useful.
### Usage Details
One final point to note: as we're all sensible and civilized here, **all inputs and outputs to AeroSandbox are expressed in base SI units, or derived units thereof** (e.g, m, N, kg, m/s, J, Pa).
The only exception to this rule is when units are explicitly noted via variable name suffix. For example:
* `battery_capacity` -> Joules
* `battery_capacity_watt_hours` -> Watt-hours.
All angles are in radians, except for α and β which are in degrees due to long-standing aerospace convention. (In any case, units are marked on all function docstrings.)
If you wish to use other units, consider using [`aerosandbox.tools.units`](./aerosandbox/tools/units.py) to convert easily.
## Project Details
### Contributing
Please feel free to join the development of AeroSandbox - contributions are always so welcome! If you have a change you'd like to make, the easiest way to do that is by submitting a pull request.
The text file [`CONTRIBUTING.md`](./CONTRIBUTING.md) has more details for developers and power users.
If you've already made several additions and would like to be involved in a more long-term capacity, please message me!
Contact information can be found next to my name near the top of this README.
### Donating
If you like this software, please consider donating to support development [via PayPal](https://paypal.me/peterdsharpe)
or [GitHub Sponsors](https://github.com/sponsors/peterdsharpe/)! Proceeds will go towards more coffee for the grad students.
### Bugs
Please, please report all bugs by [creating a new issue](https://github.com/peterdsharpe/AeroSandbox/issues)!
### Versioning
AeroSandbox loosely uses [semantic versioning](https://semver.org/), which should give you an idea of whether or not you can probably expect backward-compatibility and/or new features from any given update.
For more details, see the [changelog](./CHANGELOG.md).
### Citation & Commercial Use
If you find AeroSandbox useful in a research publication, please cite it using the following BibTeX snippet:
```bibtex
@mastersthesis{aerosandbox,
title = {AeroSandbox: A Differentiable Framework for Aircraft Design Optimization},
author = {Sharpe, Peter D.},
school = {Massachusetts Institute of Technology},
year = {2021}
}
```
As the MIT License applies, use AeroSandbox for anything you want; attribution is strongly appreciated.
Commercial users: I'm more than happy to discuss consulting work for active AeroSandbox support if this package proves helpful - use the email address in the header of this README to get in touch.
### License
[MIT License, terms here](LICENSE.txt). Basically: use AeroSandbox for anything you want; no warranty express or implied.
## Stargazers over time
[](https://starchart.cc/peterdsharpe/AeroSandbox)
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/README.md | README.md |
import aerosandbox.numpy as np
from aerosandbox.optimization.opti import Opti
from abc import abstractmethod, ABC
import copy
from typing import Dict, Any
import casadi as cas
class AeroSandboxObject(ABC):
    """Abstract base class for all AeroSandbox objects.

    Provides shallow/deep copy helpers and the (deprecated) recursive
    solution-substitution utility.
    """

    @abstractmethod
    def __init__(self):
        """
        Denotes AeroSandboxObject as an abstract class, meaning you can't instantiate it directly - you must subclass
        (extend) it instead.
        """
        pass

    def copy(self):
        """
        Returns a shallow copy of the object.
        """
        return copy.copy(self)

    def deepcopy(self):
        """
        Returns a deep copy of the object.
        """
        return copy.deepcopy(self)

    def substitute_solution(self,
                            sol: cas.OptiSol,
                            inplace: bool = None,
                            ):
        """
        Substitutes a solution from CasADi's solver recursively as an in-place operation.

        In-place operation. To make it not in-place, do `y = copy.deepcopy(x)` or similar first.

        :param sol: OptiSol object.
        :param inplace: If True (the default), attributes are overwritten on the original
            objects; if False, converted copies are returned instead.
        :return: The converted object when `inplace` is False; otherwise None
            (the conversion happens in place on `self`).
        """
        import warnings
        warnings.warn(
            "This function is deprecated and will break at some future point.\n"
            "Use `sol.value(x)` or even simply `sol(x)` instead, which now works\n"
            "recursively on complex data structures.",
            DeprecationWarning
        )

        # Set defaults
        if inplace is None:
            inplace = True

        def convert(item):
            """
            This is essentially a supercharged version of sol.value(), which works for more iterable types.

            Args:
                item: Any object; CasADi symbolics are substituted, containers are
                    recursed into, and plain Python scalars are passed through.

            Returns:
                The converted object (see the staged logic below).
            """
            # If it can be converted, do the conversion.
            if np.is_casadi_type(item, recursive=False):
                return sol.value(item)

            t = type(item)

            # If it's a Python iterable, recursively convert it, and preserve the type as best as possible.
            if issubclass(t, list):
                return [convert(i) for i in item]
            if issubclass(t, tuple):
                return tuple([convert(i) for i in item])
            if issubclass(t, set) or issubclass(t, frozenset):
                return {convert(i) for i in item}
            if issubclass(t, dict):
                return {
                    convert(k): convert(v)
                    for k, v in item.items()
                }

            # Skip certain Python types (immutable scalars and raw buffers: nothing to substitute)
            for type_to_skip in (
                    bool, str,
                    int, float, complex,
                    range,
                    type(None),
                    bytes, bytearray, memoryview
            ):
                if issubclass(t, type_to_skip):
                    return item

            # Skip certain CasADi types (converting the solver objects themselves makes no sense)
            for type_to_skip in (
                    cas.Opti,
                    cas.OptiSol
            ):
                if issubclass(t, type_to_skip):
                    return item

            # If it's any other type, try converting its attribute dictionary:
            try:
                newdict = {
                    k: convert(v)
                    for k, v in item.__dict__.items()
                }

                if inplace:
                    for k, v in newdict.items():
                        setattr(item, k, v)
                    return item
                else:
                    newitem = copy.copy(item)
                    for k, v in newdict.items():
                        setattr(newitem, k, v)
                    return newitem

            except AttributeError:
                # Objects without a __dict__ (e.g. __slots__ classes) fall through
                # to the blind conversion attempt below.
                pass

            # Try converting it blindly. This will catch most NumPy-array-like types.
            try:
                return sol.value(item)
            except (NotImplementedError, TypeError, ValueError):
                pass

            # At this point, we're not really sure what type the object is. Raise a warning and return the item, then hope for the best.
            import warnings
            warnings.warn(f"In solution substitution, could not convert an object of type {t}.\n"
                          f"Returning it and hoping for the best.", UserWarning)
            return item

        if inplace:
            convert(self)
        else:
            return convert(self)
class ExplicitAnalysis(AeroSandboxObject):
    default_analysis_specific_options: Dict[type, Dict[str, Any]] = {}
    """This is part of AeroSandbox's "analysis-specific options" feature, which lets you "tag" geometry objects with
    flags that change how different analyses act on them.

    This variable, `default_analysis_specific_options`, allows you to specify default values for options that can be
    used for specific problems.

    This should be a dictionary, where:
        * keys are the geometry-like types that you might be interested in defining parameters for.
        * values are dictionaries, where:
            * keys are strings that label a given option
            * values are anything. These are used as the default values, in the event that the associated
              geometry doesn't override those.

    An example of what this variable might look like, for a vortex-lattice method aerodynamic analysis:

    >>> default_analysis_specific_options = {
    >>>     Airplane: dict(
    >>>         profile_drag_coefficient=0
    >>>     ),
    >>>     Wing    : dict(
    >>>         wing_level_spanwise_spacing=True,
    >>>         spanwise_resolution=12,
    >>>         spanwise_spacing="cosine",
    >>>         chordwise_resolution=12,
    >>>         chordwise_spacing="cosine",
    >>>         component=None,  # type: int
    >>>         no_wake=False,
    >>>         no_alpha_beta=False,
    >>>         no_load=False,
    >>>         drag_polar=dict(
    >>>             CL1=0,
    >>>             CD1=0,
    >>>             CL2=0,
    >>>             CD2=0,
    >>>             CL3=0,
    >>>             CD3=0,
    >>>         ),
    >>>     )
    >>> }
    """

    def get_options(self,
                    geometry_object: AeroSandboxObject,
                    ) -> Dict[str, Any]:
        """
        Retrieves the analysis-specific options that correspond to both:

            * An analysis type (which is this object, "self"), and

            * A specific geometry object, such as an Airplane or Wing.

        Args:
            geometry_object: An instance of an AeroSandbox geometry object, such as an Airplane, Wing, etc.

                * In order for this function to do something useful, you probably want this object to have
                  `analysis_specific_options` defined. See the asb.Airplane constructor for an example of this.

        Returns: A dictionary that combines (in increasing order of priority):

            * This analysis's default options for this geometry type, if any exist
              (from `self.default_analysis_specific_options`).

            * The geometry's declared analysis-specific options for this analysis, if they exist. These
              geometry options override the defaults from the analysis.

            The dictionary has the format:

            * keys are strings, listing specific options

            * values can be any type, and simply state the value of the analysis-specific option following the
              logic above.

            The returned dictionary is always a copy, so mutating it will not affect the analysis defaults or
            the geometry object's declared options.

        Note: if this analysis defines a set of default options for the geometry type in question (by using
        `self.default_analysis_specific_options`), all keys from the geometry object's `analysis_specific_options`
        will be validated against those in the default options set. A warning will be raised if keys do not
        correspond to those in the defaults, as this (potentially) indicates a typo, which would otherwise be
        difficult to debug.
        """
        ### Determine the types of both this analysis and the geometry object.
        analysis_type: type = self.__class__
        geometry_type: type = geometry_object.__class__

        ### Pull out each side's options for the other, if they exist and are dict-like (EAFP).
        try:
            analysis_options_for_this_geometry = self.default_analysis_specific_options[geometry_type]
            assert hasattr(analysis_options_for_this_geometry, "items")
        except (AttributeError, KeyError, AssertionError):
            analysis_options_for_this_geometry = None

        try:
            geometry_options_for_this_analysis = geometry_object.analysis_specific_options[analysis_type]
            assert hasattr(geometry_options_for_this_analysis, "items")
        except (AttributeError, KeyError, AssertionError):
            geometry_options_for_this_analysis = None

        ### Now, merge those options (with logic depending on whether they exist or not)
        if analysis_options_for_this_geometry is not None:
            options = copy.deepcopy(analysis_options_for_this_geometry)
            if geometry_options_for_this_analysis is not None:
                for k, v in geometry_options_for_this_analysis.items():
                    if k in analysis_options_for_this_geometry:
                        options[k] = v
                    else:
                        # Unknown key: likely a typo on the geometry object. Warn and drop it.
                        import warnings
                        allowable_keys = [f'"{k}"' for k in analysis_options_for_this_geometry.keys()]
                        warnings.warn(
                            f"\nAn object of type '{geometry_type.__name__}' declared the analysis-specific option '{k}' for use with analysis '{analysis_type.__name__}'.\n"
                            f"This was unexpected! Allowable analysis-specific options for '{geometry_type.__name__}' with '{analysis_type.__name__}' are:\n"
                            "\t" + "\n\t".join(allowable_keys) + "\n" "Did you make a typo?",
                            stacklevel=2,
                        )
        else:
            if geometry_options_for_this_analysis is not None:
                # Defensive copy, for consistency with the branch above: previously the geometry's own
                # options dict was returned by reference, so a caller mutating the result would silently
                # rewrite the geometry object's declared options.
                options = copy.copy(geometry_options_for_this_analysis)
            else:
                options = {}

        return options
class ImplicitAnalysis(AeroSandboxObject):
    @staticmethod
    def initialize(init_method):
        """
        A decorator that should be applied to the __init__ method of ImplicitAnalysis or any subclass of it.
        Usage example:
            >>> class MyAnalysis(ImplicitAnalysis):
            >>>
            >>>     @ImplicitAnalysis.initialize
            >>>     def __init__(self):
            >>>         self.a = self.opti.variable(init_guess = 1)
            >>>         self.b = self.opti.variable(init_guess = 2)
            >>>
            >>>         self.opti.subject_to(
            >>>             self.a == self.b ** 2
            >>>         ) # Add a nonlinear governing equation
        Functionality:
        The basic purpose of this wrapper is to ensure that every ImplicitAnalysis has an `opti` property that points to
        an optimization environment (asb.Opti type) that it can work in.
        How do we obtain an asb.Opti environment to work in? Well, this decorator adds an optional `opti` parameter to
        the __init__ method that it is applied to.
            1. If this `opti` parameter is not provided, then a new empty `asb.Opti` environment is created and stored as
            `ImplicitAnalysis.opti`.
            2. If the `opti` parameter is provided, then we simply assign the given `asb.Opti` environment (which may
            already contain other variables/constraints/objective) to `ImplicitAnalysis.opti`.
        In addition, a property called `ImplicitAnalysis.opti_provided` is stored, which records whether the user
        provided an Opti environment or if one was instead created for them.
        If the user did not provide an Opti environment (Option 1 from our list above), we assume that the user basically
        just wants to perform a normal, single-disciplinary analysis. So, in this case, we proceed to solve the analysis as-is
        and do an in-place substitution of the solution.
        If the user did provide an Opti environment (Option 2 from our list above), we assume that the user might potentially want
        to add other implicit analyses to the problem. So, in this case, we don't solve the analysis, and the user must later
        solve the analysis by calling `sol = opti.solve()` or similar.
        """
        def init_wrapped(self, *args, opti=None, **kwargs):
            # Assign `self.opti` *before* calling the user's __init__, since the user's
            # __init__ is expected to declare variables/constraints on `self.opti`.
            if opti is None:
                self.opti = Opti()
                self.opti_provided = False
            else:
                self.opti = opti
                self.opti_provided = True
            init_method(self, *args, **kwargs)
            # If we auto-created the Opti environment (single-discipline use) and the user's
            # __init__ actually declared at least one variable, solve immediately and substitute
            # the solution in-place. (A shape of (0, 1) for `opti.x` appears to indicate that no
            # decision variables were declared -- presumably the underlying CasADi decision-variable
            # vector; TODO confirm against asb.Opti internals.)
            if not self.opti_provided and not self.opti.x.shape == (0, 1):
                sol = self.opti.solve()
                self.__dict__ = sol(self.__dict__)  # Replace every attribute with its solved value.
        return init_wrapped
    class ImplicitAnalysisInitError(Exception):
        # Raised (via the `opti` / `opti_provided` properties below) when a subclass forgot to
        # decorate its __init__ with @ImplicitAnalysis.initialize.
        def __init__(self,
                     message="""
                 Your ImplicitAnalysis object doesn't have an `opti` property!
                 This is almost certainly because you didn't decorate your object's __init__ method with
                 `@ImplicitAnalysis.initialize`, which you should go do.
                 """
                     ):
            self.message = message
            super().__init__(self.message)
    @property
    def opti(self):
        """The Opti environment this analysis operates in; set by @ImplicitAnalysis.initialize."""
        try:
            return self._opti
        except AttributeError:
            # The backing field is only set via the setter below (called by `initialize`),
            # so its absence means the decorator was never applied.
            raise self.ImplicitAnalysisInitError()
    @opti.setter
    def opti(self, value: Opti):
        self._opti = value
    @property
    def opti_provided(self):
        """True if the user supplied an Opti environment to __init__; False if one was auto-created."""
        try:
            return self._opti_provided
        except AttributeError:
            raise self.ImplicitAnalysisInitError()
    @opti_provided.setter
    def opti_provided(self, value: bool):
        self._opti_provided = value
# AeroSandbox User Guide
by Peter Sharpe
----------
Hello there, dear user!
Welcome to the inner workings of AeroSandbox. Come in and stay awhile - we're so glad you're here! :smile:
## Map
There's a big world in here filled with functions, classes, analyses, and more - let me show you around; we wouldn't want you to get lost! Here's your map of the place, in the order of how you should explore these folders to learn how you can harness the power of AeroSandbox. First, let's look at the core pieces:
### The Core Tools
These are the two key pieces of AeroSandbox to understand - once you get these, you're 90% of the way there.
* `/optimization/`: This folder contains only one thing, and it's the single most important class in AeroSandbox: the `Opti` stack. The `Opti` class is an object-oriented way to formulate and solve an optimization problem, with syntax specifically aimed at engineering design.
One of the core principles of AeroSandbox is that *everything* is an optimization problem. Even for problems that look like pure analysis ("I already have a design, how well does it perform?"), there's a beautiful duality between optimization and analysis through something called "Simultaneous Analysis and Design" - more on this later. Because of this, the `Opti` stack is truly ubiquitous throughout AeroSandbox.
Extensive documentation with examples is provided in `aerosandbox.optimization.opti` - please read this!
* `/numpy/`: One of the coolest things about the `Opti` stack is that it's fast - really, **really** fast. You can solve nonlinear, nonconvex optimization problems with thousands of variables in mere seconds on a laptop, thanks to automatic differentiation (AD) provided by CasADi and modern optimization methods via IPOPT.
In order for AD to work, we need to be able to make a list (more precisely, a directed graph) of each mathematical operation (think `+`, `-`, `*`, `/`, `**`, `log()`, `fabs()`, etc.) that's applied throughout our optimization formulation (some call this list a "trace" in the literature). This means we can't just use NumPy out of the box like we'd like to, because some of its functions break our trace.
Instead, we need to use a custom math library, which sounds scary at first. However, the AeroSandbox development team has tried to make this as seamless to you as possible - by writing our own NumPy with identical syntax! Here's how this works:
* `aerosandbox.numpy` imports the entirety of NumPy.
* For NumPy functions that break our AD trace (e.g. `np.sum()`), we've rewritten our own versions of them. This means:
* If you pass normal NumPy arrays to these functions, they'll work 100% exactly the same as they would in original NumPy - same result and same speed.
* If you pass optimization variables to these functions, they'll intelligently switch over to a version of the function that allows us to preserve the AD trace.
* **So what does this mean for you, dear user?** It means that when working with AeroSandbox, all you need to do is replace `import numpy as np` with `import aerosandbox.numpy as np`, and you're good to go!
* Caveat: Not all NumPy functions that should be overwritten have been overwritten - we've done our best, but there are *sooo* many obscure NumPy functions! If you get an error on a function you want to use, raise an issue ticket [here](https://github.com/peterdsharpe/AeroSandbox/issues)!
Before continuing, I'd recommend practicing a bit using the `Opti()` stack and `aerosandbox.numpy` to solve a few canonical optimization problems. A good starter problem is finding the minimum of the 2D [Rosenbrock function](https://en.wikipedia.org/wiki/Rosenbrock_function) - for extra credit, add a constraint that the point has to lie inside the unit circle!
### Modeling Tools
* `/geometry/`: The main goal of AeroSandbox is to make engineering design optimization more harmonious. Engineering design can look like a lot of things depending on what you're trying to design - airplanes, cars, bridges, et cetera.
However, all of these objects have one thing in common - geometry! They're all *physical* objects that we're trying to optimize the shape of - remember that **engineering design is the process of finding an optimal mapping from an object's function to that object's form** (in the words of my excellent advisor, Prof. John Hansman).
The `geometry` folder therefore is a self-contained object-oriented framework for representing the geometry of engineered systems in Python. Right now, it's primarily used for aircraft - you can build a nested data structure all the way from an entire aircraft down to each point that defines the shape of an individual airfoil. Once you have that data structure, you can do all sorts of useful things with it - output it to various filetypes, draw it in an interactive 3D window so that you can see it, and pass it to all kinds of analysis tools.
In the future, we'll hopefully generalize this `geometry` stack with more general representations (`Polygon`, `Polyhedron`, etc.) to represent the geometry of arbitrary types of engineered systems (not just aircraft).
* `/modeling/` is all about one thing - making surrogate models (which is also called "curve fitting" if we're being honest or "machine learning" if we're trying to convince someone to give us money). Jokes aside, the point of `modeling` is to make simple, continuous, automatic-differentiable models based on either:
* synthetic "data" created by running either:
* high-fidelity analysis that we'd prefer not to run "online" with the rest of our optimization models (e.g. RANS CFD)
* black-box analysis that we can't run "online" with the rest of our optimization models (e.g. legacy Fortran code)
* real data from real-life experiments (the highest-fidelity analysis, in a sense!)
### Discipline-Specific Tools
We also have a collection of aircraft analysis tools categorized by discipline:
* `/aerodynamics/`: Contains analysis tools related to aerodynamics both in 2D and 3D.
* `/propulsion/`: Contains analysis tools related to propulsion. Work in progress.
* `/structures/`: Contains analysis tools related to structures, like beam models.
* `/atmosphere/`: Contains a few models of standard atmospheres, so that you can get atmospheric properties at different altitudes.
* `/library/`: Contains tons of simple surrogate models for a wide variety of relevant phenomena. Some models are physics-based, others are empirical fits - all are documented in their docstring.
### Miscellaneous Tools
You can ignore all of these folders.
* `/tools/`: Miscellaneous tools that do not interact with the rest of AeroSandbox - just along for the ride.
* `/visualization/`: Tools for making prettier plots.
* `in_progress`: Here be dragons, beware! But seriously, this is work in progress, ignore it. | AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/README.md | README.md |
# AeroSandbox Dynamics
This module provides dynamics engines (e.g., tools for computing equations of motions) for a variety of free-flying dynamical systems.
Each dynamics engine is given within the context of a Python class. These dynamics engines can be broadly categorized into three groups, with increasing fidelity:
* Point mass
* 3 DoF (i.e., 2D)
* 6 DoF (i.e., 3D). These models can use either Euler angle parameterization or quaternion parameterization for the underlying state variables.
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/dynamics/README.md | README.md |
import aerosandbox.numpy as np
from aerosandbox.common import AeroSandboxObject
from abc import ABC, abstractmethod, abstractproperty
from typing import Union, Dict, Tuple, List
from aerosandbox import MassProperties, Opti, OperatingPoint, Atmosphere, Airplane, _asb_root
from aerosandbox.tools.string_formatting import trim_string
import inspect
import copy
class _DynamicsPointMassBaseClass(AeroSandboxObject, ABC):
@abstractmethod
def __init__(self,
mass_props: MassProperties = None,
**state_variables_and_indirect_control_variables,
):
self.mass_props = MassProperties() if mass_props is None else mass_props
"""
For each state variable, self.state_var = state_var
For each indirect control variable, self.indirect_control_var = indirect_control_var
For each control variable, self.control_var = 0
"""
    @abstractproperty
    def state(self) -> Dict[str, Union[float, np.ndarray]]:
        """
        Returns the state variables of this Dynamics instance as a Dict.
        Keys are strings that give the name of the variables.
        Values are the variables themselves.
        This method should look something like:
            >>> {
            >>>     "x_e": self.x_e,
            >>>     "u_e": self.u_e,
            >>>     ...
            >>> }
        Note: the ordering of this Dict defines the ordering used by `pack_state()` /
        `unpack_state()`, so it must be stable.
        """
        pass
def get_new_instance_with_state(self,
new_state: Union[
Dict[str, Union[float, np.ndarray]],
List, Tuple, np.ndarray
] = None
):
"""
Creates a new instance of this same Dynamics class from the given state.
Note that any control variables (forces, moments) associated with the previous instance are zeroed.
Args:
new_state: The new state to be used for the new instance. Ideally, this is represented as a Dict in identical format to the `state` of a Dynamics instance.
Returns: A new instance of this same Dynamics class.
"""
### Get a list of all the inputs that the class constructor wants to see
init_signature = inspect.signature(self.__class__.__init__)
init_args = list(init_signature.parameters.keys())[1:] # Ignore 'self'
### Create a new instance, and give the constructor all the inputs it wants to see (based on values in this instance)
new_dyn: __class__ = self.__class__(**{
k: getattr(self, k)
for k in init_args
})
### Overwrite the state variables in the new instance with those from the input
new_dyn._set_state(new_state=new_state)
### Return the new instance
return new_dyn
def _set_state(self,
new_state: Union[
Dict[str, Union[float, np.ndarray]],
List, Tuple, np.ndarray
] = None
):
"""
Force-overwrites all state variables with a new set (either partial or complete) of state variables.
Warning: this is *not* the intended public usage of Dynamics instances.
If you want a new state yourself, you should instantiate a new one either:
a) manually, or
b) by using Dynamics.get_new_instance_with_state()
Hence, this function is meant for PRIVATE use only - be careful how you use this! Especially note that
control variables (e.g., forces, moments) do not reset to zero.
"""
### Set the default parameters
if new_state is None:
new_state = {}
try: # Assume `value` is a dict-like, with keys
for key in new_state.keys(): # Overwrite each of the specified state variables
setattr(self, key, new_state[key])
except AttributeError: # Assume it's an iterable that has been sorted.
self._set_state(
self.pack_state(new_state)) # Pack the iterable into a dict-like, then do the same thing as above.
def unpack_state(self,
dict_like_state: Dict[str, Union[float, np.ndarray]] = None
) -> Tuple[Union[float, np.ndarray]]:
"""
'Unpacks' a Dict-like state into an array-like that represents the state of the dynamical system.
Args:
dict_like_state: Takes in a dict-like representation of the state.
Returns: The array representation of the state that you gave.
"""
if dict_like_state is None:
dict_like_state = self.state
return tuple(dict_like_state.values())
def pack_state(self,
array_like_state: Union[List, Tuple, np.ndarray] = None
) -> Dict[str, Union[float, np.ndarray]]:
"""
'Packs' an array into a Dict that represents the state of the dynamical system.
Args:
array_like_state: Takes in an iterable that must have the same number of entries as the state vector of the system.
Returns: The Dict representation of the state that you gave.
"""
if array_like_state is None:
return self.state
if not len(self.state.keys()) == len(array_like_state):
raise ValueError(
"There are a differing number of elements in the `state` variable and the `array_like` you're trying to pack!")
return {
k: v
for k, v in zip(
self.state.keys(),
array_like_state
)
}
    @abstractproperty
    def control_variables(self) -> Dict[str, Union[float, np.ndarray]]:
        """
        Returns the control variables of this Dynamics instance as a Dict.
        Keys are strings (variable names); values are the variables themselves.
        Analogous to the `state` property, but for control variables (both the
        direct force-type controls and any indirect controls such as angles).
        """
        pass
def __repr__(self) -> str:
title = f"{self.__class__.__name__} instance:"
def makeline(k, v):
name = trim_string(str(k).strip(), length=8).rjust(8)
item = trim_string(str(v).strip(), length=40).ljust(40)
line = f"{name}: {item}"
return line
state_variables_title = "\tState variables:"
state_variables = "\n".join([
"\t\t" + makeline(k, v)
for k, v in self.state.items()
])
control_variables_title = "\tControl variables:"
control_variables = "\n".join([
"\t\t" + makeline(k, v)
for k, v in self.control_variables.items()
])
return "\n".join([
title,
state_variables_title,
state_variables,
control_variables_title,
control_variables
])
def __getitem__(self, index: int):
"""
Indexes one item from each attribute of a Dynamics instance.
Returns a new Dynamics instance of the same type.
Args:
index: The index that is being called; e.g.,:
>>> first_dyn = dyn[0]
Returns: A new Dynamics instance, where each attribute is subscripted at the given value, if possible.
"""
def get_item_of_attribute(a):
try:
return a[index]
except TypeError as e: # object is not subscriptable
return a
except IndexError as e: # index out of range
raise IndexError("A state variable could not be indexed, since the index is out of range!")
except NotImplementedError as e:
raise TypeError(f"Indices must be integers or slices, not {index.__class__.__name__}")
new_instance = self.get_new_instance_with_state()
for k, v in new_instance.__dict__.items():
setattr(new_instance, k, get_item_of_attribute(v))
return new_instance
def __len__(self):
"""
Returns the length of the (vectorized) state vector of the system.
If no elements of the state vector are vectorized, returns 1.
"""
length = 1
for v in self.state.values():
if np.length(v) == 1:
pass
elif length == 1:
length = np.length(v)
elif length == np.length(v):
pass
else:
raise ValueError("State variables are appear vectorized, but of different lengths!")
return length
    @abstractmethod
    def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
        """
        A function that returns the derivatives with respect to time of the state specified in the `state` property.
        Should return a Dict with the same keys as the `state` property.
        Note: a value of None for a given key means "do not constrain this derivative";
        `constrain_derivatives()` skips None entries.
        """
        pass
def constrain_derivatives(self,
opti: Opti,
time: np.ndarray,
method: str = "midpoint",
which: Union[str, List[str], Tuple[str]] = "all",
_stacklevel=1,
):
"""
Applies the relevant state derivative constraints to a given Opti instance.
Args:
opti: the AeroSandbox `Opti` instance that constraints should be applied to.
time: A vector that represents the time at each discrete point. Should be the same length as any
vectorized state variables in the `state` property of this Dynamics instance.
method: The discrete integration method to use. See Opti.constrain_derivative() for options.
which: Which state variables should be we constrain? By default, constrains all of them.
Options:
* "all", which constrains all state variables (default)
* A list of strings that are state variable names (i.e., a subset of `dyn.state.keys()`),
that gives the names of state variables to be constrained.
_stacklevel: Optional and advanced, purely used for debugging. Allows users to correctly track where
constraints are declared in the event that they are subclassing `aerosandbox.Opti`. Modifies the
stacklevel of the declaration tracked, which is then presented using
`aerosandbox.Opti.variable_declaration()` and `aerosandbox.Opti.constraint_declaration()`.
Returns:
"""
if which == "all":
which = self.state.keys()
state_derivatives = self.state_derivatives()
for state_var_name in which:
# If a state derivative has a None value, skip it.
if state_derivatives[state_var_name] is None:
continue
# Try to constrain the derivative
try:
opti.constrain_derivative(
derivative=state_derivatives[state_var_name],
variable=self.state[state_var_name],
with_respect_to=time,
method=method,
_stacklevel=_stacklevel + 1
)
except KeyError:
raise ValueError(f"This dynamics instance does not have a state named '{state_var_name}'!")
except Exception as e:
raise ValueError(f"Error while constraining state variable '{state_var_name}': \n{e}")
    @abstractmethod
    def convert_axes(self,
                     x_from: float,
                     y_from: float,
                     z_from: float,
                     from_axes: str,
                     to_axes: str,
                     ) -> Tuple[float, float, float]:
        """
        Converts a vector [x_from, y_from, z_from], as given in the `from_axes` frame, to an equivalent vector [x_to,
        y_to, z_to], as given in the `to_axes` frame.
        Identical to OperatingPoint.convert_axes(), but adds in "earth" as a valid axis frame. For more documentation,
        see the docstring of OperatingPoint.convert_axes().
        Both `from_axes` and `to_axes` should be a string, one of:
            * "geometry"
            * "body"
            * "wind"
            * "stability"
            * "earth"
        Args:
            x_from: x-component of the vector, in `from_axes` frame.
            y_from: y-component of the vector, in `from_axes` frame.
            z_from: z-component of the vector, in `from_axes` frame.
            from_axes: The axes to convert from. See above for options.
            to_axes: The axes to convert to. See above for options.
        Returns: The x-, y-, and z-components of the vector, in `to_axes` frame. Given as a tuple.
        Note: implementations are expected to return the inputs unchanged when `from_axes == to_axes`.
        """
        pass
    @abstractmethod
    def add_force(self,
                  Fx: Union[float, np.ndarray] = 0,
                  Fy: Union[float, np.ndarray] = 0,
                  Fz: Union[float, np.ndarray] = 0,
                  axes: str = "wind",
                  ) -> None:
        """
        Adds a force (in whichever axis system you choose) to this Dynamics instance.
        Forces are accumulated: repeated calls sum onto the force components
        already stored on the instance.
        Args:
            Fx: Force in the x-direction in the axis system chosen. [N]
            Fy: Force in the y-direction in the axis system chosen. [N]
            Fz: Force in the z-direction in the axis system chosen. [N]
            axes: The axis system that the specified force is in. One of:
                * "geometry"
                * "body"
                * "wind"
                * "stability"
                * "earth"
        Returns: None (in-place)
        """
        pass
def add_gravity_force(self,
g=9.81
) -> None:
"""
In-place modifies the forces associated with this Dynamics instance: adds a force in the -z direction,
equal to the weight of the aircraft.
Args:
g: The gravitational acceleration. [m/s^2]
Returns: None (in-place)
"""
self.add_force(
Fz=self.mass_props.mass * g,
axes="earth",
)
@property
def op_point(self):
"""
Returns an OperatingPoint object that represents the current state of the dynamics instance.
This OperatingPoint object is effectively a subset of the state variables, and is used to compute aerodynamic
forces and moments.
"""
return OperatingPoint(
atmosphere=Atmosphere(altitude=self.altitude),
velocity=self.speed,
alpha=self.alpha,
beta=self.beta,
p=0,
q=0,
r=0,
)
def draw(self,
vehicle_model: Airplane = None,
backend: str = "pyvista",
draw_axes: bool = True,
scale_vehicle_model: Union[float, None] = None,
n_vehicles_to_draw: int = 10,
cg_axes: str = "geometry",
show: bool = True,
):
if backend == "pyvista":
import pyvista as pv
import aerosandbox.tools.pretty_plots as p
if vehicle_model is None:
default_vehicle_stl = _asb_root / "dynamics/visualization/default_assets/yf23.stl"
vehicle_model = pv.read(str(default_vehicle_stl))
elif isinstance(vehicle_model, pv.PolyData):
pass
elif isinstance(vehicle_model, Airplane):
vehicle_model = vehicle_model.draw(
backend="pyvista",
show=False
)
vehicle_model.rotate_y(180, inplace=True) # Rotate from geometry axes to body axes.
elif isinstance(vehicle_model, str): # Interpret the string as a filepath to a .stl or similar
try:
pv.read(filename=vehicle_model)
except Exception:
raise ValueError("Could not parse `vehicle_model`!")
else:
raise TypeError("`vehicle_model` should be an Airplane or PolyData object.")
x_e = np.array(self.x_e)
y_e = np.array(self.y_e)
z_e = np.array(self.z_e)
if np.length(x_e) == 1:
x_e = x_e * np.ones(len(self))
if np.length(y_e) == 1:
y_e = y_e * np.ones(len(self))
if np.length(z_e) == 1:
z_e = z_e * np.ones(len(self))
if scale_vehicle_model is None:
trajectory_bounds = np.array([
[x_e.min(), x_e.max()],
[y_e.min(), y_e.max()],
[z_e.min(), z_e.max()],
])
trajectory_size = np.max(np.diff(trajectory_bounds, axis=1))
vehicle_bounds = np.array(vehicle_model.bounds).reshape((3, 2))
vehicle_size = np.max(np.diff(vehicle_bounds, axis=1))
scale_vehicle_model = 0.1 * trajectory_size / vehicle_size
### Initialize the plotter
plotter = pv.Plotter()
# Set the window title
title = "ASB Dynamics"
addenda = []
if scale_vehicle_model != 1:
addenda.append(f"Vehicle drawn at {scale_vehicle_model:.2g}x scale")
addenda.append(f"{self.__class__.__name__} Engine")
if len(addenda) != 0:
title = title + f" ({'; '.join(addenda)})"
plotter.title = title
# Draw axes and grid
plotter.add_axes()
plotter.show_grid(color='gray')
### Draw the vehicle
for i in np.unique(
np.round(
np.linspace(0, len(self) - 1, n_vehicles_to_draw)
)
).astype(np.int64):
dyn = self[i]
try:
phi = dyn.phi
except AttributeError:
phi = dyn.bank
try:
theta = dyn.theta
except AttributeError:
theta = dyn.gamma
try:
psi = dyn.psi
except AttributeError:
psi = dyn.track
x_cg_b, y_cg_b, z_cg_b = dyn.convert_axes(
dyn.mass_props.x_cg,
dyn.mass_props.y_cg,
dyn.mass_props.z_cg,
from_axes=cg_axes,
to_axes="body"
)
this_vehicle = copy.deepcopy(vehicle_model)
this_vehicle.translate([
-x_cg_b,
-y_cg_b,
-z_cg_b,
], inplace=True)
this_vehicle.points *= scale_vehicle_model
this_vehicle.rotate_x(np.degrees(phi), inplace=True)
this_vehicle.rotate_y(np.degrees(theta), inplace=True)
this_vehicle.rotate_z(np.degrees(psi), inplace=True)
this_vehicle.translate([
dyn.x_e,
dyn.y_e,
dyn.z_e,
], inplace=True)
plotter.add_mesh(
this_vehicle,
)
if draw_axes:
rot = np.rotation_matrix_from_euler_angles(phi, theta, psi)
axes_scale = 0.5 * np.max(
np.diff(
np.array(this_vehicle.bounds).reshape((3, -1)),
axis=1
)
)
origin = np.array([
dyn.x_e,
dyn.y_e,
dyn.z_e,
])
for i, c in enumerate(["r", "g", "b"]):
plotter.add_mesh(
pv.Spline(np.array([
origin,
origin + rot[:, i] * axes_scale
])),
color=c,
line_width=2.5,
)
for i in range(len(self)):
### Draw the trajectory line
polyline = pv.Spline(np.array([x_e, y_e, z_e]).T)
plotter.add_mesh(
polyline,
color=p.adjust_lightness(p.palettes["categorical"][0], 1.2),
line_width=3,
)
### Finalize the plotter
plotter.camera.up = (0, 0, -1)
plotter.camera.Azimuth(90)
plotter.camera.Elevation(60)
if show:
plotter.show()
return plotter
    @property
    def altitude(self):
        """Altitude [m], measured positive-up; equal to -z_e, since Earth-axes z_e is positive-down."""
        return -self.z_e
@property
def translational_kinetic_energy(self) -> float:
"""
Computes the kinetic energy [J] from translational motion.
KE = 0.5 * m * v^2
Returns:
Kinetic energy [J]
"""
return 0.5 * self.mass_props.mass * self.speed ** 2
@property
def rotational_kinetic_energy(self) -> float:
"""
Computes the kinetic energy [J] from rotational motion.
KE = 0.5 * I * w^2
Returns:
Kinetic energy [J]
"""
return 0.5 * (
self.mass_props.Ixx * self.p ** 2 +
self.mass_props.Iyy * self.q ** 2 +
self.mass_props.Izz * self.r ** 2
)
@property
def kinetic_energy(self):
"""
Computes the kinetic energy [J] from translational and rotational motion.
KE = 0.5 * m * v^2 + 0.5 * I * w^2
Returns:
Kinetic energy [J]
"""
return self.translational_kinetic_energy + self.rotational_kinetic_energy
@property
def potential_energy(self,
g: float = 9.81
):
"""
Computes the potential energy [J] from gravity.
PE = mgh
Args:
g: Acceleration due to gravity [m/s^2]
Returns:
Potential energy [J]
"""
return self.mass_props.mass * g * self.altitude | AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/dynamics/point_mass/common_point_mass.py | common_point_mass.py |
from aerosandbox.dynamics.point_mass.common_point_mass import _DynamicsPointMassBaseClass
from aerosandbox.weights.mass_properties import MassProperties
import aerosandbox.numpy as np
from typing import Union, Dict, Tuple
class DynamicsPointMass3DSpeedGammaTrack(_DynamicsPointMassBaseClass):
    """
    Dynamics instance:
    * simulating a point mass
    * in 3D
    * with velocity parameterized in speed-gamma-track space
    State variables:
        x_e: x-position, in Earth axes. [meters]
        y_e: y-position, in Earth axes. [meters]
        z_e: z-position, in Earth axes. [meters]
        speed: Speed; equivalent to u_w, the x-velocity in wind axes. [m/s]
        gamma: Flight path angle. [radians]
        track: Track angle. [radians]
            * Track of 0 == North == aligned with x_e axis
            * Track of np.pi / 2 == East == aligned with y_e axis
    Indirect control variables:
        alpha: Angle of attack. [degrees]
        beta: Sideslip angle. [degrees]
        bank: Bank angle. [radians]
    Control variables:
        Fx_w: Force along the wind-x axis. [N]
        Fy_w: Force along the wind-y axis. [N]
        Fz_w: Force along the wind-z axis. [N]
    """
    def __init__(self,
                 mass_props: MassProperties = None,
                 x_e: Union[float, np.ndarray] = 0,
                 y_e: Union[float, np.ndarray] = 0,
                 z_e: Union[float, np.ndarray] = 0,
                 speed: Union[float, np.ndarray] = 0,
                 gamma: Union[float, np.ndarray] = 0,
                 track: Union[float, np.ndarray] = 0,
                 alpha: Union[float, np.ndarray] = 0,
                 beta: Union[float, np.ndarray] = 0,
                 bank: Union[float, np.ndarray] = 0,
                 ):
        # Initialize state variables
        self.mass_props = MassProperties() if mass_props is None else mass_props
        self.x_e = x_e
        self.y_e = y_e
        self.z_e = z_e
        self.speed = speed
        self.gamma = gamma
        self.track = track
        # Initialize indirect control variables
        self.alpha = alpha
        self.beta = beta
        self.bank = bank
        # Initialize control variables (accumulated later via add_force())
        self.Fx_w = 0
        self.Fy_w = 0
        self.Fz_w = 0
    @property
    def state(self) -> Dict[str, Union[float, np.ndarray]]:
        # Note: ordering here defines the pack_state()/unpack_state() ordering in the base class.
        return {
            "x_e" : self.x_e,
            "y_e" : self.y_e,
            "z_e" : self.z_e,
            "speed": self.speed,
            "gamma": self.gamma,
            "track": self.track,
        }
    @property
    def control_variables(self) -> Dict[str, Union[float, np.ndarray]]:
        return {
            "alpha": self.alpha,
            "beta" : self.beta,
            "bank" : self.bank,
            "Fx_w" : self.Fx_w,
            "Fy_w" : self.Fy_w,
            "Fz_w" : self.Fz_w,
        }
    def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
        """
        Point-mass equations of motion, expressed in the speed-gamma-track
        parameterization. Returns d/dt of each entry of `state`.
        """
        d_speed = self.Fx_w / self.mass_props.mass  # F = m * a along the flight path (wind-x).
        # Decompose the wind-axes lateral/vertical forces through the bank angle:
        sb = np.sin(self.bank)
        cb = np.cos(self.bank)
        force_gamma_direction = -cb * self.Fz_w - sb * self.Fy_w  # Force in the direction that acts to increase gamma
        force_track_direction = -sb * self.Fz_w + cb * self.Fy_w  # Force in the direction that acts to increase track
        d_gamma = force_gamma_direction / self.mass_props.mass / self.speed
        # The extra cos(gamma) factor converts the lateral turn rate into a track-angle rate,
        # since track is measured in the horizontal plane.
        d_track = force_track_direction / self.mass_props.mass / self.speed / np.cos(self.gamma)
        return {
            "x_e" : self.u_e,
            "y_e" : self.v_e,
            "z_e" : self.w_e,
            "speed": d_speed,
            "gamma": d_gamma,
            "track": d_track,
        }
    @property
    def u_e(self):
        # Earth-axes x-velocity [m/s], recovered from (speed, gamma, track).
        return self.speed * np.cos(self.gamma) * np.cos(self.track)
    @property
    def v_e(self):
        # Earth-axes y-velocity [m/s].
        return self.speed * np.cos(self.gamma) * np.sin(self.track)
    @property
    def w_e(self):
        # Earth-axes z-velocity [m/s]; negative when climbing (z_e is positive-down).
        return -self.speed * np.sin(self.gamma)
    def convert_axes(self,
                     x_from: float,
                     y_from: float,
                     z_from: float,
                     from_axes: str,
                     to_axes: str,
                     ) -> Tuple[float, float, float]:
        """
        Converts a vector between axis frames, with "earth" supported in addition to the
        OperatingPoint frames. Strategy: route everything through wind axes as the
        intermediate frame, since the wind<->earth rotation is directly available from
        (bank, gamma, track).
        """
        if from_axes == to_axes:
            return x_from, y_from, z_from
        if (from_axes == "earth" or to_axes == "earth"):
            # Rotation matrix that takes wind-axes components to Earth-axes components.
            rot_w_to_e = np.rotation_matrix_from_euler_angles(
                roll_angle=self.bank,
                pitch_angle=self.gamma,
                yaw_angle=self.track,
                as_array=False
            )
        ### Step 1: convert the input vector into wind axes.
        if from_axes == "wind":
            x_w = x_from
            y_w = y_from
            z_w = z_from
        elif from_axes == "earth":
            # Earth -> wind: apply the transpose of rot_w_to_e (its inverse, since it's a
            # rotation); note the swapped row/column indexing vs. the wind -> earth branch below.
            x_w = rot_w_to_e[0][0] * x_from + rot_w_to_e[1][0] * y_from + rot_w_to_e[2][0] * z_from
            y_w = rot_w_to_e[0][1] * x_from + rot_w_to_e[1][1] * y_from + rot_w_to_e[2][1] * z_from
            z_w = rot_w_to_e[0][2] * x_from + rot_w_to_e[1][2] * y_from + rot_w_to_e[2][2] * z_from
        else:
            # Delegate the OperatingPoint frames (geometry/body/stability) to OperatingPoint.
            x_w, y_w, z_w = self.op_point.convert_axes(
                x_from, y_from, z_from,
                from_axes=from_axes, to_axes="wind"
            )
        ### Step 2: convert from wind axes to the requested output frame.
        if to_axes == "wind":
            x_to = x_w
            y_to = y_w
            z_to = z_w
        elif to_axes == "earth":
            # Wind -> earth: apply rot_w_to_e directly.
            x_to = rot_w_to_e[0][0] * x_w + rot_w_to_e[0][1] * y_w + rot_w_to_e[0][2] * z_w
            y_to = rot_w_to_e[1][0] * x_w + rot_w_to_e[1][1] * y_w + rot_w_to_e[1][2] * z_w
            z_to = rot_w_to_e[2][0] * x_w + rot_w_to_e[2][1] * y_w + rot_w_to_e[2][2] * z_w
        else:
            x_to, y_to, z_to = self.op_point.convert_axes(
                x_w, y_w, z_w,
                from_axes="wind", to_axes=to_axes
            )
        return x_to, y_to, z_to
    def add_force(self,
                  Fx: Union[float, np.ndarray] = 0,
                  Fy: Union[float, np.ndarray] = 0,
                  Fz: Union[float, np.ndarray] = 0,
                  axes="wind",
                  ) -> None:
        """
        Adds a force (in whichever axis system you choose) to this Dynamics instance.
        The force is converted to wind axes and accumulated onto (Fx_w, Fy_w, Fz_w).
        """
        Fx_w, Fy_w, Fz_w = self.convert_axes(
            x_from=Fx,
            y_from=Fy,
            z_from=Fz,
            from_axes=axes,
            to_axes="wind"
        )
        self.Fx_w = self.Fx_w + Fx_w
        self.Fy_w = self.Fy_w + Fy_w
        self.Fz_w = self.Fz_w + Fz_w
if __name__ == '__main__':
    # Quick smoke test: instantiate with the default (all-zero) state.
    dyn = DynamicsPointMass3DSpeedGammaTrack()
from aerosandbox.dynamics.point_mass.common_point_mass import _DynamicsPointMassBaseClass
from aerosandbox.weights.mass_properties import MassProperties
import aerosandbox.numpy as np
from typing import Union, Dict, Tuple
class DynamicsPointMass3DCartesian(_DynamicsPointMassBaseClass):
    """
    Dynamics instance:
    * simulating a point mass
    * in 3D
    * with velocity parameterized in Cartesian coordinates

    State variables:
        x_e: x-position, in Earth axes. [meters]
        y_e: y-position, in Earth axes. [meters]
        z_e: z-position, in Earth axes. [meters]
        u_e: x-velocity, in Earth axes. [m/s]
        v_e: y-velocity, in Earth axes. [m/s]
        w_e: z-velocity, in Earth axes. [m/s]

    Indirect control variables:
        alpha: Angle of attack. [degrees]
        beta: Sideslip angle. [degrees]
        bank: Bank angle. [radians]

    Control variables:
        Fx_e: Force along the Earth-x axis. [N]
        Fy_e: Force along the Earth-y axis. [N]
        Fz_e: Force along the Earth-z axis. [N]
    """

    def __init__(self,
                 mass_props: MassProperties = None,
                 x_e: Union[float, np.ndarray] = 0,
                 y_e: Union[float, np.ndarray] = 0,
                 z_e: Union[float, np.ndarray] = 0,
                 u_e: Union[float, np.ndarray] = 0,
                 v_e: Union[float, np.ndarray] = 0,
                 w_e: Union[float, np.ndarray] = 0,
                 alpha: Union[float, np.ndarray] = 0,
                 beta: Union[float, np.ndarray] = 0,
                 bank: Union[float, np.ndarray] = 0,
                 ):
        # Initialize state variables
        self.mass_props = MassProperties() if mass_props is None else mass_props
        self.x_e = x_e
        self.y_e = y_e
        self.z_e = z_e
        self.u_e = u_e
        self.v_e = v_e
        self.w_e = w_e

        # Initialize indirect control variables
        self.alpha = alpha
        self.beta = beta
        self.bank = bank

        # Initialize control variables (force accumulators; see add_force())
        self.Fx_e = 0
        self.Fy_e = 0
        self.Fz_e = 0

    @property
    def state(self) -> Dict[str, Union[float, np.ndarray]]:
        """The state vector, as an ordered name -> value mapping."""
        return {
            "x_e": self.x_e,
            "y_e": self.y_e,
            "z_e": self.z_e,
            "u_e": self.u_e,
            "v_e": self.v_e,
            "w_e": self.w_e,
        }

    @property
    def control_variables(self) -> Dict[str, Union[float, np.ndarray]]:
        """The control vector: aero angles plus the accumulated Earth-axes forces."""
        return {
            "alpha": self.alpha,
            "beta" : self.beta,
            "bank" : self.bank,
            "Fx_e" : self.Fx_e,
            "Fy_e" : self.Fy_e,
            "Fz_e" : self.Fz_e,
        }

    def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
        """
        Equations of motion: position rates are the velocities (kinematics);
        velocity rates are F = m*a in Earth axes (dynamics).
        """
        return {
            "x_e": self.u_e,
            "y_e": self.v_e,
            "z_e": self.w_e,
            "u_e": self.Fx_e / self.mass_props.mass,
            "v_e": self.Fy_e / self.mass_props.mass,
            "w_e": self.Fz_e / self.mass_props.mass,
        }

    @property
    def speed(self) -> float:
        """The speed (norm of the Earth-axes velocity vector). [m/s]"""
        return (
            self.u_e ** 2 +
            self.v_e ** 2 +
            self.w_e ** 2
        ) ** 0.5

    @property
    def gamma(self):
        """
        Returns the flight path angle, in radians.

        Positive flight path angle indicates positive vertical speed.
        """
        return np.arctan2(
            -self.w_e,
            (
                self.u_e ** 2 +
                self.v_e ** 2
            ) ** 0.5
        )

    @property
    def track(self):
        """
        Returns the track angle, in radians.

        * Track of 0 == North == aligned with x_e axis
        * Track of np.pi / 2 == East == aligned with y_e axis
        """
        return np.arctan2(
            self.v_e,
            self.u_e,
        )

    def convert_axes(self,
                     x_from: float,
                     y_from: float,
                     z_from: float,
                     from_axes: str,
                     to_axes: str,
                     ) -> Tuple[float, float, float]:
        """
        Converts the vector [x_from, y_from, z_from] from the `from_axes` frame
        to the `to_axes` frame, using Earth axes as the intermediate frame.

        Valid frames are "earth", "wind", and whatever frames
        `self.op_point.convert_axes()` supports (e.g. "body", "geometry",
        "stability").

        Returns: (x_to, y_to, z_to), the same vector expressed in `to_axes`.
        """
        if from_axes == to_axes:
            return x_from, y_from, z_from

        # Wind -> Earth rotation matrix, built from (bank, gamma, track).
        # Since the equal-axes case has already returned, every remaining
        # path below uses this matrix.
        if not (from_axes == "earth" and to_axes == "earth"):
            rot_w_to_e = np.rotation_matrix_from_euler_angles(
                roll_angle=self.bank,
                pitch_angle=self.gamma,
                yaw_angle=self.track,
                as_array=False
            )

        ### Step 1: express the input vector in Earth axes.
        if from_axes == "earth":
            x_e = x_from
            y_e = y_from
            z_e = z_from
        elif from_axes == "wind":
            x_e = rot_w_to_e[0][0] * x_from + rot_w_to_e[0][1] * y_from + rot_w_to_e[0][2] * z_from
            y_e = rot_w_to_e[1][0] * x_from + rot_w_to_e[1][1] * y_from + rot_w_to_e[1][2] * z_from
            z_e = rot_w_to_e[2][0] * x_from + rot_w_to_e[2][1] * y_from + rot_w_to_e[2][2] * z_from
        else:
            # Any other frame: go through wind axes first, then wind -> earth.
            x_w, y_w, z_w = self.op_point.convert_axes(
                x_from, y_from, z_from,
                from_axes=from_axes, to_axes="wind"
            )
            x_e = rot_w_to_e[0][0] * x_w + rot_w_to_e[0][1] * y_w + rot_w_to_e[0][2] * z_w
            y_e = rot_w_to_e[1][0] * x_w + rot_w_to_e[1][1] * y_w + rot_w_to_e[1][2] * z_w
            z_e = rot_w_to_e[2][0] * x_w + rot_w_to_e[2][1] * y_w + rot_w_to_e[2][2] * z_w

        ### Step 2: express the Earth-axes vector in the output frame.
        if to_axes == "earth":
            x_to = x_e
            y_to = y_e
            z_to = z_e
        elif to_axes == "wind":
            # Earth -> wind is the transpose; note the [row][col] swap.
            x_to = rot_w_to_e[0][0] * x_e + rot_w_to_e[1][0] * y_e + rot_w_to_e[2][0] * z_e
            y_to = rot_w_to_e[0][1] * x_e + rot_w_to_e[1][1] * y_e + rot_w_to_e[2][1] * z_e
            z_to = rot_w_to_e[0][2] * x_e + rot_w_to_e[1][2] * y_e + rot_w_to_e[2][2] * z_e
        else:
            # Any other frame: earth -> wind (transpose), then delegate.
            x_w = rot_w_to_e[0][0] * x_e + rot_w_to_e[1][0] * y_e + rot_w_to_e[2][0] * z_e
            y_w = rot_w_to_e[0][1] * x_e + rot_w_to_e[1][1] * y_e + rot_w_to_e[2][1] * z_e
            z_w = rot_w_to_e[0][2] * x_e + rot_w_to_e[1][2] * y_e + rot_w_to_e[2][2] * z_e
            x_to, y_to, z_to = self.op_point.convert_axes(
                x_w, y_w, z_w,
                from_axes="wind", to_axes=to_axes
            )

        return x_to, y_to, z_to

    def add_force(self,
                  Fx: Union[float, np.ndarray] = 0,
                  Fy: Union[float, np.ndarray] = 0,
                  Fz: Union[float, np.ndarray] = 0,
                  axes="earth",
                  ) -> None:
        """
        Accumulates an applied force (given in the `axes` frame) onto the
        running Earth-axes force totals Fx_e, Fy_e, Fz_e.
        """
        Fx_e, Fy_e, Fz_e = self.convert_axes(
            x_from=Fx,
            y_from=Fy,
            z_from=Fz,
            from_axes=axes,
            to_axes="earth"
        )
        self.Fx_e = self.Fx_e + Fx_e
        self.Fy_e = self.Fy_e + Fy_e
        self.Fz_e = self.Fz_e + Fz_e
if __name__ == '__main__':
    # Smoke test: construct a default (all-zero state) instance.
    # (Removed garbled file-path metadata that had been fused onto this line.)
    dyn = DynamicsPointMass3DCartesian()
from aerosandbox.dynamics.point_mass.point_3D.cartesian import DynamicsPointMass3DCartesian
from aerosandbox.weights.mass_properties import MassProperties
import aerosandbox.numpy as np
from typing import Union, Dict, Tuple
class DynamicsPointMass2DCartesian(DynamicsPointMass3DCartesian):
    """
    Dynamics instance:
    * simulating a point mass
    * in 2D (the x-z plane)
    * with velocity parameterized in Cartesian coordinates

    This specializes the 3D Cartesian point-mass model by pinning every
    lateral quantity (y_e, v_e, beta, bank, Fy_e) to zero.

    State variables:
        x_e: x-position, in Earth axes. [meters]
        z_e: z-position, in Earth axes. [meters]
        u_e: x-velocity, in Earth axes. [m/s]
        w_e: z-velocity, in Earth axes. [m/s]

    Indirect control variables:
        alpha: Angle of attack. [degrees]

    Control variables:
        Fx_e: Force along the Earth-x axis. [N]
        Fz_e: Force along the Earth-z axis. [N]
    """

    def __init__(self,
                 mass_props: MassProperties = None,
                 x_e: Union[float, np.ndarray] = 0,
                 z_e: Union[float, np.ndarray] = 0,
                 u_e: Union[float, np.ndarray] = 0,
                 w_e: Union[float, np.ndarray] = 0,
                 alpha: Union[float, np.ndarray] = 0,
                 ):
        # Mass properties: default to an empty MassProperties() if not given.
        if mass_props is None:
            self.mass_props = MassProperties()
        else:
            self.mass_props = mass_props

        # State variables; the out-of-plane coordinates are fixed at zero.
        self.x_e = x_e
        self.y_e = 0
        self.z_e = z_e
        self.u_e = u_e
        self.v_e = 0
        self.w_e = w_e

        # Indirect control variables; sideslip and bank do not exist in 2D.
        self.alpha = alpha
        self.beta = 0
        self.bank = 0

        # Control variables (force accumulators).
        self.Fx_e = 0
        self.Fy_e = 0
        self.Fz_e = 0

    @property
    def state(self) -> Dict[str, Union[float, np.ndarray]]:
        """The in-plane states only; y-direction quantities are omitted."""
        return dict(
            x_e=self.x_e,
            z_e=self.z_e,
            u_e=self.u_e,
            w_e=self.w_e,
        )

    @property
    def control_variables(self) -> Dict[str, Union[float, np.ndarray]]:
        """Controls: angle of attack plus the two in-plane force components."""
        return dict(
            alpha=self.alpha,
            Fx_e=self.Fx_e,
            Fz_e=self.Fz_e,
        )

    def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
        """Restrict the parent's 3D state derivatives to the 2D state set."""
        full_derivatives = super().state_derivatives()
        return {
            name: full_derivatives[name]
            for name in self.state
        }
if __name__ == '__main__':
    # Smoke test: construct a default (all-zero state) instance.
    # (Removed garbled file-path metadata that had been fused onto this line.)
    dyn = DynamicsPointMass2DCartesian()
from aerosandbox.dynamics.point_mass.point_3D.speed_gamma_track import DynamicsPointMass3DSpeedGammaTrack
from aerosandbox.weights.mass_properties import MassProperties
import aerosandbox.numpy as np
from typing import Union, Dict, Tuple
class DynamicsPointMass2DSpeedGamma(DynamicsPointMass3DSpeedGammaTrack):
    """
    Dynamics instance:
    * simulating a point mass
    * in 2D
    * with velocity parameterized in speed-gamma space.

    This specializes the 3D speed-gamma-track model by pinning track, bank,
    sideslip, and all y-direction quantities to zero.

    State variables:
        x_e: x-position, in Earth axes. [meters]
        z_e: z-position, in Earth axes. [meters]
        speed: Speed; equivalent to u_w, the x-velocity in wind axes. [m/s]
        gamma: Flight path angle. [rad]

    Indirect control variables:
        alpha: Angle of attack. [degrees]

    Control variables:
        Fx_w: Force along the wind-x axis. [N]
        Fz_w: Force along the wind-z axis. [N]
    """

    def __init__(self,
                 mass_props: MassProperties = None,
                 x_e: Union[float, np.ndarray] = 0,
                 z_e: Union[float, np.ndarray] = 0,
                 speed: Union[float, np.ndarray] = 0,
                 gamma: Union[float, np.ndarray] = 0,
                 alpha: Union[float, np.ndarray] = 0,
                 ):
        # Initialize state variables
        self.mass_props = MassProperties() if mass_props is None else mass_props
        self.x_e = x_e
        self.y_e = 0
        self.z_e = z_e
        self.speed = speed
        self.gamma = gamma
        self.track = 0

        # Initialize indirect control variables
        # (bank is assigned exactly once here; a duplicate assignment was removed)
        self.alpha = alpha
        self.beta = 0
        self.bank = 0

        # Initialize control variables (wind-axes force accumulators)
        self.Fx_w = 0
        self.Fy_w = 0
        self.Fz_w = 0

    @property
    def state(self) -> Dict[str, Union[float, np.ndarray]]:
        """The four longitudinal states, in canonical order."""
        return {
            "x_e"  : self.x_e,
            "z_e"  : self.z_e,
            "speed": self.speed,
            "gamma": self.gamma,
        }

    @property
    def control_variables(self) -> Dict[str, Union[float, np.ndarray]]:
        """Controls: angle of attack plus the two in-plane wind-axes forces."""
        return {
            "alpha": self.alpha,
            "Fx_w" : self.Fx_w,
            "Fz_w" : self.Fz_w,
        }

    def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
        """Restrict the parent's 3D state derivatives to the 2D state set."""
        derivatives = super().state_derivatives()
        return {
            k: derivatives[k] for k in self.state.keys()
        }
if __name__ == '__main__':
    # Smoke test: construct a default (all-zero state) instance.
    # (Removed garbled file-path metadata that had been fused onto this line.)
    dyn = DynamicsPointMass2DSpeedGamma()
from aerosandbox.geometry.airplane import Airplane
from aerosandbox.performance import OperatingPoint
from aerosandbox.weights import MassProperties
import aerosandbox.numpy as np
def get_modes(
        airplane: Airplane,
        op_point: OperatingPoint,
        mass_props: MassProperties,
        aero,
        g=9.81,
):
    """
    Estimates the eigenvalues (and damping ratios) of an airplane's lateral
    dynamic modes — roll subsidence, Dutch roll, and spiral — from its
    stability derivatives.

    Args:
        airplane: Provides the reference geometry (s_ref, c_ref, b_ref).
        op_point: Provides dynamic pressure, velocity, and atmosphere density.
        mass_props: Provides mass and moments of inertia (Ixx, Iyy, Izz).
        aero: A dict of nondimensional stability derivatives
            (e.g. "Clp", "Cnb", "CYb", "Cnr", "CYr", "Clr", "Clb").
        g: Gravitational acceleration [m/s^2]; reserved for the longitudinal
            modes (currently unused).

    Returns: A dict of modes; each mode is itself a dict with keys
        "eigenvalue_real", "eigenvalue_imag", and "damping_ratio".
    """
    ### Shorthand flight-condition, geometry, and inertia quantities.
    Q = op_point.dynamic_pressure()
    S = airplane.s_ref
    c = airplane.c_ref  # reserved for the longitudinal modes (TODO below)
    b = airplane.b_ref
    QS = Q * S
    m = mass_props.mass
    Ixx = mass_props.Ixx
    Iyy = mass_props.Iyy  # reserved for the longitudinal modes (TODO below)
    Izz = mass_props.Izz
    u0 = op_point.velocity

    def pack_mode(sigma, omega_squared):
        """
        Packs a second-order mode, given its real part `sigma` and squared
        natural frequency `omega_squared`, into the output dict format.

        If omega_squared <= 0 the mode is non-oscillatory; the root magnitude
        is then folded into the real eigenvalue instead of the imaginary part.
        """
        # The 1e-100 offset keeps the square root well-defined at exactly zero.
        root = np.abs(omega_squared + 1e-100) ** 0.5
        is_oscillatory = omega_squared > 0

        real = sigma + np.where(is_oscillatory, 0, root)
        imag = np.where(is_oscillatory, root, 0)
        return {
            "eigenvalue_real": real,
            "eigenvalue_imag": imag,
            "damping_ratio"  : -real / (real ** 2 + imag ** 2) ** 0.5,
        }

    modes = {}

    ##### Longitudinal modes
    # TODO: add phugoid and short-period mode estimates.

    ##### Lateral modes

    ### Roll subsidence: first-order roll-damping mode (non-oscillatory).
    modes['roll_subsidence'] = {
        "eigenvalue_real": QS * b ** 2 / (2 * Ixx * u0) * aero["Clp"],
        "eigenvalue_imag": 0,
        "damping_ratio"  : 0,
    }

    ### Dutch roll: oscillatory yaw/sideslip mode.
    sigma_dr = (
        QS * b ** 2 /
        (2 * Izz * u0) *
        (aero["Cnr"] + Izz / (m * b ** 2) * aero["CYb"])
    )
    omega_squared_dr = (
        QS * b / Izz *
        (
            aero["Cnb"] + (
                op_point.atmosphere.density() * S * b / (4 * m) *
                (aero["CYb"] * aero["Cnr"] - aero["Cnb"] * aero["CYr"])
            )
        )
    )
    modes['dutch_roll'] = pack_mode(sigma_dr, omega_squared_dr)

    ### Spiral: slow first-order mode (non-oscillatory).
    modes['spiral'] = {
        "eigenvalue_real": (
            QS * b ** 2 / (2 * Izz * u0) *
            (aero["Cnr"] - aero["Cnb"] * aero["Clr"] / aero["Clb"])
        ),
        "eigenvalue_imag": 0,
        "damping_ratio"  : 0,
    }

    return modes
if __name__ == '__main__':
    # Example: lateral-mode estimates for a small airplane.
    # (Removed garbled file-path metadata that had been fused onto the final line.)
    import aerosandbox as asb
    import aerosandbox.numpy as np
    from pprint import pprint

    pprint(
        get_modes(
            airplane=asb.Airplane(
                s_ref=9,
                c_ref=0.90,
                b_ref=10,
            ),
            op_point=asb.OperatingPoint(velocity=10),
            mass_props=asb.MassProperties(mass=1, Ixx=1, Iyy=1, Izz=1),
            aero=dict(
                CL=0.46,
                CD=0.11,
                Cm=0.141,
                CLa=5.736,
                Cma=-1.59,
                CYb=-0.380,
                Clb=-0.208,
                Cnb=0.0294,
                CYp=-0.325,
                Clp=-0.593,
                Cnp=-0.041,
                CLq=10.41,
                Cmq=-25.05,
                CYr=0.194,
                Clr=0.143,
                Cnr=-0.048
            )
        )
    )
import aerosandbox.numpy as np
from aerosandbox.dynamics.point_mass.common_point_mass import _DynamicsPointMassBaseClass
from abc import ABC, abstractmethod, abstractproperty
from typing import Union, Tuple
from aerosandbox import OperatingPoint, Atmosphere
class _DynamicsRigidBodyBaseClass(_DynamicsPointMassBaseClass, ABC):
    """
    Abstract base class for rigid-body dynamics instances.

    Extends the point-mass base class with moments, body-axes angular rates
    (p, q, r), and rotational kinetic-energy bookkeeping. The final line of
    this class previously had garbled file-path metadata fused onto it; that
    has been removed.
    """
    # TODO: add method for force at offset (i.e., add moment and force)

    @abstractmethod
    def add_moment(self,
                   Mx: Union[float, np.ndarray] = 0,
                   My: Union[float, np.ndarray] = 0,
                   Mz: Union[float, np.ndarray] = 0,
                   axes="body",
                   ) -> None:
        """
        Adds a moment (in whichever axis system you choose) to this Dynamics instance.

        Args:
            Mx: Moment about the x-axis in the axis system chosen. Assumed these moments are applied about the center of mass. [Nm]
            My: Moment about the y-axis in the axis system chosen. Assumed these moments are applied about the center of mass. [Nm]
            Mz: Moment about the z-axis in the axis system chosen. Assumed these moments are applied about the center of mass. [Nm]
            axes: The axis system that the specified moment is in. One of:
                * "geometry"
                * "body"
                * "wind"
                * "stability"
                * "earth"

        Returns: None (in-place)
        """
        pass

    @property
    def op_point(self):
        """An OperatingPoint matching this instance's current flight condition."""
        return OperatingPoint(
            atmosphere=Atmosphere(altitude=self.altitude),
            velocity=self.speed,
            alpha=self.alpha,
            beta=self.beta,
            p=self.p,
            q=self.q,
            r=self.r,
        )

    @property
    def alpha(self):
        """The angle of attack, in degrees."""
        return np.arctan2d(
            self.w_b,
            self.u_b
        )

    @property
    def beta(self):
        """The sideslip angle, in degrees."""
        return np.arctan2d(
            self.v_b,
            (
                self.u_b ** 2 +
                self.w_b ** 2
            ) ** 0.5
        )

    @property
    def rotational_kinetic_energy(self):
        """Rotational kinetic energy about the body axes. [J]"""
        # NOTE(review): only the principal moments of inertia are included;
        # products-of-inertia (Ixy, Iyz, Ixz) cross terms are neglected here.
        return 0.5 * (
            self.mass_props.Ixx * self.p ** 2 +
            self.mass_props.Iyy * self.q ** 2 +
            self.mass_props.Izz * self.r ** 2
        )

    @property
    def kinetic_energy(self):
        """Total kinetic energy: translational plus rotational. [J]"""
        return self.translational_kinetic_energy + self.rotational_kinetic_energy
from aerosandbox.dynamics.rigid_body.common_rigid_body import _DynamicsRigidBodyBaseClass
import aerosandbox.numpy as np
from aerosandbox.weights.mass_properties import MassProperties
from typing import Union
class DynamicsRigidBody3DBodyEuler(_DynamicsRigidBodyBaseClass):
    """
    Dynamics instance:
    * simulating a rigid body
    * in 3D
    * with velocity parameterized in body axes
    * and angle parameterized in Euler angles

    State variables:
        x_e: x-position, in Earth axes. [meters]
        y_e: y-position, in Earth axes. [meters]
        z_e: z-position, in Earth axes. [meters]
        u_b: x-velocity, in body axes. [m/s]
        v_b: y-velocity, in body axes. [m/s]
        w_b: z-velocity, in body axes. [m/s]
        phi: roll angle. Uses yaw-pitch-roll Euler angle convention. [rad]
        theta: pitch angle. Uses yaw-pitch-roll Euler angle convention. [rad]
        psi: yaw angle. Uses yaw-pitch-roll Euler angle convention. [rad]
        p: x-angular-velocity, in body axes. [rad/sec]
        q: y-angular-velocity, in body axes. [rad/sec]
        r: z-angular-velocity, in body axes. [rad/sec]

    Control variables:
        Fx_b: Force along the body-x axis. [N]
        Fy_b: Force along the body-y axis. [N]
        Fz_b: Force along the body-z axis. [N]
        Mx_b: Moment about the body-x axis. [Nm]
        My_b: Moment about the body-y axis. [Nm]
        Mz_b: Moment about the body-z axis. [Nm]
        hx_b: Angular momentum (e.g., propellers) about the body-x axis. [kg*m^2/sec]
        hy_b: Angular momentum (e.g., propellers) about the body-y axis. [kg*m^2/sec]
        hz_b: Angular momentum (e.g., propellers) about the body-z axis. [kg*m^2/sec]
    """

    def __init__(self,
                 mass_props: MassProperties = None,
                 x_e: Union[float, np.ndarray] = 0,
                 y_e: Union[float, np.ndarray] = 0,
                 z_e: Union[float, np.ndarray] = 0,
                 u_b: Union[float, np.ndarray] = 0,
                 v_b: Union[float, np.ndarray] = 0,
                 w_b: Union[float, np.ndarray] = 0,
                 phi: Union[float, np.ndarray] = 0,
                 theta: Union[float, np.ndarray] = 0,
                 psi: Union[float, np.ndarray] = 0,
                 p: Union[float, np.ndarray] = 0,
                 q: Union[float, np.ndarray] = 0,
                 r: Union[float, np.ndarray] = 0,
                 ):
        # Initialize state variables
        self.mass_props = MassProperties() if mass_props is None else mass_props
        self.x_e = x_e
        self.y_e = y_e
        self.z_e = z_e
        self.u_b = u_b
        self.v_b = v_b
        self.w_b = w_b
        self.phi = phi
        self.theta = theta
        self.psi = psi
        self.p = p
        self.q = q
        self.r = r

        # Initialize control variables (force/moment/angular-momentum
        # accumulators; see add_force() and add_moment())
        self.Fx_b = 0
        self.Fy_b = 0
        self.Fz_b = 0
        self.Mx_b = 0
        self.My_b = 0
        self.Mz_b = 0
        self.hx_b = 0
        self.hy_b = 0
        self.hz_b = 0

    @property
    def state(self):
        """The 12-element state vector, as an ordered name -> value mapping."""
        return {
            "x_e" : self.x_e,
            "y_e" : self.y_e,
            "z_e" : self.z_e,
            "u_b" : self.u_b,
            "v_b" : self.v_b,
            "w_b" : self.w_b,
            "phi" : self.phi,
            "theta": self.theta,
            "psi" : self.psi,
            "p" : self.p,
            "q" : self.q,
            "r" : self.r,
        }

    @property
    def control_variables(self):
        """The control vector: body-axes forces, moments, and stored angular momenta."""
        return {
            "Fx_b": self.Fx_b,
            "Fy_b": self.Fy_b,
            "Fz_b": self.Fz_b,
            "Mx_b": self.Mx_b,
            "My_b": self.My_b,
            "Mz_b": self.Mz_b,
            "hx_b": self.hx_b,
            "hy_b": self.hy_b,
            "hz_b": self.hz_b,
        }

    def state_derivatives(self):
        """
        Computes the state derivatives (i.e. equations of motion) for a body in 3D space.

        Based on Section 9.8.2 of Flight Vehicle Aerodynamics by Mark Drela.

        Returns:
            Time derivatives of each of the 12 state variables, given in a dictionary:
                {
                    "x_e" : d_xe,
                    "y_e" : d_ye,
                    "z_e" : d_ze,
                    "u_b" : d_u,
                    "v_b" : d_v,
                    "w_b" : d_w,
                    "phi" : d_phi,
                    "theta": d_theta,
                    "psi" : d_psi,
                    "p" : d_p,
                    "q" : d_q,
                    "r" : d_r,
                }
        """
        ### Shorthand everything so we're not constantly "self."-ing:
        u = self.u_b
        v = self.v_b
        w = self.w_b
        phi = self.phi
        theta = self.theta
        psi = self.psi
        p = self.p
        q = self.q
        r = self.r
        X = self.Fx_b
        Y = self.Fy_b
        Z = self.Fz_b
        L = self.Mx_b
        M = self.My_b
        N = self.Mz_b
        mass = self.mass_props.mass
        Ixx = self.mass_props.Ixx
        Iyy = self.mass_props.Iyy
        Izz = self.mass_props.Izz
        Ixy = self.mass_props.Ixy
        Iyz = self.mass_props.Iyz
        Ixz = self.mass_props.Ixz
        hx = self.hx_b
        hy = self.hy_b
        hz = self.hz_b

        ### Trig Shorthands
        # NOTE: this helper is duplicated in convert_axes(); keep the two in sync.
        def sincos(x):
            # If x is numerically an exact multiple of pi/2, return exact
            # 0 / +-1 values (avoids e.g. cos(pi/2) ~ 6e-17 polluting the EOM).
            # Any input for which these checks raise (presumably symbolic
            # variables) falls through to ordinary trig in the except clause.
            try:
                x = np.mod(x, 2 * np.pi)
                one = np.ones_like(x)
                zero = np.zeros_like(x)

                if np.allclose(x, 0) or np.allclose(x, 2 * np.pi):
                    sin = zero
                    cos = one
                elif np.allclose(x, np.pi / 2):
                    sin = one
                    cos = zero
                elif np.allclose(x, np.pi):
                    sin = zero
                    cos = -one
                elif np.allclose(x, 3 * np.pi / 2):
                    sin = -one
                    cos = zero
                else:
                    raise ValueError()
            except Exception:
                sin = np.sin(x)
                cos = np.cos(x)
            return sin, cos

        # Do the trig
        sphi, cphi = sincos(phi)
        sthe, cthe = sincos(theta)
        spsi, cpsi = sincos(psi)

        ##### Equations of Motion

        ### Position derivatives
        # Body velocity rotated through the body -> earth direction cosine
        # matrix, with the rows written out explicitly.
        d_xe = (
                (cthe * cpsi) * u +
                (sphi * sthe * cpsi - cphi * spsi) * v +
                (cphi * sthe * cpsi + sphi * spsi) * w
        )
        d_ye = (
                (cthe * spsi) * u +
                (sphi * sthe * spsi + cphi * cpsi) * v +
                (cphi * sthe * spsi - sphi * cpsi) * w
        )
        d_ze = (
                (-sthe) * u +
                (sphi * cthe) * v +
                (cphi * cthe) * w
        )
        ### Velocity derivatives
        # Newton's second law in a rotating (body) frame: specific force
        # minus the omega-cross-velocity terms.
        d_u = (
                (X / mass) -
                q * w +
                r * v
        )
        d_v = (
                (Y / mass) -
                r * u +
                p * w
        )
        d_w = (
                (Z / mass) -
                p * v +
                q * u
        )
        ### Angle derivatives
        # Euler-angle kinematics are singular at theta = +-90 deg (cthe == 0,
        # gimbal lock); the affected rates are pinned to zero there rather
        # than dividing by zero.
        if np.all(cthe == 0):
            d_phi = 0
        else:
            d_phi = (
                    p +
                    q * sphi * sthe / cthe +
                    r * cphi * sthe / cthe
            )

        d_theta = (
                q * cphi -
                r * sphi
        )

        if np.all(cthe == 0):
            d_psi = 0
        else:
            d_psi = (
                    q * sphi / cthe +
                    r * cphi / cthe
            )

        ### Angular velocity derivatives
        # Right-hand sides of Euler's rotational EOM: applied moment minus
        # gyroscopic, product-of-inertia, and stored-angular-momentum terms.
        RHS_L = (
                L -
                (Izz - Iyy) * q * r -
                Iyz * (q ** 2 - r ** 2) -
                Ixz * p * q +
                Ixy * p * r -
                hz * q +
                hy * r
        )
        RHS_M = (
                M -
                (Ixx - Izz) * r * p -
                Ixz * (r ** 2 - p ** 2) -
                Ixy * q * r +
                Iyz * q * p -
                hx * r +
                hz * p
        )
        RHS_N = (
                N -
                (Iyy - Ixx) * p * q -
                Ixy * (p ** 2 - q ** 2) -
                Iyz * r * p +
                Ixz * r * q -
                hy * p +
                hx * q
        )
        # Solve I * omega_dot = RHS using the inverse of the symmetric
        # inertia tensor (returned as its 6 unique components).
        i11, i22, i33, i12, i23, i13 = np.linalg.inv_symmetric_3x3(Ixx, Iyy, Izz, Ixy, Iyz, Ixz)

        d_p = i11 * RHS_L + i12 * RHS_M + i13 * RHS_N
        d_q = i12 * RHS_L + i22 * RHS_M + i23 * RHS_N
        d_r = i13 * RHS_L + i23 * RHS_M + i33 * RHS_N

        return {
            "x_e" : d_xe,
            "y_e" : d_ye,
            "z_e" : d_ze,
            "u_b" : d_u,
            "v_b" : d_v,
            "w_b" : d_w,
            "phi" : d_phi,
            "theta": d_theta,
            "psi" : d_psi,
            "p" : d_p,
            "q" : d_q,
            "r" : d_r,
        }

    def convert_axes(self,
                     x_from, y_from, z_from,
                     from_axes: str,
                     to_axes: str,
                     ):
        """
        Converts a vector [x_from, y_from, z_from], as given in the `from_axes` frame, to an equivalent vector [x_to,
        y_to, z_to], as given in the `to_axes` frame.

        Identical to OperatingPoint.convert_axes(), but adds in "earth" as a valid axis frame. For more documentation,
        see the docstring of OperatingPoint.convert_axes().

        Both `from_axes` and `to_axes` should be a string, one of:
                * "geometry"
                * "body"
                * "wind"
                * "stability"
                * "earth"

        Args:
                x_from: x-component of the vector, in `from_axes` frame.
                y_from: y-component of the vector, in `from_axes` frame.
                z_from: z-component of the vector, in `from_axes` frame.
                from_axes: The axes to convert from.
                to_axes: The axes to convert to.

        Returns: The x-, y-, and z-components of the vector, in `to_axes` frame. Given as a tuple.
        """
        if from_axes == to_axes:
            return x_from, y_from, z_from

        # Body axes serve as the intermediate frame; the Euler-angle DCM is
        # only needed when "earth" is one of the two frames.
        if from_axes == "earth" or to_axes == "earth":
            ### Trig Shorthands
            # NOTE: duplicated from state_derivatives(); keep the two in sync.
            def sincos(x):
                # Exact 0 / +-1 values at numerical multiples of pi/2;
                # falls back to ordinary trig for anything else (including
                # inputs for which the checks raise).
                try:
                    x = np.mod(x, 2 * np.pi)
                    one = np.ones_like(x)
                    zero = np.zeros_like(x)

                    if np.allclose(x, 0) or np.allclose(x, 2 * np.pi):
                        sin = zero
                        cos = one
                    elif np.allclose(x, np.pi / 2):
                        sin = one
                        cos = zero
                    elif np.allclose(x, np.pi):
                        sin = zero
                        cos = -one
                    elif np.allclose(x, 3 * np.pi / 2):
                        sin = -one
                        cos = zero
                    else:
                        raise ValueError()
                except Exception:
                    sin = np.sin(x)
                    cos = np.cos(x)
                return sin, cos

            # Do the trig
            sphi, cphi = sincos(self.phi)
            sthe, cthe = sincos(self.theta)
            spsi, cpsi = sincos(self.psi)

        ### Step 1: express the input vector in body axes.
        if from_axes == "earth":
            # Earth -> body: multiply by the transpose of the body -> earth DCM.
            x_b = (
                    (cthe * cpsi) * x_from +
                    (cthe * spsi) * y_from +
                    (-sthe) * z_from
            )
            y_b = (
                    (sphi * sthe * cpsi - cphi * spsi) * x_from +
                    (sphi * sthe * spsi + cphi * cpsi) * y_from +
                    (sphi * cthe) * z_from
            )
            z_b = (
                    (cphi * sthe * cpsi + sphi * spsi) * x_from +
                    (cphi * sthe * spsi - sphi * cpsi) * y_from +
                    (cphi * cthe) * z_from
            )
        else:
            x_b, y_b, z_b = self.op_point.convert_axes(
                x_from, y_from, z_from,
                from_axes=from_axes, to_axes="body"
            )

        ### Step 2: express the body-axes vector in the output frame.
        if to_axes == "earth":
            # Body -> earth: forward DCM rows (same rows as in state_derivatives).
            x_to = (
                    (cthe * cpsi) * x_b +
                    (sphi * sthe * cpsi - cphi * spsi) * y_b +
                    (cphi * sthe * cpsi + sphi * spsi) * z_b
            )
            y_to = (
                    (cthe * spsi) * x_b +
                    (sphi * sthe * spsi + cphi * cpsi) * y_b +
                    (cphi * sthe * spsi - sphi * cpsi) * z_b
            )
            z_to = (
                    (-sthe) * x_b +
                    (sphi * cthe) * y_b +
                    (cphi * cthe) * z_b
            )
        else:
            x_to, y_to, z_to = self.op_point.convert_axes(
                x_b, y_b, z_b,
                from_axes="body", to_axes=to_axes
            )

        return x_to, y_to, z_to

    def add_force(self,
                  Fx: Union[float, np.ndarray] = 0,
                  Fy: Union[float, np.ndarray] = 0,
                  Fz: Union[float, np.ndarray] = 0,
                  axes="body",
                  ):
        """
        Accumulates an applied force (given in the `axes` frame) onto the
        running body-axes force totals Fx_b, Fy_b, Fz_b.
        """
        Fx_b, Fy_b, Fz_b = self.convert_axes(
            x_from=Fx,
            y_from=Fy,
            z_from=Fz,
            from_axes=axes,
            to_axes="body"
        )
        self.Fx_b = self.Fx_b + Fx_b
        self.Fy_b = self.Fy_b + Fy_b
        self.Fz_b = self.Fz_b + Fz_b

    def add_moment(self,
                   Mx: Union[float, np.ndarray] = 0,
                   My: Union[float, np.ndarray] = 0,
                   Mz: Union[float, np.ndarray] = 0,
                   axes="body",
                   ):
        """
        Accumulates an applied moment (given in the `axes` frame, about the
        center of mass) onto the running body-axes totals Mx_b, My_b, Mz_b.
        """
        Mx_b, My_b, Mz_b = self.convert_axes(
            x_from=Mx,
            y_from=My,
            z_from=Mz,
            from_axes=axes,
            to_axes="body"
        )
        self.Mx_b = self.Mx_b + Mx_b
        self.My_b = self.My_b + My_b
        self.Mz_b = self.Mz_b + Mz_b

    @property
    def speed(self):
        """The speed of the object, expressed as a scalar."""
        return (
                self.u_b ** 2 +
                self.v_b ** 2 +
                self.w_b ** 2
        ) ** 0.5

    # NOTE(review): alpha and beta below are identical to the base-class
    # implementations in _DynamicsRigidBodyBaseClass; the overrides appear
    # redundant.
    @property
    def alpha(self):
        """The angle of attack, in degrees."""
        return np.arctan2d(
            self.w_b,
            self.u_b
        )

    @property
    def beta(self):
        """The sideslip angle, in degrees."""
        return np.arctan2d(
            self.v_b,
            (
                    self.u_b ** 2 +
                    self.w_b ** 2
            ) ** 0.5
        )
if __name__ == '__main__':
    # Smoke test: build a unit-mass rigid-body dynamics instance.
    # (Removed garbled file-path metadata that had been fused onto the final line.)
    import aerosandbox as asb

    dyn = DynamicsRigidBody3DBodyEuler(
        mass_props=asb.MassProperties(
            mass=1
        )
    )
from aerosandbox.dynamics.rigid_body.rigid_3D.body_euler import DynamicsRigidBody3DBodyEuler
from aerosandbox.weights.mass_properties import MassProperties
import aerosandbox.numpy as np
from typing import Union, Dict, Tuple
class DynamicsRigidBody2DBody(DynamicsRigidBody3DBodyEuler):
    """
    Dynamics instance:
    * simulating a rigid body
    * in 2D (the x-z plane; longitudinal motion only)
    * with velocity parameterized in body axes

    This specializes the 3D body-axes / Euler-angle rigid body by pinning
    every lateral-directional quantity (y_e, v_b, phi, psi, p, r, and the
    associated forces, moments, and angular momenta) to zero.

    State variables:
        x_e: x-position, in Earth axes. [meters]
        z_e: z-position, in Earth axes. [meters]
        u_b: x-velocity, in body axes. [m/s]
        w_b: z-velocity, in body axes. [m/s]
        theta: pitch angle. [rad]
        q: y-angular-velocity, in body axes. [rad/sec]

    Control variables:
        Fx_b: Force along the body-x axis. [N]
        Fz_b: Force along the body-z axis. [N]
        My_b: Moment about the body-y axis. [Nm]
    """

    def __init__(self,
                 mass_props: MassProperties = None,
                 x_e: Union[float, np.ndarray] = 0,
                 z_e: Union[float, np.ndarray] = 0,
                 u_b: Union[float, np.ndarray] = 0,
                 w_b: Union[float, np.ndarray] = 0,
                 theta: Union[float, np.ndarray] = 0,
                 q: Union[float, np.ndarray] = 0,
                 ):
        # Mass properties: default to an empty MassProperties() if not given.
        if mass_props is None:
            self.mass_props = MassProperties()
        else:
            self.mass_props = mass_props

        # State variables (out-of-plane entries fixed at zero).
        self.x_e = x_e
        self.y_e = 0
        self.z_e = z_e
        self.u_b = u_b
        self.v_b = 0
        self.w_b = w_b
        self.phi = 0
        self.theta = theta
        self.psi = 0
        self.p = 0
        self.q = q
        self.r = 0

        # Control variables: force/moment and stored-angular-momentum
        # accumulators, all zeroed at construction.
        self.Fx_b = 0
        self.Fy_b = 0
        self.Fz_b = 0
        self.Mx_b = 0
        self.My_b = 0
        self.Mz_b = 0
        self.hx_b = 0
        self.hy_b = 0
        self.hz_b = 0

    @property
    def state(self):
        """The six longitudinal states, in canonical order."""
        return dict(
            x_e=self.x_e,
            z_e=self.z_e,
            u_b=self.u_b,
            w_b=self.w_b,
            theta=self.theta,
            q=self.q,
        )

    @property
    def control_variables(self):
        """The in-plane force components and the pitching moment."""
        return dict(
            Fx_b=self.Fx_b,
            Fz_b=self.Fz_b,
            My_b=self.My_b,
        )

    def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
        """Restrict the parent's 3D state derivatives to the 2D state set."""
        full_derivatives = super().state_derivatives()
        return {
            name: full_derivatives[name]
            for name in self.state
        }
if __name__ == '__main__':
    # Smoke test: construct a default (all-zero state) instance.
    # (Removed garbled file-path metadata that had been fused onto this line.)
    dyn = DynamicsRigidBody2DBody()
import aerosandbox as asb
import aerosandbox.numpy as np
import pytest
def test_quadcopter_navigation():
    """
    Trajectory optimization: fly a 2D quadcopter from (0, 0) to (1, -1) in a
    fixed 1-second window, starting and ending at rest, while minimizing the
    average control effort (time-integral of the squared rotor thrusts).
    """
    opti = asb.Opti()

    N = 300  # number of discretization points
    time_final = 1  # [sec]; fixed (not a design variable here)
    time = np.linspace(0, time_final, N)

    # Per-rotor thrust trajectories, bounded to [0, 1] N.
    left_thrust = opti.variable(init_guess=0.5, scale=1, n_vars=N, lower_bound=0, upper_bound=1)
    right_thrust = opti.variable(init_guess=0.5, scale=1, n_vars=N, lower_bound=0, upper_bound=1)

    mass = 0.1  # [kg]

    # 2D free-body dynamics; the dynamics constraints are added onto `opti`.
    # X is the total thrust; M is the differential thrust across a 0.1 m arm.
    dyn = asb.FreeBodyDynamics(
        opti_to_add_constraints_to=opti,
        time=time,
        xe=opti.variable(init_guess=np.linspace(0, 1, N)),
        ze=opti.variable(init_guess=np.linspace(0, -1, N)),
        u=opti.variable(init_guess=0, n_vars=N),
        w=opti.variable(init_guess=0, n_vars=N),
        theta=opti.variable(init_guess=np.linspace(np.pi / 2, np.pi / 2, N)),
        q=opti.variable(init_guess=0, n_vars=N),
        X=left_thrust + right_thrust,
        M=(right_thrust - left_thrust) * 0.1 / 2,
        mass=mass,
        Iyy=0.5 * mass * 0.1 ** 2,
        g=9.81,
    )

    opti.subject_to([  # Starting state: at the origin, at rest, level (theta = 90 deg)
        dyn.xe[0] == 0,
        dyn.ze[0] == 0,
        dyn.u[0] == 0,
        dyn.w[0] == 0,
        dyn.theta[0] == np.radians(90),
        dyn.q[0] == 0,
    ])

    opti.subject_to([  # Final state: at (1, -1), at rest, level again
        dyn.xe[-1] == 1,
        dyn.ze[-1] == -1,
        dyn.u[-1] == 0,
        dyn.w[-1] == 0,
        dyn.theta[-1] == np.radians(90),
        dyn.q[-1] == 0,
    ])

    effort = np.sum(  # The average "effort per second", where effort is integrated as follows:
        np.trapz(left_thrust ** 2 + right_thrust ** 2) * np.diff(time)
    ) / time_final

    opti.minimize(effort)

    sol = opti.solve()
    dyn = sol(dyn)

    # Regression check on the optimal effort value.
    assert sol.value(effort) == pytest.approx(0.714563, rel=0.01)

    print(sol.value(effort))
def test_quadcopter_flip():
    """
    Minimum-time trajectory optimization: the 2D quadcopter travels 1 m in x
    while performing one full flip (theta sweeps through -360 deg), starting
    and ending at rest at the same altitude.
    """
    opti = asb.Opti()

    N = 300  # number of discretization points
    time_final = opti.variable(init_guess=1, lower_bound=0)  # [sec]; minimized below
    time = np.linspace(0, time_final, N)

    # Per-rotor thrust trajectories, bounded to [0, 1] N; asymmetric initial
    # guesses to bias the solver toward a rotating solution.
    left_thrust = opti.variable(init_guess=0.7, scale=1, n_vars=N, lower_bound=0, upper_bound=1)
    right_thrust = opti.variable(init_guess=0.6, scale=1, n_vars=N, lower_bound=0, upper_bound=1)

    mass = 0.1  # [kg]

    # 2D free-body dynamics; theta's initial guess sweeps through the flip.
    dyn = asb.FreeBodyDynamics(
        opti_to_add_constraints_to=opti,
        time=time,
        xe=opti.variable(init_guess=np.linspace(0, 1, N)),
        ze=opti.variable(init_guess=0, n_vars=N),
        u=opti.variable(init_guess=0, n_vars=N),
        w=opti.variable(init_guess=0, n_vars=N),
        theta=opti.variable(init_guess=np.linspace(np.pi / 2, np.pi / 2 - 2 * np.pi, N)),
        q=opti.variable(init_guess=0, n_vars=N),
        X=left_thrust + right_thrust,
        M=(right_thrust - left_thrust) * 0.1 / 2,
        mass=mass,
        Iyy=0.5 * mass * 0.1 ** 2,
        g=9.81,
    )

    opti.subject_to([  # Starting state: at the origin, at rest, level
        dyn.xe[0] == 0,
        dyn.ze[0] == 0,
        dyn.u[0] == 0,
        dyn.w[0] == 0,
        dyn.theta[0] == np.radians(90),
        dyn.q[0] == 0,
    ])

    opti.subject_to([  # Final state: 1 m downrange, at rest, one full flip completed
        dyn.xe[-1] == 1,
        dyn.ze[-1] == 0,
        dyn.u[-1] == 0,
        dyn.w[-1] == 0,
        dyn.theta[-1] == np.radians(90 - 360),
        dyn.q[-1] == 0,
    ])

    opti.minimize(time_final)

    sol = opti.solve(verbose=False)
    dyn = sol(dyn)

    # Regression check on the minimum flip time.
    assert sol.value(time_final) == pytest.approx(0.824, abs=0.01)
if __name__ == '__main__':
    # Run this module's tests directly.
    # (Removed garbled file-path metadata that had been fused onto this line.)
    pytest.main()
from aerosandbox.common import *
from aerosandbox.geometry import Airfoil
from aerosandbox.performance import OperatingPoint
from aerosandbox.aerodynamics.aero_2D.singularities import calculate_induced_velocity_line_singularities
import aerosandbox.numpy as np
from typing import Union, List, Optional
class AirfoilInviscid(ImplicitAnalysis):
"""
An implicit analysis for inviscid analysis of an airfoil (or family of airfoils).
Key outputs:
* AirfoilInviscid.Cl
"""
    @ImplicitAnalysis.initialize
    def __init__(self,
                 airfoil: Union[Airfoil, List[Airfoil]],
                 op_point: OperatingPoint,
                 ground_effect: bool = False,
                 ):
        """
        Args:
            airfoil: A single Airfoil, or a list of Airfoils (multi-element analysis).
            op_point: The OperatingPoint (freestream condition) to analyze at.
            ground_effect: If True, mirror-image singularities (reflected about
                y = 0) are added to model ground effect.
        """
        # Normalize the input so that self.airfoils is always a list.
        if isinstance(airfoil, Airfoil):
            self.airfoils = [airfoil]
        else:
            self.airfoils = airfoil

        self.op_point = op_point

        self.ground_effect = ground_effect

        # Set up and solve the implicit panel-method system.
        self._setup_unknowns()
        self._enforce_governing_equations()
        self._calculate_forces()
def __repr__(self):
return self.__class__.__name__ + "(\n\t" + "\n\t".join([
f"airfoils={self.airfoils}",
f"op_point={self.op_point}",
]) + "\n)"
    def _setup_unknowns(self):
        # Unknowns: one vortex strength (gamma) per surface point on each
        # airfoil, scaled by the freestream speed for good conditioning.
        for airfoil in self.airfoils:
            airfoil.gamma = self.opti.variable(
                init_guess=0,
                scale=self.op_point.velocity,
                n_vars=airfoil.n_points()
            )
            # Surface source strengths are fixed at zero here (not unknowns).
            airfoil.sigma = np.zeros(airfoil.n_points())
    def calculate_velocity(self,
                           x_field,
                           y_field,
                           ) -> [np.ndarray, np.ndarray]:
        """
        Computes the flowfield velocity (u, v) at the given field points:
        the freestream plus the induced velocity of every airfoil's surface
        vortex/source sheets (and, if ground_effect is on, of their mirror
        images reflected about y = 0).
        """
        ### Analyze the freestream
        u_freestream = self.op_point.velocity * np.cosd(self.op_point.alpha)
        v_freestream = self.op_point.velocity * np.sind(self.op_point.alpha)

        u_field = u_freestream
        v_field = v_freestream

        for airfoil in self.airfoils:

            ### Add in the influence of the vortices and sources on the airfoil surface
            u_field_induced, v_field_induced = calculate_induced_velocity_line_singularities(
                x_field=x_field,
                y_field=y_field,
                x_panels=airfoil.x(),
                y_panels=airfoil.y(),
                gamma=airfoil.gamma,
                sigma=airfoil.sigma,
            )

            u_field = u_field + u_field_induced
            v_field = v_field + v_field_induced

            ### Add in the influence of a source across the open trailing-edge panel.
            if airfoil.TE_thickness() != 0:
                u_field_induced_TE, v_field_induced_TE = calculate_induced_velocity_line_singularities(
                    x_field=x_field,
                    y_field=y_field,
                    x_panels=[airfoil.x()[0], airfoil.x()[-1]],
                    y_panels=[airfoil.y()[0], airfoil.y()[-1]],
                    gamma=[0, 0],
                    # NOTE(review): both endpoint strengths use gamma[0];
                    # confirm the second entry shouldn't be gamma[-1].
                    sigma=[airfoil.gamma[0], airfoil.gamma[0]]
                )

                u_field = u_field + u_field_induced_TE
                v_field = v_field + v_field_induced_TE

            if self.ground_effect:

                ### Add in the influence of the vortices and sources on the airfoil surface
                # Mirror image: y-coordinates and vortex strengths are negated,
                # reflecting the singularity system about y = 0.
                u_field_induced, v_field_induced = calculate_induced_velocity_line_singularities(
                    x_field=x_field,
                    y_field=y_field,
                    x_panels=airfoil.x(),
                    y_panels=-airfoil.y(),
                    gamma=-airfoil.gamma,
                    sigma=airfoil.sigma,
                )

                u_field = u_field + u_field_induced
                v_field = v_field + v_field_induced

                ### Add in the influence of a source across the open trailing-edge panel.
                if airfoil.TE_thickness() != 0:
                    u_field_induced_TE, v_field_induced_TE = calculate_induced_velocity_line_singularities(
                        x_field=x_field,
                        y_field=y_field,
                        x_panels=[airfoil.x()[0], airfoil.x()[-1]],
                        y_panels=-1 * np.array([airfoil.y()[0], airfoil.y()[-1]]),
                        gamma=[0, 0],
                        sigma=[airfoil.gamma[0], airfoil.gamma[0]]
                    )

                    u_field = u_field + u_field_induced_TE
                    v_field = v_field + v_field_induced_TE

        return u_field, v_field
def _enforce_governing_equations(self):
    """
    Adds the panel-method governing equations as constraints on self.opti:
    flow tangency at each panel midpoint, plus the Kutta condition at each
    airfoil's trailing edge.
    """
    for airfoil in self.airfoils:
        ### Compute normal velocities at the middle of each panel
        # NOTE: `np` is aerosandbox.numpy; its trapz() returns the midpoints of
        # adjacent array entries (unlike numpy.trapz, which integrates) — hence
        # the variable names. TODO confirm against aerosandbox.numpy docs.
        x_midpoints = np.trapz(airfoil.x())
        y_midpoints = np.trapz(airfoil.y())

        u_midpoints, v_midpoints = self.calculate_velocity(
            x_field=x_midpoints,
            y_field=y_midpoints,
        )

        # Panel-local unit vectors: xp_hat is tangent, yp_hat is normal
        # (tangent rotated +90 degrees).
        panel_dx = np.diff(airfoil.x())
        panel_dy = np.diff(airfoil.y())
        panel_length = (panel_dx ** 2 + panel_dy ** 2) ** 0.5

        xp_hat_x = panel_dx / panel_length  # x-coordinate of the xp_hat vector
        xp_hat_y = panel_dy / panel_length  # y-coordinate of the yp_hat vector

        yp_hat_x = -xp_hat_y
        yp_hat_y = xp_hat_x

        # Project the midpoint velocity onto the panel normal.
        normal_velocities = u_midpoints * yp_hat_x + v_midpoints * yp_hat_y

        ### Add in flow tangency constraint
        self.opti.subject_to(normal_velocities == 0)

        ### Add in Kutta condition
        self.opti.subject_to(airfoil.gamma[0] + airfoil.gamma[-1] == 0)
def _calculate_forces(self):
    """
    Computes the integrated vorticity and lift coefficient of each airfoil, and
    of the system as a whole. Results are stored as attributes: `vorticity` and
    `Cl` on each airfoil, and `total_vorticity` and `Cl` on self.
    """
    for airfoil in self.airfoils:
        dx = np.diff(airfoil.x())
        dy = np.diff(airfoil.y())
        lengths = (dx ** 2 + dy ** 2) ** 0.5

        ### Integrate the vortex sheet strength over the surface (trapezoidal rule)
        gamma_midpoints = (airfoil.gamma[1:] + airfoil.gamma[:-1]) / 2
        airfoil.vorticity = np.sum(gamma_midpoints * lengths)

        airfoil.Cl = 2 * airfoil.vorticity  # TODO normalize by chord and freestream velocity etc.

    self.total_vorticity = sum(airfoil.vorticity for airfoil in self.airfoils)
    self.Cl = 2 * self.total_vorticity
def draw_streamlines(self, res=200, show=True):
    """
    Draws the streamlines of the solved flow field, with the airfoil(s) filled in.

    Args:
        res: Vertical resolution of the velocity-sampling grid [points]; the
            horizontal resolution is scaled to keep the grid cells roughly square.
        show: Whether to call plt.show() at the end.
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)

    plt.xlim(-0.5, 1.5)
    plt.ylim(-0.5, 0.5)
    xrng = np.diff(np.array(ax.get_xlim()))
    yrng = np.diff(np.array(ax.get_ylim()))

    # Grid spans the axes limits; horizontal point count scaled by the aspect ratio.
    x = np.linspace(*ax.get_xlim(), int(np.round(res * xrng / yrng)))
    y = np.linspace(*ax.get_ylim(), res)

    X, Y = np.meshgrid(x, y)
    shape = X.shape
    X = X.flatten()
    Y = Y.flatten()

    # Velocity is evaluated on flattened points, then reshaped back onto the grid.
    U, V = self.calculate_velocity(X, Y)
    X = X.reshape(shape)
    Y = Y.reshape(shape)
    U = U.reshape(shape)
    V = V.reshape(shape)

    # NaN out any points inside the airfoil
    for airfoil in self.airfoils:
        contains = airfoil.contains_points(X, Y)
        U[contains] = np.nan
        V[contains] = np.nan

    speed = (U ** 2 + V ** 2) ** 0.5
    Cp = 1 - speed ** 2  # NOTE: computed but not currently plotted

    ### Draw the airfoils
    for airfoil in self.airfoils:
        plt.fill(airfoil.x(), airfoil.y(), "k", linewidth=0, zorder=4)

    plt.streamplot(
        x,
        y,
        U,
        V,
        color=speed,  # color streamlines by local speed
        density=2.5,
        arrowsize=0,
        cmap=plt.get_cmap('coolwarm_r'),
    )
    CB = plt.colorbar(
        orientation="horizontal",
        shrink=0.8,
        aspect=40,
    )
    CB.set_label(r"Relative Airspeed ($U/U_\infty$)")
    plt.clim(0.6, 1.4)

    plt.gca().set_aspect('equal', adjustable='box')
    plt.xlabel(r"$x/c$")
    plt.ylabel(r"$y/c$")
    plt.title(rf"Inviscid Airfoil: Flow Field")
    plt.tight_layout()
    if show:
        plt.show()
def draw_cp(self, show=True):
    """
    Plots the surface pressure coefficient distribution C_p(x) for each airfoil.

    Args:
        show: Whether to call plt.show() at the end.
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)

    for airfoil in self.airfoils:
        # On the surface, the local speed equals the vortex sheet strength gamma.
        surface_speeds = airfoil.gamma
        C_p = 1 - surface_speeds ** 2  # incompressible Bernoulli

        plt.plot(airfoil.x(), C_p)

    plt.ylim(-4, 1.1)
    plt.gca().invert_yaxis()  # convention: negative C_p (suction) plotted upward
    plt.xlabel(r"$x/c$")
    plt.ylabel(r"$C_p$")
    plt.title(r"$C_p$ on Surface")
    plt.tight_layout()
    if show:
        plt.show()
if __name__ == '__main__':
    ### Example 1: a two-element system (main element + flap) at alpha = 5 deg.
    main_element = Airfoil("e423").repanel(n_points_per_side=50)
    flap = (
        Airfoil("naca6408")
        .repanel(n_points_per_side=50)
        .scale(0.4, 0.4)
        .rotate(np.radians(-25))
        .translate(0.9, -0.05)
    )
    a = AirfoilInviscid(
        airfoil=[main_element, flap],
        op_point=OperatingPoint(
            velocity=1,
            alpha=5,
        )
    )
    a.draw_streamlines()
    a.draw_cp()

    ### Example 2: solving within an externally-supplied optimization environment.
    from aerosandbox import Opti

    opti2 = Opti()
    b = AirfoilInviscid(
        airfoil=Airfoil("naca4408"),
        op_point=OperatingPoint(
            velocity=1,
            alpha=5
        ),
        opti=opti2
    )
from aerosandbox.common import ExplicitAnalysis
import aerosandbox.numpy as np
import subprocess
from pathlib import Path
from aerosandbox.geometry import Airfoil
from typing import Union, List, Dict
import tempfile
import warnings
import os
class XFoil(ExplicitAnalysis):
    """
    An interface to XFoil, a 2D airfoil analysis tool developed by Mark Drela at MIT.

    Requires XFoil to be on your computer; XFoil is available here: https://web.mit.edu/drela/Public/web/xfoil/

    It is recommended (but not required) that you add XFoil to your system PATH environment variable such that it can
    be called with the command `xfoil`. If this is not the case, you need to specify the path to your XFoil
    executable using the `xfoil_command` argument of the constructor.

    Usage example:

    >>> xf = XFoil(
    >>>     airfoil=Airfoil("naca2412").repanel(n_points_per_side=100),
    >>>     Re=1e6,
    >>> )
    >>>
    >>> result_at_single_alpha = xf.alpha(5)
    >>> result_at_several_CLs = xf.cl([0.5, 0.7, 0.8, 0.9])
    >>> result_at_multiple_alphas = xf.alpha([3, 5, 60])  # Note: if a result does not converge (such as the 60 degree case here), it will not be included in the results.
    """

    def __init__(self,
                 airfoil: Airfoil,
                 Re: float = 0.,
                 mach: float = 0.,
                 n_crit: float = 9.,
                 xtr_upper: float = 1.,
                 xtr_lower: float = 1.,
                 hinge_point_x: float = None,
                 full_potential: bool = False,
                 max_iter: int = 100,
                 xfoil_command: str = "xfoil",
                 xfoil_repanel: bool = True,
                 verbose: bool = False,
                 timeout: Union[float, int, None] = 30,
                 working_directory: str = None,
                 ):
        """
        Interface to XFoil. Compatible with both XFoil v6.xx (public) and XFoil v7.xx (private, contact Mark Drela at
        MIT for a copy.)

        Args:
            airfoil: The airfoil to analyze. Should be an AeroSandbox Airfoil object.

            Re: The chord-referenced Reynolds number. Set this to 0 to run in inviscid mode.

            mach: The freestream Mach number. Note that XFoil 6.xx uses the Karman-Tsien compressibility correction,
                which breaks down once supersonic flow is present (i.e., past M_crit). XFoil 7.xx has a full-potential
                solver that is theoretically-valid for weak shocks (perhaps up to M_crit + 0.05 or so).

            n_crit: The critical Tollmien-Schlichting wave amplification factor, as part of the "e^n" transition
                criterion. This is a measure of freestream turbulence and surface roughness. The following reference
                conditions are given in the XFoil documentation:

                - sailplane: 12-14
                - motorglider: 11-13
                - clean wind tunnel: 10-12
                - average wind tunnel: 9 (default)
                - dirty wind tunnel: 4-8

            xtr_upper: The upper-surface forced transition location [x/c], where the boundary layer will be
                automatically tripped to turbulent. Set to 1 to disable forced transition (default). Note that if the
                Reynolds number is sufficiently low, it is possible for the flow to re-laminarize after being tripped.

            xtr_lower: The lower-surface forced transition location [x/c], where the boundary layer will be
                automatically tripped to turbulent. Set to 1 to disable forced transition (default). Note that if the
                Reynolds number is sufficiently low, it is possible for the flow to re-laminarize after being tripped.

            hinge_point_x: The x/c location of the hinge point. This is used to calculate the hinge moment. If this is
                None, the hinge moment is not calculated.

            full_potential: If this is set True, it will turn full-potential mode on. Note that full-potential mode
                is only available in XFoil v7.xx or higher. (Unless you have specifically gone through the trouble of
                acquiring a copy of XFoil v7.xx you likely have v6.xx. Version 7.xx is not publicly distributed as of
                2023; contact Mark Drela at MIT for a copy.) Note that if you enable this flag with XFoil v6.xx,
                you'll likely get an error (no output file generated).

            max_iter: How many iterations should we let XFoil do?

            xfoil_command: The command-line argument to call XFoil, given as a string or a Path-like object.

                * If XFoil is on your system PATH, then you can just leave this as "xfoil".

                * If XFoil is not on your system PATH, then you should provide a filepath to the XFoil executable.

                Note that XFoil is not on your PATH by default. To tell if XFoil is not on your system PATH,
                open up a terminal and type "xfoil".

                    * If the XFoil menu appears, it's on your PATH.

                    * If you get something like "'xfoil' is not recognized as an internal or external command..." or
                      "Command 'xfoil' not found, did you mean...", then it is not on your PATH and you'll need to
                      specify the location of your XFoil executable as a string.

                To add XFoil to your path, modify your system's environment variables. (Google how to do this for
                your OS.)

            xfoil_repanel: Controls whether to allow XFoil to repanel your airfoil using its internal methods (PANE,
                with default settings, 160 nodes). Boolean, defaults to True.

            verbose: Controls whether or not XFoil output is printed to command line. Defaults to False.

            timeout: Controls how long any individual XFoil run (i.e. alpha sweep) is allowed to run before the
                process is killed. Given in units of seconds. To disable timeout, set this to None.

            working_directory: Controls which working directory is used for the XFoil input and output files. By
                default, this is set to a TemporaryDirectory that is deleted after the run. However, you can set it to
                somewhere local for debugging purposes.
        """
        if mach >= 1:
            raise ValueError("XFoil will terminate if a supersonic freestream Mach number is given.")

        self.airfoil = airfoil
        self.Re = Re
        self.mach = mach
        self.n_crit = n_crit
        self.xtr_upper = xtr_upper
        self.xtr_lower = xtr_lower
        self.hinge_point_x = hinge_point_x
        self.full_potential = full_potential
        self.max_iter = max_iter
        self.xfoil_command = xfoil_command
        self.xfoil_repanel = xfoil_repanel
        self.verbose = verbose
        self.timeout = timeout
        self.working_directory = working_directory

    def __repr__(self):
        return f"XFoil(airfoil={self.airfoil}, Re={self.Re}, mach={self.mach}, n_crit={self.n_crit})"

    def _default_keystrokes(self,
                            output_filename: str,
                            ) -> List[str]:
        """
        Returns a list of XFoil keystrokes that are common to all XFoil runs.

        Returns:
            A list of strings, each of which is a single XFoil keystroke to be followed by <enter>.
        """
        run_file_contents = []

        # Disable graphics
        run_file_contents += [
            "plop",
            "g",
            "",
        ]

        if self.xfoil_repanel:
            run_file_contents += [
                "pane",
                # "ppar",
                # "",
            ]

        # Enter oper mode
        run_file_contents += [
            "oper",
        ]

        # Handle Re
        if self.Re != 0:
            run_file_contents += [
                f"v {self.Re}",
            ]

        # Handle mach
        if self.mach != 0:
            run_file_contents += [
                f"m {self.mach}",
            ]

        # Handle hinge moment
        if self.hinge_point_x is not None:
            run_file_contents += [
                "hinc",
                f"fnew {self.hinge_point_x} {self.airfoil.local_camber(self.hinge_point_x)}",
                "fmom",
            ]

        if self.full_potential:
            # Full-potential mode; only available in XFoil v7.xx+.
            run_file_contents += [
                "full",
                "fpar",
                f"i {self.max_iter}",
                "",
            ]

        # Handle iterations
        run_file_contents += [
            f"iter {self.max_iter}",
        ]

        # Handle trips and ncrit
        if not (self.xtr_upper == 1 and self.xtr_lower == 1 and self.n_crit == 9):
            run_file_contents += [
                "vpar",
                f"xtr {self.xtr_upper} {self.xtr_lower}",
                f"n {self.n_crit}",
                "",
            ]

        # Set polar accumulation
        run_file_contents += [
            "pacc",
            f"{output_filename}",
            "",
        ]

        # Include more data in polar
        run_file_contents += [
            "cinc"  # include minimum Cp
        ]

        return run_file_contents

    def _run_xfoil(self,
                   run_command: str,
                   ) -> Dict[str, np.ndarray]:
        """
        Private function to run XFoil.

        Args: run_command: A string with any XFoil keystroke inputs that you'd like. By default, you start off within the OPER
        menu. All of the inputs indicated in the constructor have been set already, but you can override them here (for
        this run only) if you want.

        Returns: A dictionary containing all converged solutions obtained with your inputs.
        """
        # Set up a temporary directory
        with tempfile.TemporaryDirectory() as directory:
            directory = Path(directory)

            ### Alternatively, work in another directory:
            if self.working_directory is not None:
                directory = Path(self.working_directory)  # For debugging

            # Designate an intermediate file for file I/O
            output_filename = "output.txt"

            # Handle the airfoil file
            airfoil_file = "airfoil.dat"
            self.airfoil.write_dat(directory / airfoil_file)

            # Handle the keystroke file
            keystrokes = self._default_keystrokes(output_filename=output_filename)
            keystrokes += [run_command]
            keystrokes += [
                "pacc",  # End polar accumulation
                "",
                "quit"
            ]

            # Remove an old output file, if one exists:
            try:
                os.remove(directory / output_filename)
            except FileNotFoundError:
                pass

            ### Execute
            try:
                # command = f'{self.xfoil_command} {airfoil_file}'  # Old syntax; try this if calls are not working
                command = [self.xfoil_command, airfoil_file]

                proc = subprocess.Popen(
                    command,
                    cwd=directory,
                    stdin=subprocess.PIPE,
                    stdout=None if self.verbose else subprocess.DEVNULL,
                    stderr=None if self.verbose else subprocess.DEVNULL,
                    text=True,
                    # shell=True,
                    # timeout=self.timeout,
                    # check=True
                )
                outs, errs = proc.communicate(
                    input="\n".join(keystrokes),
                    timeout=self.timeout
                )
                return_code = proc.poll()

            except subprocess.TimeoutExpired:
                proc.kill()
                outs, errs = proc.communicate()

                warnings.warn(
                    "XFoil run timed out!\n"
                    "If this was not expected, try increasing the `timeout` parameter\n"
                    "when you create this AeroSandbox XFoil instance.",
                    stacklevel=2
                )

            except subprocess.CalledProcessError as e:
                # NOTE: Popen.communicate() never raises CalledProcessError; this branch is
                # retained for the `subprocess.run(..., check=True)` invocation style
                # (see the commented-out kwargs above).
                if e.returncode == 11:
                    raise RuntimeError(
                        "XFoil segmentation-faulted. This is likely because your input airfoil has too many points.\n"
                        "Try repaneling your airfoil with `Airfoil.repanel()` before passing it into XFoil.\n"
                        "For further debugging, turn on the `verbose` flag when creating this AeroSandbox XFoil instance.")
                elif e.returncode == 8 or e.returncode == 136:
                    raise RuntimeError(
                        "XFoil returned a floating point exception. This is probably because you are trying to start\n"
                        "your analysis at an operating point where the viscous boundary layer can't be initialized based\n"
                        "on the computed inviscid flow. (You're probably hitting a Goldstein singularity.) Try starting\n"
                        "your XFoil run at a less-aggressive (alpha closer to 0, higher Re) operating point.")
                elif e.returncode == 1:
                    raise RuntimeError(
                        f"Command '{command}' returned non-zero exit status 1.\n"
                        f"This is likely because AeroSandbox does not see XFoil on PATH with the given command.\n"
                        f"Check the logs (`asb.XFoil(..., verbose=True)`) to verify that this is the case, and if so,\n"
                        f"provide the correct path to the XFoil executable in the asb.XFoil constructor via `xfoil_command=`."
                    )
                else:
                    raise e

            ### Parse the polar
            try:
                with open(directory / output_filename) as f:
                    lines = f.readlines()

                # Line 11 (index 10) of an XFoil polar file holds the column headers.
                title_line = lines[10]
                columns = title_line.split()

                output = {
                    column: []
                    for column in columns
                }

            except (FileNotFoundError, IndexError):
                raise FileNotFoundError(
                    "It appears XFoil didn't produce an output file, probably because it crashed.\n"
                    "To troubleshoot, try some combination of the following:\n"
                    "\t - In the XFoil constructor, verify that either XFoil is on PATH or that the `xfoil_command` parameter is set.\n"
                    "\t - In the XFoil constructor, run with `verbose=True`.\n"
                    "\t - In the XFoil constructor, set the `working_directory` parameter to a known folder to see the XFoil input and output files.\n"
                    "\t - In the XFoil constructor, set the `timeout` parameter to a large number to see if XFoil is just taking a long time to run.\n"
                    "\t - On Windows, use `XFoil.open_interactive()` to run XFoil interactively in a new window.\n"
                    "\t - Try allowing XFoil to repanel the airfoil by setting `xfoil_repanel=True` in the XFoil constructor.\n"
                )

            def str_to_float(s: str) -> float:
                # Unparseable entries (e.g. overflowed Fortran fields) become NaN.
                try:
                    return float(s)
                except ValueError:
                    return np.nan

            # Data rows start after the header and its underline (indices 10-11).
            for line in lines[12:]:
                data = [str_to_float(entry) for entry in line.split()]

                for i in range(len(columns)):
                    output[columns[i]].append(data[i])

            output = {
                k: np.array(v, dtype=float)
                for k, v in output.items()
            }

            return output

    def open_interactive(self) -> None:
        """
        Opens a new terminal window and runs XFoil interactively. This is useful for detailed analysis or debugging.

        Returns: None
        """
        with tempfile.TemporaryDirectory() as directory:
            directory = Path(directory)

            ### Alternatively, work in another directory:
            if self.working_directory is not None:
                directory = Path(self.working_directory)  # For debugging

            ### Handle the airplane file
            airfoil_file = "airfoil.dat"
            self.airfoil.write_dat(directory / airfoil_file)

            ### Open up AVL
            import sys, os
            if sys.platform == "win32":
                # Run XFoil
                print("Running XFoil interactively in a new window, quit it to continue...")

                command = f'cmd /k "{self.xfoil_command} {airfoil_file}"'
                process = subprocess.Popen(
                    command,
                    cwd=directory,
                    creationflags=subprocess.CREATE_NEW_CONSOLE
                )
                process.wait()

            else:
                raise NotImplementedError(
                    "Ability to auto-launch interactive XFoil sessions isn't yet implemented for non-Windows OSes."
                )

    def alpha(self,
              alpha: Union[float, np.ndarray],
              start_at: Union[float, None] = 0,
              ) -> Dict[str, np.ndarray]:
        """
        Execute XFoil at a given angle of attack, or at a sequence of angles of attack.

        Args:

            alpha: The angle of attack [degrees]. Can be either a float or an iterable of floats, such as an array.

            start_at: Chooses whether to split a large sweep into two runs that diverge away from some central value,
                to improve convergence. As an example, if you wanted to sweep from alpha=-20 to alpha=20, you might want
                to instead do two sweeps and stitch them together: 0 to 20, and 0 to -20. `start_at` can be either:

                * None, in which case the alpha inputs are run as a single sequence in the order given.

                * A float that corresponds to an angle of attack (in degrees), in which case the alpha inputs are
                  split into two sequences that diverge from the `start_at` value. Successful runs are then sorted by
                  `alpha` before returning.

        Returns: A dictionary with the XFoil results. Dictionary values are arrays; they may not be the same shape as
        your input array if some points did not converge.
        """
        alphas = np.array(alpha).reshape(-1)

        if np.length(alphas) > 1:
            if start_at is not None:
                if np.min(alphas) < start_at < np.max(alphas):
                    alphas = np.sort(alphas)
                    alphas_upper = alphas[alphas > start_at]
                    # Bugfix: mask the *sorted* `alphas`, not the raw `alpha` input
                    # (matches the equivalent logic in `cl()` below).
                    alphas_lower = alphas[alphas <= start_at][::-1]

                    output = self._run_xfoil(
                        "\n".join(
                            [
                                f"a {a}" + ("\nfmom" if self.hinge_point_x is not None else "")
                                for a in alphas_upper
                            ] + [
                                "init"
                            ] + [
                                f"a {a}" + ("\nfmom" if self.hinge_point_x is not None else "")
                                for a in alphas_lower
                            ]
                        )
                    )

                    sort_order = np.argsort(output['alpha'])
                    output = {
                        k: v[sort_order]
                        for k, v in output.items()
                    }
                    return output

        return self._run_xfoil(
            "\n".join([
                f"a {a}" + ("\nfmom" if self.hinge_point_x is not None else "")
                for a in alphas
            ])
        )

    def cl(self,
           cl: Union[float, np.ndarray],
           start_at: Union[float, None] = 0,
           ) -> Dict[str, np.ndarray]:
        """
        Execute XFoil at a given lift coefficient, or at a sequence of lift coefficients.

        Args:

            cl: The lift coefficient [-]. Can be either a float or an iterable of floats, such as an array.

            start_at: Chooses whether to split a large sweep into two runs that diverge away from some central value,
                to improve convergence. As an example, if you wanted to sweep from cl=-1.5 to cl=1.5, you might want to
                instead do two sweeps and stitch them together: 0 to 1.5, and 0 to -1.5. `start_at` can be either:

                * None, in which case the cl inputs are run as a single sequence in the order given.

                * A float that corresponds to an lift coefficient, in which case the cl inputs are
                  split into two sequences that diverge from the `start_at` value. Successful runs are then sorted by
                  `alpha` before returning.

        Returns: A dictionary with the XFoil results. Dictionary values are arrays; they may not be the same shape as
        your input array if some points did not converge.
        """
        cls = np.array(cl).reshape(-1)

        if np.length(cls) > 1:
            if start_at is not None:
                if np.min(cls) < start_at < np.max(cls):
                    cls = np.sort(cls)
                    cls_upper = cls[cls > start_at]
                    cls_lower = cls[cls <= start_at][::-1]

                    output = self._run_xfoil(
                        "\n".join(
                            [
                                f"cl {c}" + ("\nfmom" if self.hinge_point_x is not None else "")
                                for c in cls_upper
                            ] + [
                                "init"
                            ] + [
                                f"cl {c}" + ("\nfmom" if self.hinge_point_x is not None else "")
                                for c in cls_lower
                            ]
                        )
                    )

                    sort_order = np.argsort(output['alpha'])
                    output = {
                        k: v[sort_order]
                        for k, v in output.items()
                    }
                    return output

        return self._run_xfoil(
            "\n".join([
                f"cl {c}" + ("\nfmom" if self.hinge_point_x is not None else "")
                for c in cls
            ])
        )
if __name__ == '__main__':
    ### Example usage: sweep a NACA 2412 through several operating points.
    xf = XFoil(
        airfoil=Airfoil("naca2412").repanel(n_points_per_side=100),
        Re=1e6,
        hinge_point_x=0.75,
        # verbose=True,
        # working_directory=str(Path.home() / "Downloads" / "test"),
    )
    result_at_single_alpha = xf.alpha(5)
    result_at_several_CLs = xf.cl([-0.1, 0.5, 0.7, 0.8, 0.9])
    # If a run does not converge (e.g., the 60-degree case below), it is simply
    # omitted from the returned arrays.
    result_at_multiple_alphas = xf.alpha([3, 5, 60])
from aerosandbox.common import ExplicitAnalysis
import aerosandbox.numpy as np
import subprocess
from pathlib import Path
from aerosandbox.geometry import Airfoil
from aerosandbox.aerodynamics.aero_3D.avl import AVL
from typing import Union, List, Dict
import tempfile
import warnings
import os
from textwrap import dedent
import shutil
class MSES(ExplicitAnalysis):
    """
    An interface to MSES, MSET, and MPLOT, a 2D airfoil analysis system developed by Mark Drela at MIT.

    Requires compiled binaries for all the programs to be on your computer;
    MSES is available here: https://web.mit.edu/drela/Public/web/mses/
    Academics can get a copy by emailing the MIT Tech. Licensing Office;
    MIT affiliates can find a copy on Athena.

    It is recommended (but not required) that you add MSES, MSET, and MPLOT to your system PATH environment variable
    such that they can be called with the commands `mses`, `mset`, and `mplot`. If this is not the case, you need to
    specify the path to these executables using the command arguments of the constructor.

    -----

    X11 Notes:

    Note that MSES, MSET, and MPLOT by default open up X11 windows on your computer. If you prefer that this doesn't
    happen (for extra speed), or if you cannot have this happen (e.g., you are computing in an environment without
    proper X11 support, like Windows Subsystem for Linux), you should use XVFB. https://en.wikipedia.org/wiki/Xvfb

    XVFB is a virtual "display" server that can receive X11 output and safely dump it. (If you don't use XVFB and you
    don't have proper X11 support on your computer, this AeroSandbox MSES module will simply error out during the
    MSET call - probably not what you want.)

    To install XVFB on a Linux machine, use:

    ```bash
    sudo apt-get install xvfb
    ```

    Then, when instantiating this MSES instance in AeroSandbox, pass the `use_xvfb` flag to be True. Default behavior
    here is that this class will look for the XVFB executable, `xvfb-run`, on your machine. If it finds it,
    it will run with XVFB enabled. If it does not, it will run without XVFB.

    -----

    Usage example:

    >>> ms = MSES(
    >>>     airfoil=Airfoil("naca2412").repanel(n_points_per_side=100),
    >>>     Re=1e6,
    >>>     mach=0.2,
    >>> )
    >>>
    >>> result_at_single_alpha = ms.alpha(5)
    >>> #result_at_several_CLs = ms.cl([0.5, 0.7, 0.8, 0.9])
    >>> result_at_multiple_alphas = ms.alpha([3, 5, 60])  # Note: if a result does not converge (such as the 60 degree case here), it will not be included in the results.
    """

    def __init__(self,
                 airfoil: Airfoil,
                 n_crit: float = 9.,
                 xtr_upper: float = 1.,
                 xtr_lower: float = 1.,
                 max_iter: int = 100,
                 mset_command: str = "mset",
                 mses_command: str = "mses",
                 mplot_command: str = "mplot",
                 use_xvfb: bool = None,
                 xvfb_command: str = "xvfb-run -a",
                 verbosity: int = 1,
                 timeout_mset: Union[float, int, None] = 10,
                 timeout_mses: Union[float, int, None] = 60,
                 timeout_mplot: Union[float, int, None] = 10,
                 working_directory: str = None,
                 behavior_after_unconverged_run: str = "reinitialize",
                 mset_alpha: float = 0,
                 mset_n: int = 141,
                 mset_e: float = 0.4,
                 mset_io: int = 37,
                 mset_x: float = 0.850,
                 mses_mcrit: float = 0.99,
                 mses_mucon: float = -1.0,
                 ):
        """
        Interface to MSES, MSET, and MPLOT. (See the class docstring for how to obtain these tools.)

        The operating point (alpha, Re, mach) is specified per-call in `MSES.run()`, not here.

        Args:
            airfoil: The airfoil to analyze. Should be an AeroSandbox Airfoil object.

            n_crit: The critical Tollmien-Schlichting wave amplification factor, as part of the "e^n" transition
                criterion. This is a measure of freestream turbulence and surface roughness.

            xtr_upper: The upper-surface forced transition location [x/c]. Set to 1 to disable forced transition
                (default).

            xtr_lower: The lower-surface forced transition location [x/c]. Set to 1 to disable forced transition
                (default).

            max_iter: How many iterations should we let MSES do?

            mset_command: The command-line argument to call MSET.

                * If MSET is on your system PATH, you can leave this as "mset".

                * Otherwise, provide a filepath to the MSET executable.

            mses_command: The command-line argument to call MSES. Same PATH considerations as `mset_command`.

            mplot_command: The command-line argument to call MPLOT. Same PATH considerations as `mset_command`.

            use_xvfb: Controls whether XVFB is used to soak up the X11 output of MSET/MSES/MPLOT. If None (default),
                autodetects whether `xvfb-command` is runnable on this machine and uses it if so. See the class
                docstring ("X11 Notes") for details.

            xvfb_command: The command used to invoke XVFB. Only used if `use_xvfb` is truthy (or autodetected).

            verbosity: 0 for silent operation, 1 for progress messages (default), >=2 to also echo the raw
                stdout/stderr of each subprocess call.

            timeout_mset: Time limit [seconds] for each MSET call; None disables the timeout.

            timeout_mses: Time limit [seconds] for each MSES call; None disables the timeout.

            timeout_mplot: Time limit [seconds] for each MPLOT call; None disables the timeout.

            working_directory: Controls which working directory is used for the input and output files. By
                default, this is set to a TemporaryDirectory that is deleted after the run. However, you can set it to
                somewhere local for debugging purposes.

            behavior_after_unconverged_run: What to do when a run fails to converge. One of:

                * "reinitialize" (default): regenerate the mesh at the next alpha and continue the sweep.

                * "terminate": skip all subsequent runs.

            mset_alpha: Default alpha for MSET mesh generation. NOTE: currently unused by `run()`, which meshes at
                the first requested alpha instead.

            mset_n: Value passed to MSET's "n" grid parameter (airfoil-surface point count); see MSET docs.

            mset_e: Value passed to MSET's "e" grid parameter; see MSET docs.

            mset_io: Value passed to both MSET's "i" and "o" grid parameters; see MSET docs.

            mset_x: Value passed to MSET's "x" grid parameter; see MSET docs.

            mses_mcrit: MSES's MCRIT parameter (critical Mach number for artificial dissipation); see MSES docs.

            mses_mucon: MSES's MUCON parameter (artificial dissipation constant); see MSES docs.
        """
        if use_xvfb is None:
            # Autodetect XVFB: running `xvfb-run` with no arguments prints a usage
            # error if (and only if) it is installed and runnable.
            trial_run = subprocess.run(
                xvfb_command,  # Analogous to "xvfb-run", perhaps with additional arguments
                capture_output=True,
                shell=True,
                text=True,
            )
            expected_result = 'xvfb-run: usage error:'
            use_xvfb = expected_result in trial_run.stderr or expected_result in trial_run.stdout

        if not use_xvfb:
            xvfb_command = ""

        self.airfoil = airfoil
        self.n_crit = n_crit
        self.xtr_upper = xtr_upper
        self.xtr_lower = xtr_lower
        self.max_iter = max_iter
        self.mset_command = mset_command
        self.mses_command = mses_command
        self.mplot_command = mplot_command
        self.use_xvfb = use_xvfb
        self.xvfb_command = xvfb_command
        self.verbosity = verbosity
        self.timeout_mses = timeout_mses
        self.timeout_mset = timeout_mset
        self.timeout_mplot = timeout_mplot
        self.working_directory = working_directory
        self.behavior_after_unconverged_run = behavior_after_unconverged_run
        self.mset_alpha = mset_alpha
        self.mset_n = mset_n
        self.mset_e = mset_e
        self.mset_io = mset_io
        self.mset_x = mset_x
        self.mses_mcrit = mses_mcrit
        self.mses_mucon = mses_mucon

    def run(self,
            alpha: Union[float, np.ndarray, List] = 0.,
            Re: Union[float, np.ndarray, List] = 0.,
            mach: Union[float, np.ndarray, List] = 0.01,
            ):
        """
        Runs MSET -> MSES -> MPLOT at one or more operating points and collects the results.

        Args:
            alpha: Angle(s) of attack [degrees]. Scalars/arrays are broadcast against the other inputs.
            Re: Chord-referenced Reynolds number(s). Set to 0 for inviscid mode.
            mach: Freestream Mach number(s).

        Returns: A dictionary of arrays with one entry per *converged* run (unconverged runs are omitted),
        with "mach" as the first key.
        """
        ### Make all inputs iterables:
        alphas, Res, machs = np.broadcast_arrays(
            np.ravel(alpha),
            np.ravel(Re),
            np.ravel(mach),
        )

        # Set up a temporary directory
        with tempfile.TemporaryDirectory() as directory:
            directory = Path(directory)

            ### Alternatively, work in another directory:
            if self.working_directory is not None:
                directory = Path(self.working_directory)  # For debugging

            # Handle the airfoil file
            airfoil_file = "airfoil.dat"
            self.airfoil.write_dat(directory / airfoil_file)

            def mset(mset_alpha):
                # Generates the mesh (case name "case") at the given alpha.
                mset_keystrokes = dedent(f"""\
                15
                case
                7
                n {self.mset_n}
                e {self.mset_e}
                i {self.mset_io}
                o {self.mset_io}
                x {self.mset_x}

                1
                {mset_alpha}
                2
                3
                4
                0
                """)

                if self.verbosity >= 1:
                    print(f"Generating mesh at alpha = {mset_alpha} with MSES...")

                return subprocess.run(
                    f'{self.xvfb_command} "{self.mset_command}" "{airfoil_file}"',
                    input=mset_keystrokes,
                    cwd=directory,
                    capture_output=True,
                    text=True,
                    shell=True,
                    check=True,
                    timeout=self.timeout_mset
                )

            try:
                mset_run = mset(mset_alpha=alphas[0])
            except subprocess.CalledProcessError as e:
                print(e.stdout)
                print(e.stderr)
                if "BadName (named color or font does not exist)" in e.stderr:
                    raise RuntimeError(
                        "MSET via AeroSandbox errored because it couldn't launch an X11 window.\n"
                        "Try either installing a typical X11 client, or install Xvfb, which is\n"
                        "a virtual X11 server. More details in the AeroSandbox MSES docstring.")
                else:
                    # A failed MSET means no mesh exists, so there is no point in
                    # continuing; re-raise rather than silently swallowing the error.
                    raise

            runs_output = {}

            for i, (alpha, mach, Re) in enumerate(zip(alphas, machs, Res)):

                if self.verbosity >= 1:
                    print(f"Solving alpha = {alpha:.3f}, mach = {mach:.4f}, Re = {Re:.3e} with MSES...")

                # Write the MSES case file for this operating point.
                with open(directory / "mses.case", "w+") as f:
                    f.write(dedent(f"""\
                    3 4 5 7
                    3 4 5 7
                    {mach} 0.0 {alpha} | MACHin CLIFin ALFAin
                    3 2 | ISMOM IFFBC [ DOUXin DOUYin SRCEin ]
                    {Re} {self.n_crit} | REYNin ACRIT [ KTRTYP ]
                    {self.xtr_lower} {self.xtr_upper} | XTR1 XTR2
                    {self.mses_mcrit} {self.mses_mucon} | MCRIT MUCON
                    0 0 | ISMOVE ISPRES
                    0 0 | NMODN NPOSN
                    """))

                mses_keystrokes = dedent(f"""\
                {self.max_iter}
                0
                """)

                mses_run = subprocess.run(
                    f'{self.xvfb_command} "{self.mses_command}" case',
                    input=mses_keystrokes,
                    cwd=directory,
                    capture_output=True,
                    text=True,
                    shell=True,
                    check=True,
                    timeout=self.timeout_mses
                )
                if self.verbosity >= 2:
                    print(mses_run.stdout)
                    print(mses_run.stderr)

                converged = "Converged on tolerance" in mses_run.stdout
                if not converged:
                    if self.behavior_after_unconverged_run == "reinitialize":
                        if self.verbosity >= 1:
                            print("Run did not converge. Reinitializing mesh and continuing...")
                        try:
                            next_alpha = alphas[i + 1]
                        except IndexError:
                            break
                        mset_run = mset(mset_alpha=next_alpha)
                    elif self.behavior_after_unconverged_run == "terminate":
                        if self.verbosity >= 1:
                            print("Run did not converge. Skipping all subsequent runs...")
                        break

                    continue

                # Extract the integrated results with MPLOT.
                mplot_keystrokes = dedent(f"""\
                1
                12
                0
                0
                """)

                mplot_run = subprocess.run(
                    f'{self.xvfb_command} "{self.mplot_command}" case',
                    input=mplot_keystrokes,
                    cwd=directory,
                    capture_output=True,
                    text=True,
                    shell=True,
                    check=True,
                    timeout=self.timeout_mplot
                )
                if self.verbosity >= 2:
                    print(mplot_run.stdout)
                    print(mplot_run.stderr)

                # Normalize MPLOT's labels so the generic parser produces clean keys.
                raw_output = mplot_run.stdout. \
                    replace("top Xtr", "xtr_top"). \
                    replace("bot Xtr", "xtr_bot"). \
                    replace("at x,y", "x_ac")

                run_output = AVL.parse_unformatted_data_output(raw_output)

                # Merge runs_output and run_output
                for k in run_output.keys():
                    try:
                        runs_output[k].append(
                            run_output[k]
                        )
                    except KeyError:  # List not created yet
                        runs_output[k] = [run_output[k]]

            # Clean up the dictionary
            runs_output = {k: np.array(v) for k, v in runs_output.items()}
            # runs_output["mach"] = runs_output.pop("Ma")
            runs_output = {
                "mach": runs_output.pop("Ma"),
                **runs_output
            }

            return runs_output
if __name__ == '__main__':
    # Example: transonic drag-rise sweep of the RAE 2822 at alpha = 3 deg.
    from pathlib import Path
    from pprint import pprint

    ms = MSES(
        airfoil=Airfoil("rae2822"),  # .repanel(n_points_per_side=30),
        # NOTE: developer-specific path; change this for your own machine.
        working_directory="/mnt/c/Users/peter/Downloads/msestest/",
        # max_iter=120,
        verbosity=1,
        behavior_after_unconverged_run="terminate",
        mset_n=300,
        max_iter=100,
        # verbose=False
    )
    res = ms.run(
        alpha=3,
        mach=np.arange(0.55, 0.8, 0.005),
        # Re=1e6,
    )
    pprint(res)

    import matplotlib
    matplotlib.use("WebAgg")  # backend must be selected before pyplot is imported
    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p

    fig, ax = plt.subplots()
    plt.plot(res['mach'], res['CD'], ".-")
    p.show_plot()
from aerosandbox.geometry import Airfoil
from aerosandbox.performance import OperatingPoint
import aerosandbox.numpy as np
import aerosandbox.library.aerodynamics as aerolib
def airfoil_coefficients_post_stall(
        airfoil: Airfoil,
        alpha: float,
):
    """
    Estimates post-stall aerodynamics of an airfoil.

    Uses methods given in:

    Truong, V. K. "An analytical model for airfoil aerodynamic characteristics over the entire 360deg angle of attack
    range". J. Renewable Sustainable Energy. 2020. doi: 10.1063/1.5126055

    Args:
        airfoil: The airfoil to analyze. NOTE(review): currently unused — the fit constants below are
            hard-coded for a NACA0012; the commented-out lines show how airfoil geometry would enter.
        alpha: Angle of attack [degrees]. Scalar or array.

    Returns:
        Tuple (CL, CD, CM) of lift, drag, and moment coefficients in wind axes.
        CM is currently a placeholder of zeros (see TODO below).
    """
    sina = np.sind(alpha)  # alpha is in degrees
    cosa = np.cosd(alpha)

    ##### Normal force calculation
    # Cd90_fp = aerolib.Cd_flat_plate_normal() # TODO implement
    # Cd90_0 = Cd90_fp - 0.83 * airfoil.LE_radius() - 1.46 / 2 * airfoil.max_thickness() + 1.46 * airfoil.max_camber()
    # Cd270_0 = Cd90_fp - 0.83 * airfoil.LE_radius() - 1.46 / 2 * airfoil.max_thickness() - 1.46 * airfoil.max_camber()

    ### Values for NACA0012 (fit constants from the Truong 2020 model)
    Cd90_0 = 2.08
    pn2_star = 8.36e-2
    pn3_star = 4.06e-1
    pt1_star = 9.00e-2
    pt2_star = -1.78e-1
    pt3_star = -2.98e-1

    # Flat-plate-like drag coefficient at 90 deg, with cos(alpha) corrections
    Cd90 = Cd90_0 + pn2_star * cosa + pn3_star * cosa ** 2
    CN = Cd90 * sina  # Normal-force coefficient

    ##### Tangential force calculation
    CT = (pt1_star + pt2_star * cosa + pt3_star * cosa ** 3) * sina ** 2

    ##### Conversion to wind axes
    CL = CN * cosa + CT * sina
    CD = CN * sina - CT * cosa
    CM = np.zeros_like(CL)  # TODO

    return CL, CD, CM
if __name__ == '__main__':
    # Example: sweep alpha through the full 360-degree range (0.5-degree steps)
    # and plot the resulting post-stall CL and CD curves for a NACA0012.
    af = Airfoil("naca0012")
    alpha = np.linspace(0, 360, 721)
    CL, CD, CM = airfoil_coefficients_post_stall(
        af, alpha
    )
    from aerosandbox.tools.pretty_plots import plt, show_plot, set_ticks

    fig, ax = plt.subplots(1, 2, figsize=(8, 5))
    plt.sca(ax[0])
    plt.plot(alpha, CL)
    plt.xlabel("AoA")
    plt.ylabel("CL")
    set_ticks(45, 15, 0.5, 0.1)
    plt.sca(ax[1])
    plt.plot(alpha, CD)
    plt.xlabel("AoA")
    plt.ylabel("CD")
    set_ticks(45, 15, 0.5, 0.1)
    show_plot()
import aerosandbox.numpy as np
from aerosandbox.geometry import Airfoil
from aerosandbox.geometry.airfoil.airfoil_families import get_kulfan_coordinates
from scipy import optimize
import matplotlib.pyplot as plt
import aerosandbox.tools.pretty_plots as p
if __name__ == '__main__':

    ### Design Conditions
    Re_des = 3e5  # Re to design to
    Cl_start = 1.0  # Lower bound of CLs that you care about
    Cl_end = 1.5  # Upper bound of CLs that you care about (Effectively, CL_max)
    Cm_min = -0.08  # Worst-allowable pitching moment that you'll allow
    TE_thickness = 0.0015  # Sets trailing edge thickness
    enforce_continuous_LE_radius = True  # Should we force the leading edge to have continuous curvature?

    ### Guesses for airfoil CST parameters; you usually don't need to change these
    lower_guess = -0.05 * np.ones(30)  # CST weights for the lower surface
    upper_guess = 0.25 * np.ones(30)  # CST weights for the upper surface
    upper_guess[0] = 0.15  # Thin the upper surface near the leading edge
    upper_guess[1] = 0.20

    # Saved result from a previous optimization run, kept for reuse as a warm start:
    # lower_guess = [-0.21178419, -0.05500152, -0.04540216, -0.03436429, -0.03305599,
    #                -0.03121454, -0.04513736, -0.05491045, -0.02861083, -0.05673649,
    #                -0.06402239, -0.05963394, -0.0417384, -0.0310728, -0.04983729,
    #                -0.04211283, -0.04999657, -0.0632682, -0.07226548, -0.03604782,
    #                -0.06151112, -0.04030985, -0.02748867, -0.02705322, -0.04279788,
    #                -0.04734922, -0.033705, -0.02380217, -0.04480772, -0.03756881]
    # upper_guess = [0.17240303, 0.26668075, 0.21499604, 0.26299318, 0.22545807,
    #                0.24759903, 0.31644402, 0.2964658, 0.15360716, 0.31317824,
    #                0.27760982, 0.23009955, 0.24045039, 0.37542525, 0.21361931,
    #                0.18678503, 0.23466624, 0.20630533, 0.16191541, 0.20453953,
    #                0.14370825, 0.13428077, 0.15387739, 0.13767285, 0.15173257,
    #                0.14042002, 0.11336701, 0.35640688, 0.10953915, 0.08167446]

    ### Packing/Unpacking functions
    n_lower = len(lower_guess)
    n_upper = len(upper_guess)
    pack = lambda lower, upper: np.concatenate((lower, upper))  # (lower, upper) -> single design vector
    unpack = lambda pack: (pack[:n_lower], pack[n_lower:])  # design vector -> (lower, upper); NB: param shadows `pack`
    def make_airfoil(x):
        """
        A function that constructs an airfoil from a packed design vector.

        :param x: Packed design vector: lower-surface CST weights followed by upper-surface CST weights.
        :return: An Airfoil object built from the corresponding Kulfan (CST) coordinates.
        """
        lower, upper = unpack(x)
        return Airfoil(
            name="Optimization Airfoil",
            coordinates=get_kulfan_coordinates(
                lower_weights=lower,
                upper_weights=upper,
                enforce_continuous_LE_radius=enforce_continuous_LE_radius,
                TE_thickness=TE_thickness,
                n_points_per_side=80
            )
        )
### Initial guess construction
x0 = pack(lower_guess, upper_guess)
initial_airfoil = make_airfoil(x0)
### Initialize plotting
fig = plt.figure(figsize=(15, 2.5))
ax = fig.add_subplot(111)
trace_initial, = ax.plot(
initial_airfoil.coordinates[:, 0],
initial_airfoil.coordinates[:, 1],
':r',
label="Initial Airfoil"
)
trace_current, = ax.plot(
initial_airfoil.coordinates[:, 0],
initial_airfoil.coordinates[:, 1],
"-b",
label="Current Airfoil"
)
plt.axis("equal")
plt.xlabel(r"$x/c$")
plt.ylabel(r"$y/c$")
plt.title("Airfoil Optimization")
plt.legend()
    def draw(
            airfoil  # type: Airfoil
    ):
        """
        Updates the "current airfoil" line on the plot with the given airfoil.

        :param airfoil: The Airfoil object whose coordinates should be drawn.
        :return: None
        """
        trace_current.set_xdata(airfoil.coordinates[:, 0])
        trace_current.set_ydata(airfoil.coordinates[:, 1])
        plt.draw()
        plt.pause(0.001)  # Brief pause so the GUI event loop can repaint
### Utilities for tracking the design vector and objective throughout the optimization run
iteration = 0
xs = []
fs = []
def augmented_objective(x):
"""
Objective function with constraints added via a multiplicative external penalty method
:param x: Packed design vector
:return: Value of the augmented objective
"""
airfoil = make_airfoil(x)
xfoil = airfoil.xfoil_cseq(
cl_start=Cl_start,
cl_step=0.02,
cl_end=Cl_end,
Re=Re_des,
verbose=False,
max_iter=40,
repanel=False
)
if np.isnan(xfoil["Cd"]).any():
return np.Inf
objective = np.sqrt(np.mean(xfoil["Cd"] ** 2)) # RMS
penalty = 0
penalty += np.sum(np.minimum(0, (xfoil["Cm"] - Cm_min) / 0.01) ** 2) # Cm constraint
penalty += np.minimum(0, (airfoil.TE_angle() - 5) / 1) ** 2 # TE angle constraint
penalty += np.minimum(0, (airfoil.local_thickness(0.90) - 0.015) / 0.005) ** 2 # Spar thickness constraint
penalty += np.minimum(0, (airfoil.local_thickness(0.30) - 0.12) / 0.005) ** 2 # Spar thickness constraint
xs.append(x)
fs.append(objective)
return objective * (1 + penalty)
def callback(x):
global iteration
iteration += 1
print(
f"Iteration {iteration}: Cd = {fs[-1]:.6f}"
)
if iteration % 1 == 0:
airfoil = make_airfoil(x)
draw(airfoil)
ax.set_title(f"Airfoil Optimization: Iteration {iteration}")
airfoil.write_dat("optimized_airfoil.dat")
    draw(initial_airfoil)

    # Build the initial Nelder-Mead simplex: each vertex is x0 with every coordinate
    # randomly scaled by a factor in [0.5, 1.5), which spreads the simplex widely in design space.
    initial_simplex = (
            (0.5 + 1 * np.random.random((len(x0) + 1, len(x0))))
            * x0
    )
    initial_simplex[0, :] = x0  # Include x0 in the simplex
    print("Initializing simplex (give this a few minutes)...")

    res = optimize.minimize(
        fun=augmented_objective,
        x0=pack(lower_guess, upper_guess),
        method="Nelder-Mead",
        callback=callback,
        options={
            'maxiter'        : 10 ** 6,
            'initial_simplex': initial_simplex,
            'xatol'          : 1e-8,
            'fatol'          : 1e-6,
            'adaptive'       : False,
        }
    )

    final_airfoil = make_airfoil(res.x)
# Airfoil Optimizer
by Peter Sharpe
## WARNING
Airfoil optimization is dangerous territory — it is ripe for misunderstanding, and far more complicated and nuanced than the point-design drag-minimization problem it appears to be on the surface. Before you continue, please read and deeply understand these articles:
1. What you should and shouldn't do when optimizing airfoils: [Mark Drela, Pros and Cons of Airfoil Optimization](https://www.researchgate.net/publication/265279078_Pros_Cons_of_Airfoil_Optimization)
2. Comparisons of airfoil geometry parameterization techniques (there are many good papers on this topic, but I like this one): [D. A. Masters, "Geometric Comparison of Aerofoil Shape Parameterization Methods", AIAA Journal, 2017.](https://arc.aiaa.org/doi/pdf/10.2514/1.J054943)
3. The seminal paper on the CST (Kulfan) parameterization technique: [Brenda Kulfan, "Universal Parametric Geometry Representation Method"](http://mx1.brendakulfan.com/docs/CST6.pdf)
| AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/aerodynamics/aero_2D/airfoil_optimizer/README.md | README.md |
import aerosandbox.numpy as np
from typing import Union
import casadi as cas
def _calculate_induced_velocity_line_singularity_panel_coordinates(
        xp_field: Union[float, np.ndarray],
        yp_field: Union[float, np.ndarray],
        gamma_start: float = 0.,
        gamma_end: float = 0.,
        sigma_start: float = 0.,
        sigma_end: float = 0.,
        xp_panel_end: float = 1.,
) -> [Union[float, np.ndarray], Union[float, np.ndarray]]:
    """
    Calculates the induced velocity at a point (xp_field, yp_field) in a 2D potential-flow flowfield.

    The `p` suffix in `xp...` and `yp...` denotes the use of the panel coordinate system, where:
        * xp_hat is along the length of the panel
        * yp_hat is orthogonal (90 deg. counterclockwise) to it.

    In this flowfield, there is one singularity element: a line from (0, 0) to (xp_panel_end, 0)
    that carries both a vortex sheet and a source sheet, each of linearly-varying strength:
        * vortex strength varies from gamma_start at (0, 0) to gamma_end at (xp_panel_end, 0);
        * source strength varies from sigma_start at (0, 0) to sigma_end at (xp_panel_end, 0).

    By convention here, positive gamma induces clockwise swirl in the flow field.

    Function returns the 2D velocity u, v in the local coordinate system of the panel.

    Inputs x and y can be 1D ndarrays representing various field points,
    in which case the resulting velocities u and v have corresponding dimensionality.

    Equations from the seminal textbook "Low Speed Aerodynamics" by Katz and Plotkin.
    Vortex equations are Eq. 11.99 and Eq. 11.100.
        * Note: there is an error in equation 11.100 in Katz and Plotkin, at least in the 2nd ed:
        The last term of equation 11.100, which is given as:
            (x_{j+1} - x_j) / z + (theta_{j+1} - theta_j)
        has a sign error and should instead be written as:
            (x_{j+1} - x_j) / z - (theta_{j+1} - theta_j)
    Source equations are Eq. 11.89 and Eq. 11.90.
    """
    ### Modify any incoming floats, so all subsequent math can assume array inputs
    if isinstance(xp_field, (float, int)):
        xp_field = np.array([xp_field])
    if isinstance(yp_field, (float, int)):
        yp_field = np.array([yp_field])

    ### Determine if you can skip either the vortex or source parts
    # Skipping is only valid when the strengths are known-zero *numbers*; symbolic (CasADi MX)
    # strengths must always be evaluated, since their values aren't known at trace time.
    skip_vortex_math = not (
            isinstance(gamma_start, cas.MX) or
            isinstance(gamma_end, cas.MX)
    ) and gamma_start == 0 and gamma_end == 0
    skip_source_math = not (
            isinstance(sigma_start, cas.MX) or
            isinstance(sigma_end, cas.MX)
    ) and sigma_start == 0 and sigma_end == 0

    ### Determine which points are effectively on the panel, necessitating different math:
    is_on_panel = np.fabs(yp_field) <= 1e-8  # absolute tolerance on the panel-normal coordinate

    ### Do some geometry calculation
    # r_1, r_2: distances from the field point to the panel's start and end points
    r_1 = (
                  xp_field ** 2 +
                  yp_field ** 2
          ) ** 0.5
    r_2 = (
                  (xp_field - xp_panel_end) ** 2 +
                  yp_field ** 2
          ) ** 0.5

    ### Regularize
    # A field point coincident with a panel endpoint would make the math below singular
    # (log(0), 0/0); substitute a dummy distance of 1 here, and zero out the resulting
    # velocities at the very end (see `is_on_endpoint` handling).
    is_on_endpoint = (
            (r_1 == 0) | (r_2 == 0)
    )
    r_1 = np.where(
        r_1 == 0,
        1,
        r_1,
    )
    r_2 = np.where(
        r_2 == 0,
        1,
        r_2
    )

    ### Continue geometry calculation
    # theta_1, theta_2: angles of the field point as seen from the panel's start/end points
    theta_1 = np.arctan2(yp_field, xp_field)
    theta_2 = np.arctan2(yp_field, xp_field - xp_panel_end)
    ln_r_2_r_1 = np.log(r_2 / r_1)
    d_theta = theta_2 - theta_1
    tau = 2 * np.pi

    ### Regularize if the point is on the panel.
    # Avoids a divide-by-zero in the `xp_panel_end / yp_field` terms below;
    # on-panel results are overridden by the np.where branches anyway.
    yp_field_regularized = np.where(
        is_on_panel,
        1,
        yp_field
    )

    ### VORTEX MATH
    if skip_vortex_math:
        u_vortex = 0
        v_vortex = 0
    else:
        d_gamma = gamma_end - gamma_start  # vortex-strength slope along the panel
        u_vortex_term_1_quantity = (yp_field
                                    / tau
                                    * d_gamma
                                    / xp_panel_end
                                    )
        u_vortex_term_2_quantity = (
                                           gamma_start * xp_panel_end + d_gamma * xp_field
                                   ) / (
                                           tau * xp_panel_end
                                   )

        # Calculate u_vortex
        u_vortex_term_1 = u_vortex_term_1_quantity * ln_r_2_r_1
        u_vortex_term_2 = u_vortex_term_2_quantity * d_theta
        u_vortex = u_vortex_term_1 + u_vortex_term_2

        # Correct the u-velocity if field point is on the panel
        u_vortex = np.where(
            is_on_panel,
            0,
            u_vortex
        )

        # Calculate v_vortex
        v_vortex_term_1 = u_vortex_term_2_quantity * ln_r_2_r_1
        v_vortex_term_2 = np.where(
            is_on_panel,
            d_gamma / tau,  # value used when the field point lies on the panel surface
            u_vortex_term_1_quantity * (
                    xp_panel_end / yp_field_regularized -
                    d_theta
            ),
        )
        v_vortex = v_vortex_term_1 + v_vortex_term_2

    ### SOURCE MATH
    if skip_source_math:
        u_source = 0
        v_source = 0
    else:
        d_sigma = sigma_end - sigma_start  # source-strength slope along the panel
        v_source_term_1_quantity = (yp_field
                                    / tau
                                    * d_sigma
                                    / xp_panel_end
                                    )
        v_source_term_2_quantity = (
                                           sigma_start * xp_panel_end + d_sigma * xp_field
                                   ) / (
                                           tau * xp_panel_end
                                   )
        # Calculate v_source
        v_source_term_1 = -v_source_term_1_quantity * ln_r_2_r_1
        v_source_term_2 = v_source_term_2_quantity * d_theta
        v_source = v_source_term_1 + v_source_term_2

        # Correct the v-velocity if field point is on the panel
        v_source = np.where(
            is_on_panel,
            0,
            v_source
        )

        # Calculate u_source
        u_source_term_1 = -v_source_term_2_quantity * ln_r_2_r_1
        u_source_term_2 = np.where(
            is_on_panel,
            -d_sigma / tau,  # value used when the field point lies on the panel surface
            -v_source_term_1_quantity * (
                    xp_panel_end / yp_field_regularized -
                    d_theta
            ),
        )
        u_source = u_source_term_1 + u_source_term_2

    ### Return
    u = u_vortex + u_source
    v = v_vortex + v_source

    ### If the field point is on the endpoint of the panel, replace the NaN with a zero.
    u = np.where(
        is_on_endpoint,
        0,
        u
    )
    v = np.where(
        is_on_endpoint,
        0,
        v
    )

    return u, v
def _calculate_induced_velocity_line_singularity(
        x_field: Union[float, np.ndarray],
        y_field: Union[float, np.ndarray],
        x_panel_start: float,
        y_panel_start: float,
        x_panel_end: float,
        y_panel_end: float,
        gamma_start: float = 0.,
        gamma_end: float = 0.,
        sigma_start: float = 0.,
        sigma_end: float = 0.,
) -> [Union[float, np.ndarray], Union[float, np.ndarray]]:
    """
    Calculates the induced velocity at a point (x_field, y_field) in a 2D potential-flow flowfield.

    In this flowfield, there is one singularity element: a line from (x_panel_start, y_panel_start)
    to (x_panel_end, y_panel_end) carrying both a vortex sheet and a source sheet, each of
    linearly-varying strength. The vortex strength varies linearly from:
        * gamma_start at (x_panel_start, y_panel_start), to:
        * gamma_end at (x_panel_end, y_panel_end),
    and likewise sigma_start / sigma_end give the source strengths at the two endpoints.

    By convention here, positive gamma induces clockwise swirl in the flow field.

    Function returns the 2D velocity u, v in the global coordinate system (x, y).

    Inputs x and y can be 1D ndarrays representing various field points,
    in which case the resulting velocities u and v have the corresponding dimensionality.
    """
    ### Calculate the panel coordinate transform (x -> xp, y -> yp), where
    # xp_hat points along the panel and yp_hat is 90 deg. counterclockwise from it.
    panel_dx = x_panel_end - x_panel_start
    panel_dy = y_panel_end - y_panel_start
    panel_length = (panel_dx ** 2 + panel_dy ** 2) ** 0.5

    panel_length = np.fmax(panel_length, 1e-16)  # Regularize against zero-length panels

    xp_hat_x = panel_dx / panel_length  # x-coordinate of the xp_hat vector
    xp_hat_y = panel_dy / panel_length  # y-coordinate of the xp_hat vector

    yp_hat_x = -xp_hat_y  # yp_hat is xp_hat rotated 90 deg. counterclockwise
    yp_hat_y = xp_hat_x

    ### Transform the field points in to panel coordinates
    x_field_relative = x_field - x_panel_start
    y_field_relative = y_field - y_panel_start

    xp_field = x_field_relative * xp_hat_x + y_field_relative * xp_hat_y  # dot product with the xp unit vector
    yp_field = x_field_relative * yp_hat_x + y_field_relative * yp_hat_y  # dot product with the yp unit vector

    ### Do the vortex math
    up, vp = _calculate_induced_velocity_line_singularity_panel_coordinates(
        xp_field=xp_field,
        yp_field=yp_field,
        gamma_start=gamma_start,
        gamma_end=gamma_end,
        sigma_start=sigma_start,
        sigma_end=sigma_end,
        xp_panel_end=panel_length,
    )

    ### Transform the velocities in panel coordinates back to global coordinates
    u = up * xp_hat_x + vp * yp_hat_x
    v = up * xp_hat_y + vp * yp_hat_y

    ### Return
    return u, v
def calculate_induced_velocity_line_singularities(
        x_field: Union[float, np.ndarray],
        y_field: Union[float, np.ndarray],
        x_panels: np.ndarray,
        y_panels: np.ndarray,
        gamma: np.ndarray,
        sigma: np.ndarray,
) -> [Union[float, np.ndarray], Union[float, np.ndarray]]:
    """
    Calculates the induced velocity at a point (x_field, y_field) in a 2D potential-flow flowfield.

    In this flowfield, the following singularity elements are assumed:
        * A chain of line singularities (vortex + source sheets) passing through the coordinates
          specified in (x_panels, y_panels). Each of these vertices is called a "node".
        * The vortex and source strengths per unit length vary linearly between subsequent nodes.
        * The strengths at each node are specified by the parameters `gamma` and `sigma`.

    By convention here, positive gamma induces clockwise swirl in the flow field.

    Function returns the 2D velocity u, v in the global coordinate system (x, y).

    Inputs x_field and y_field can be 1D ndarrays representing various field points,
    in which case the resulting velocities u and v have the corresponding dimensionality.
    """
    try:
        N = len(x_panels)  # number of nodes
    except TypeError:  # object without len() (e.g., a CasADi matrix); use its shape instead
        N = x_panels.shape[0]

    # Accumulate the contribution of each of the (N - 1) panels.
    # Starting the accumulators at 0 (rather than special-casing the first panel, as a previous
    # version did) also makes the function well-defined — it returns zero velocity — when fewer
    # than two nodes are given, instead of raising a NameError at the return statement.
    u_field = 0
    v_field = 0
    for i in range(N - 1):
        u, v = _calculate_induced_velocity_line_singularity(
            x_field=x_field,
            y_field=y_field,
            x_panel_start=x_panels[i],
            y_panel_start=y_panels[i],
            x_panel_end=x_panels[i + 1],
            y_panel_end=y_panels[i + 1],
            gamma_start=gamma[i],
            gamma_end=gamma[i + 1],
            sigma_start=sigma[i],
            sigma_end=sigma[i + 1],
        )
        u_field = u_field + u
        v_field = v_field + v

    return u_field, v_field
if __name__ == '__main__':
    # Example: visualize the velocity field induced by a closed square loop of panels
    # carrying unit vortex and source strengths at every node.
    X, Y = np.meshgrid(
        np.linspace(-2, 2, 50),
        np.linspace(-2, 2, 50),
    )
    X = X.flatten()
    Y = Y.flatten()

    # Square loop; the first node is repeated at the end to close the contour.
    x_panels = np.array([1, -1, -1, 1, 1])
    y_panels = np.array([1, 1, -1, -1, 1])

    U, V = calculate_induced_velocity_line_singularities(
        x_field=X,
        y_field=Y,
        x_panels=x_panels,
        y_panels=y_panels,
        gamma=1 * np.ones_like(x_panels),
        sigma=1 * np.ones_like(x_panels)
    )

    import matplotlib.pyplot as plt
    import seaborn as sns

    sns.set(palette=sns.color_palette("husl"))
    fig, ax = plt.subplots(1, 1, figsize=(6, 6), dpi=200)
    plt.quiver(
        X, Y, U, V,
        (U ** 2 + V ** 2) ** 0.5,  # Color arrows by velocity magnitude
        scale=10
    )
    plt.axis("equal")
    plt.xlabel(r"$x$")
    plt.ylabel(r"$z$")
    plt.title(r"Linear-Strength Vortex: Induced Velocity")
    plt.tight_layout()
    # plt.savefig("C:/Users/User/Downloads/temp.svg")
    plt.show()
from aerosandbox import ExplicitAnalysis
from aerosandbox.geometry import *
from aerosandbox.performance import OperatingPoint
import aerosandbox.library.aerodynamics as aero
import aerosandbox.numpy as np
from aerosandbox.aerodynamics.aero_3D.aero_buildup_submodels.fuselage_aerodynamics_utilities import *
from aerosandbox.library.aerodynamics import transonic
import aerosandbox.library.aerodynamics as aerolib
import copy
from typing import Union, List, Dict, Any
from aerosandbox.aerodynamics.aero_3D.aero_buildup_submodels.softmax_scalefree import softmax_scalefree
class AeroBuildup(ExplicitAnalysis):
    """
    A workbook-style aerodynamics buildup.

    Example usage:
        >>> import aerosandbox as asb
        >>> ab = asb.AeroBuildup(  # This sets up the analysis, but doesn't execute calculation
        >>>     airplane=my_airplane,  # type: asb.Airplane
        >>>     op_point=my_operating_point,  # type: asb.OperatingPoint
        >>>     xyz_ref=[0.1, 0.2, 0.3],  # Moment reference and center of rotation.
        >>> )
        >>> aero = ab.run()  # This executes the actual aero analysis.
        >>> aero_with_stability_derivs = ab.run_with_stability_derivatives()  # Same, but also gets stability derivatives.
    """
    # Default per-component-type analysis options, keyed by geometry class.
    # NOTE(review): presumably consumed/overridden via the `ExplicitAnalysis` options machinery —
    # confirm against the base class.
    default_analysis_specific_options = {
        Fuselage: dict(
            E_wave_drag=2.5,  # Wave drag efficiency factor
            # Defined by Raymer, "Aircraft Design: A Conceptual Approach", 2nd Ed. Chap. 12.5.9 "Supersonic Parasite Drag".
            # Notated there as "E_WD".
            #
            # Various recommendations:
            #   * For a perfect Sears-Haack body, 1.0
            #   * For a clean aircraft with smooth volume distribution (e.g., BWB), 1.2
            #   * For a "more typical supersonic...", 1.8 - 2.2
            #   * For a "poor supersonic design", 2.5 - 3.0
            #   * The F-15 has E_WD = 2.9.
            nose_fineness_ratio=3,  # Fineness ratio (length / diameter) of the nose section of the fuselage.
            # Impacts wave drag calculations, among other things.
        ),
    }
def __init__(self,
airplane: Airplane,
op_point: OperatingPoint,
xyz_ref: Union[np.ndarray, List[float]] = None,
include_wave_drag: bool = True,
):
"""
Initializes a new AeroBuildup analysis as an object.
Note: to run the analysis, you need to first instantiate the object, then call the .run() method.
Args:
airplane: The airplane to analyze.
op_point: The operating point to analyze at. Note that this can be vectorized (i.e., attributes of the OperatingPoint
object can be arrays, in which case AeroBuildup analysis will be vectorized).
xyz_ref: The reference point for the aerodynamic forces and moments. This is the point about which the moments are
taken, and the point at which the forces are applied. Defaults to the airplane's xyz_ref.
include_wave_drag: Whether to include wave drag in the analysis. Defaults to True.
Returns: None
"""
super().__init__()
### Set defaults
if xyz_ref is None:
xyz_ref = airplane.xyz_ref
### Initialize
self.airplane = airplane
self.op_point = op_point
self.xyz_ref = xyz_ref
self.include_wave_drag = include_wave_drag
def __repr__(self):
return self.__class__.__name__ + "(\n\t" + "\n\t".join([
f"airplane={self.airplane}",
f"op_point={self.op_point}",
f"xyz_ref={self.xyz_ref}",
]) + "\n)"
    def run(self) -> Dict[str, Union[Union[float, np.ndarray], List[Union[float, np.ndarray]]]]:
        """
        Computes the aerodynamic forces and moments on the airplane.

        Returns: a dictionary with keys:

            - 'F_g' : an [x, y, z] list of forces in geometry axes [N]
            - 'F_b' : an [x, y, z] list of forces in body axes [N]
            - 'F_w' : an [x, y, z] list of forces in wind axes [N]
            - 'M_g' : an [x, y, z] list of moments about geometry axes [Nm]
            - 'M_b' : an [x, y, z] list of moments about body axes [Nm]
            - 'M_w' : an [x, y, z] list of moments about wind axes [Nm]
            - 'L' : the lift force [N]. Definitionally, this is in wind axes.
            - 'Y' : the side force [N]. This is in wind axes.
            - 'D' : the drag force [N]. Definitionally, this is in wind axes.
            - 'l_b', the rolling moment, in body axes [Nm]. Positive is roll-right.
            - 'm_b', the pitching moment, in body axes [Nm]. Positive is pitch-up.
            - 'n_b', the yawing moment, in body axes [Nm]. Positive is nose-right.
            - 'CL', the lift coefficient [-]. Definitionally, this is in wind axes.
            - 'CY', the sideforce coefficient [-]. This is in wind axes.
            - 'CD', the drag coefficient [-]. Definitionally, this is in wind axes.
            - 'Cl', the rolling coefficient [-], in body axes
            - 'Cm', the pitching coefficient [-], in body axes
            - 'Cn', the yawing coefficient [-], in body axes

        Nondimensional values are nondimensionalized using reference values in the AeroBuildup.airplane object.

        Data types:
            - The "L", "Y", "D", "l_b", "m_b", "n_b", "CL", "CY", "CD", "Cl", "Cm", and "Cn" keys are:

                - floats if the OperatingPoint object is not vectorized (i.e., if all attributes of OperatingPoint
                are floats, not arrays).

                - arrays if the OperatingPoint object is vectorized (i.e., if any attribute of OperatingPoint is an
                array).

            - The "F_g", "F_b", "F_w", "M_g", "M_b", and "M_w" keys are always lists, which will contain either
            floats or arrays, again depending on whether the OperatingPoint object is vectorized or not.
        """
        ### Compute the forces on each component
        # Induced drag is excluded per-component (include_induced_drag=False) and added once
        # at the whole-airplane level below.
        aero_components = [
                              self.wing_aerodynamics(
                                  wing=wing,
                                  include_induced_drag=False
                              ) for wing in
                              self.airplane.wings
                          ] + [
                              self.fuselage_aerodynamics(
                                  fuselage=fuse,
                                  include_induced_drag=False
                              ) for fuse in
                              self.airplane.fuselages
                          ]

        ### Sum up the forces
        aero_total = {
            "F_g": [0., 0., 0.],
            # "F_b": [0., 0., 0.],
            # "F_w": [0., 0., 0.],
            "M_g": [0., 0., 0.],
            "M_b": [0., 0., 0.],
            "M_w": [0., 0., 0.],
            # "L" : 0.,
            # "Y" : 0.,
            # "D" : 0.,
            # "l_b": 0.,
            # "m_b": 0.,
            # "n_b": 0.,
        }

        for k in aero_total.keys():
            for aero_component in aero_components:
                if isinstance(aero_total[k], list):
                    # Element-wise sum for [x, y, z] vector quantities
                    aero_total[k] = [
                        aero_total[k][i] + aero_component[k][i]
                        for i in range(3)
                    ]
                else:
                    aero_total[k] = aero_total[k] + aero_component[k]

        ##### Add in the induced drag
        Q = self.op_point.dynamic_pressure()

        # Effective spans squared, weighted by each component's Oswald efficiency and
        # combined across components with a scale-free softmax.
        y_span_effective_squared = softmax_scalefree([
            comp["y_span_effective"] ** 2 * comp["oswalds_efficiency"]
            for comp in aero_components
        ])
        z_span_effective_squared = softmax_scalefree([
            comp["z_span_effective"] ** 2 * comp["oswalds_efficiency"]
            for comp in aero_components
        ])

        _, sideforce, lift = self.op_point.convert_axes(
            *aero_total['F_g'],
            from_axes="geometry",
            to_axes="wind"
        )

        # Induced drag from lift (uses y-span) and from sideforce (uses z-span);
        # the +1e-100 terms guard against division by zero.
        D_induced = (
                lift ** 2 / (Q * np.pi * y_span_effective_squared + 1e-100) +
                sideforce ** 2 / (Q * np.pi * z_span_effective_squared + 1e-100)
        )
        D_induced_g = self.op_point.convert_axes(
            -D_induced, 0, 0,
            from_axes="wind",
            to_axes="geometry"
        )
        for i in range(3):
            aero_total['F_g'][i] += D_induced_g[i]

        ##### Add in other metrics
        aero_total["F_b"] = self.op_point.convert_axes(
            *aero_total["F_g"],
            from_axes="geometry",
            to_axes="body"
        )
        aero_total["F_w"] = self.op_point.convert_axes(
            *aero_total["F_g"],
            from_axes="geometry",
            to_axes="wind"
        )
        aero_total["L"] = -aero_total["F_w"][2]
        aero_total["Y"] = aero_total["F_w"][1]
        aero_total["D"] = -aero_total["F_w"][0]
        aero_total["l_b"] = aero_total["M_b"][0]
        aero_total["m_b"] = aero_total["M_b"][1]
        aero_total["n_b"] = aero_total["M_b"][2]

        ##### Compute dimensionalization factor
        if self.airplane.s_ref is not None:
            qS = self.op_point.dynamic_pressure() * self.airplane.s_ref
            c = self.airplane.c_ref
            b = self.airplane.b_ref
        else:
            raise ValueError(
                "Airplane must have a reference area and length attributes.\n"
                "(`Airplane.s_ref`, `Airplane.c_ref`, `Airplane.b_ref`)"
            )

        ##### Add nondimensional forces, and nondimensional quantities.
        aero_total["CL"] = aero_total["L"] / qS
        aero_total["CY"] = aero_total["Y"] / qS
        aero_total["CD"] = aero_total["D"] / qS
        aero_total["Cl"] = aero_total["l_b"] / qS / b
        aero_total["Cm"] = aero_total["m_b"] / qS / c
        aero_total["Cn"] = aero_total["n_b"] / qS / b

        self.output = aero_total  # Cache the result on the analysis object

        return aero_total
def run_with_stability_derivatives(self,
alpha=True,
beta=True,
p=True,
q=True,
r=True,
):
"""
Computes the aerodynamic forces and moments on the airplane, and the stability derivatives.
Arguments essentially determine which stability derivatives are computed. If a stability derivative is not
needed, leaving it False will speed up the computation.
Args:
- alpha (bool): If True, compute the stability derivatives with respect to the angle of attack (alpha).
- beta (bool): If True, compute the stability derivatives with respect to the sideslip angle (beta).
- p (bool): If True, compute the stability derivatives with respect to the body-axis roll rate (p).
- q (bool): If True, compute the stability derivatives with respect to the body-axis pitch rate (q).
- r (bool): If True, compute the stability derivatives with respect to the body-axis yaw rate (r).
Returns: a dictionary with keys:
- 'F_g' : an [x, y, z] list of forces in geometry axes [N]
- 'F_b' : an [x, y, z] list of forces in body axes [N]
- 'F_w' : an [x, y, z] list of forces in wind axes [N]
- 'M_g' : an [x, y, z] list of moments about geometry axes [Nm]
- 'M_b' : an [x, y, z] list of moments about body axes [Nm]
- 'M_w' : an [x, y, z] list of moments about wind axes [Nm]
- 'L' : the lift force [N]. Definitionally, this is in wind axes.
- 'Y' : the side force [N]. This is in wind axes.
- 'D' : the drag force [N]. Definitionally, this is in wind axes.
- 'l_b', the rolling moment, in body axes [Nm]. Positive is roll-right.
- 'm_b', the pitching moment, in body axes [Nm]. Positive is pitch-up.
- 'n_b', the yawing moment, in body axes [Nm]. Positive is nose-right.
- 'CL', the lift coefficient [-]. Definitionally, this is in wind axes.
- 'CY', the sideforce coefficient [-]. This is in wind axes.
- 'CD', the drag coefficient [-]. Definitionally, this is in wind axes.
- 'Cl', the rolling coefficient [-], in body axes
- 'Cm', the pitching coefficient [-], in body axes
- 'Cn', the yawing coefficient [-], in body axes
Along with additional keys, depending on the value of the `alpha`, `beta`, `p`, `q`, and `r` arguments. For
example, if `alpha=True`, then the following additional keys will be present:
- 'CLa', the lift coefficient derivative with respect to alpha [1/rad]
- 'CDa', the drag coefficient derivative with respect to alpha [1/rad]
- 'CYa', the sideforce coefficient derivative with respect to alpha [1/rad]
- 'Cla', the rolling moment coefficient derivative with respect to alpha [1/rad]
- 'Cma', the pitching moment coefficient derivative with respect to alpha [1/rad]
- 'Cna', the yawing moment coefficient derivative with respect to alpha [1/rad]
- 'x_np', the neutral point location in the x direction [m]
Nondimensional values are nondimensionalized using reference values in the AeroBuildup.airplane object.
Data types:
- The "L", "Y", "D", "l_b", "m_b", "n_b", "CL", "CY", "CD", "Cl", "Cm", and "Cn" keys are:
- floats if the OperatingPoint object is not vectorized (i.e., if all attributes of OperatingPoint
are floats, not arrays).
- arrays if the OperatingPoint object is vectorized (i.e., if any attribute of OperatingPoint is an
array).
- The "F_g", "F_b", "F_w", "M_g", "M_b", and "M_w" keys are always lists, which will contain either
floats or arrays, again depending on whether the OperatingPoint object is vectorized or not.
"""
abbreviations = {
"alpha": "a",
"beta" : "b",
"p" : "p",
"q" : "q",
"r" : "r",
}
finite_difference_amounts = {
"alpha": 0.001,
"beta" : 0.001,
"p" : 0.001 * (2 * self.op_point.velocity) / self.airplane.b_ref,
"q" : 0.001 * (2 * self.op_point.velocity) / self.airplane.c_ref,
"r" : 0.001 * (2 * self.op_point.velocity) / self.airplane.b_ref,
}
scaling_factors = {
"alpha": np.degrees(1),
"beta" : np.degrees(1),
"p" : (2 * self.op_point.velocity) / self.airplane.b_ref,
"q" : (2 * self.op_point.velocity) / self.airplane.c_ref,
"r" : (2 * self.op_point.velocity) / self.airplane.b_ref,
}
original_op_point = self.op_point
# Compute the point analysis, which returns a dictionary that we will later add key:value pairs to.
run_base = self.run()
# Note for the loops below: here, "derivative numerator" and "... denominator" refer to the quantity being
# differentiated and the variable of differentiation, respectively. In other words, in the expression df/dx,
# the "numerator" is f, and the "denominator" is x. I realize that this would make a mathematician cry (as a
# partial derivative is not a fraction), but the reality is that there seems to be no commonly-accepted name
# for these terms. (Curiously, this contrasts with integration, where there is an "integrand" and a "variable
# of integration".)
for derivative_denominator in abbreviations.keys():
if not locals()[derivative_denominator]: # Basically, if the parameter from the function input is not True,
continue # Skip this run.
# This way, you can (optionally) speed up this routine if you only need static derivatives,
# or longitudinal derivatives, etc.
# These lines make a copy of the original operating point, incremented by the finite difference amount
# along the variable defined by derivative_denominator.
incremented_op_point = copy.copy(original_op_point)
incremented_op_point.__setattr__(
derivative_denominator,
original_op_point.__getattribute__(derivative_denominator) + finite_difference_amounts[
derivative_denominator]
)
aerobuildup_incremented = copy.copy(self)
aerobuildup_incremented.op_point = incremented_op_point
run_incremented = aerobuildup_incremented.run()
for derivative_numerator in [
"CL",
"CD",
"CY",
"Cl",
"Cm",
"Cn",
]:
derivative_name = derivative_numerator + abbreviations[derivative_denominator] # Gives "CLa"
run_base[derivative_name] = (
( # Finite-difference out the derivatives
run_incremented[derivative_numerator] - run_base[
derivative_numerator]
) / finite_difference_amounts[derivative_denominator]
* scaling_factors[derivative_denominator]
)
### Try to compute and append neutral point, if possible
if derivative_denominator == "alpha":
run_base["x_np"] = self.xyz_ref[0] - (
run_base["Cma"] * (self.airplane.c_ref / run_base["CLa"])
)
if derivative_denominator == "beta":
run_base["x_np_lateral"] = self.xyz_ref[0] - (
run_base["Cnb"] * (self.airplane.b_ref / run_base["CYb"])
)
return run_base
    def wing_aerodynamics(self,
                          wing: Wing,
                          include_induced_drag: bool = True,
                          ) -> Dict[str, Any]:
        """
        Estimates the aerodynamic forces, moments, and derivatives on a wing in isolation.
        Moments are given with the reference at `self.xyz_ref` (in geometry axes).
        Args:
            wing: A Wing object that you wish to analyze.
            include_induced_drag: If True, adds a sectional induced-drag term (computed from the section lift
                coefficient and the full-wing effective aspect ratio / Oswald efficiency) to the profile drag.
                If False, only profile drag is included.
        Returns:
            A dictionary with keys:
                * "F_g", "F_b", "F_w": forces in geometry, body, and wind axes, each a [Fx, Fy, Fz] list.
                * "M_g", "M_b", "M_w": moments in geometry, body, and wind axes, each a [Mx, My, Mz] list,
                  about `self.xyz_ref`.
                * "L", "Y", "D": lift, sideforce, and drag.
                * "l_b", "m_b", "n_b": rolling, pitching, and yawing moments in body axes.
                * "y_span_effective", "z_span_effective": effective spans used for induced-drag accounting.
                * "oswalds_efficiency": the Oswald span efficiency factor used for this wing.
        """
        ##### Alias a few things for convenience
        op_point = self.op_point
        # wing_options = self.get_options(wing)  # currently no wing options
        ##### Compute general wing properties
        wing_MAC = wing.mean_aerodynamic_chord()
        wing_taper = wing.taper_ratio()
        wing_sweep = wing.mean_sweep_angle()
        wing_dihedral = wing.mean_dihedral_angle()
        ###
        # y_span_effective = wing.span(
        #     type="y",
        #     include_centerline_distance=True,
        # )
        # z_span_effective = wing.span(
        #     type="z",
        #     include_centerline_distance=True,
        # )
        #
        # AR_with_center = wing.aspect_ratio(type="effective")
        # AR_without_center = wing.aspect_ratio(type="geometric")
        if wing.symmetric:
            # AR_effective = AR_without_center + (AR_with_center - AR_without_center) * np.maximum(np.cosd(wing_dihedral),
            #                                                                                      0)
            # # Approximately accounts for Trefftz-pane wake continuity.
            #
            # AR_geometric = AR_without_center
            # Blend the effective span and area between the flat-wing (0 deg dihedral) and
            # vertical-surface (90 deg dihedral) limits, weighted by sin(dihedral)^2.
            span_0_dihedral = wing.span(include_centerline_distance=True)
            span_90_dihedral = wing.span(include_centerline_distance=False) * 0.5
            area_0_dihedral = wing.area(include_centerline_distance=True)
            area_90_dihedral = wing.area(include_centerline_distance=False) * 0.5
            dihedral_factor = np.sind(wing_dihedral) ** 2
            span_effective = (
                span_0_dihedral +
                (span_90_dihedral - span_0_dihedral) * dihedral_factor
            )
            area_effective = (
                area_0_dihedral +
                (area_90_dihedral - area_0_dihedral) * dihedral_factor
            )
            y_span_effective = wing.span(type="y", include_centerline_distance=True)
            z_span_effective = wing.span(type="z", include_centerline_distance=False) / 2 ** 0.6
            # Note: The 0.6 constant is tuned from calibration to VLM experiment.
        else:
            # AR_effective = AR_without_center
            # AR_geometric = AR_without_center
            y_span_effective = wing.span(type="y", include_centerline_distance=False)
            z_span_effective = wing.span(type="z", include_centerline_distance=False)
            span_effective = wing.span(type="yz", include_centerline_distance=False)
            area_effective = wing.area(type="planform", include_centerline_distance=False)
        AR_effective = span_effective ** 2 / area_effective
        mach = op_point.mach()
        # mach_normal = mach * np.cosd(sweep)
        # Ratio of 3D (finite-wing) lift-curve slope to 2D (airfoil) lift-curve slope;
        # used below as a finite-wing correction factor.
        AR_3D_factor = aerolib.CL_over_Cl(
            aspect_ratio=AR_effective,
            mach=mach,
            sweep=wing_sweep,
            Cl_is_compressible=True
        )
        oswalds_efficiency = aerolib.oswalds_efficiency(
            taper_ratio=wing_taper,
            aspect_ratio=AR_effective,
            sweep=wing_sweep,
            fuselage_diameter_to_span_ratio=0  # an assumption
        )
        areas = wing.area(_sectional=True)
        aerodynamic_centers = wing.aerodynamic_center(_sectional=True)
        ### Model for the neutral point movement due to lifting-line unsweep near centerline
        # See /studies/AeroBuildup_LL_unsweep_calibration
        a = AR_effective / (AR_effective + 2)
        s = np.radians(wing_sweep)
        t = np.exp(-wing_taper)
        # Empirical fitted expression (from the calibration study above); shifts each section's
        # aerodynamic center in the x-direction.
        neutral_point_deviation_due_to_unsweep = -(
            ((((3.557726 ** (a ** 2.8443985)) * ((((s * a) + (t * 1.9149417)) + -1.4449639) * s)) + (
                a + -0.89228547)) * -0.16073418)
        ) * wing_MAC
        aerodynamic_centers = [
            ac + np.array([neutral_point_deviation_due_to_unsweep, 0, 0])
            for ac in aerodynamic_centers
        ]
        # Quarter-chord point of each cross section, in geometry axes; used for sweep computation.
        xsec_quarter_chords = [
            wing._compute_xyz_of_WingXSec(
                index=i,
                x_nondim=0.25,
                z_nondim=0,
            )
            for i in range(len(wing.xsecs))
        ]
        def compute_section_aerodynamics(
                sect_id: int,
                mirror_across_XZ: bool = False
        ):
            """
            Computes the forces and moments about self.xyz_ref on a given wing section.
            Args:
                sect_id: Wing section id. An int that can be from 0 to len(wing.xsecs) - 2.
                mirror_across_XZ: Boolean. If true, computes the forces and moments for the section that is mirrored across the XZ plane.
            Returns: Forces and moments, in a `(F_g, M_g)` tuple, where `F_g` and `M_g` have the following formats:
                F_g: a [Fx, Fy, Fz] list, given in geometry (`_g`) axes.
                M_g: a [Mx, My, Mz] list, given in geometry (`_g`) axes. Moment reference is `AeroBuildup.xyz_ref`.
            """
            ##### Identify the wing cross sections adjacent to this wing section.
            xsec_a = wing.xsecs[sect_id]
            xsec_b = wing.xsecs[sect_id + 1]
            ##### When linearly interpolating, weight things by the relative chord.
            a_weight = xsec_a.chord / (xsec_a.chord + xsec_b.chord)
            b_weight = xsec_b.chord / (xsec_a.chord + xsec_b.chord)
            mean_chord = (xsec_a.chord + xsec_b.chord) / 2
            ##### Compute the local frame of this section.
            xg_local, yg_local, zg_local = wing._compute_frame_of_section(sect_id)
            xg_local = [xg_local[0], xg_local[1], xg_local[2]]  # convert it to a list
            yg_local = [yg_local[0], yg_local[1], yg_local[2]]  # convert it to a list
            zg_local = [zg_local[0], zg_local[1], zg_local[2]]  # convert it to a list
            if mirror_across_XZ:
                xg_local[1] *= -1
                yg_local[1] *= -1
                zg_local[1] *= -1  # Note: if mirrored, this results in a left-handed coordinate system.
            ##### Compute the moment arm from the section AC
            sect_AC_raw = aerodynamic_centers[sect_id]
            if mirror_across_XZ:
                sect_AC_raw[1] *= -1
            sect_AC = [
                sect_AC_raw[i] - self.xyz_ref[i]
                for i in range(3)
            ]
            ##### Compute the generalized angle of attack, which is the geometric alpha that the wing section "sees".
            vel_vector_g_from_freestream = op_point.convert_axes(  # Points backwards (with relative wind)
                x_from=-op_point.velocity, y_from=0, z_from=0,
                from_axes="wind",
                to_axes="geometry"
            )
            # Additional apparent velocity at the section AC due to the aircraft rotation rates (p, q, r).
            vel_vector_g_from_rotation = np.cross(
                sect_AC,
                op_point.convert_axes(
                    op_point.p, op_point.q, op_point.r,
                    from_axes="body",
                    to_axes="geometry"
                ),
                manual=True
            )
            vel_vector_g = [
                vel_vector_g_from_freestream[i] + vel_vector_g_from_rotation[i]
                for i in range(3)
            ]
            vel_mag_g = np.sqrt(sum([comp ** 2 for comp in vel_vector_g]))
            vel_dir_g = [
                vel_vector_g[i] / vel_mag_g
                for i in range(3)
            ]
            vel_dot_x = np.dot(vel_dir_g, xg_local, manual=True)
            vel_dot_z = np.dot(vel_dir_g, zg_local, manual=True)
            # alpha_generalized = 90 - np.arccosd(np.clip(vel_dot_z, -1, 1))  # In range (-90 to 90)
            alpha_generalized = np.where(
                vel_dot_x > 0,
                90 - np.arccosd(np.clip(vel_dot_z, -1, 1)),  # In range (-90 to 90)
                90 + np.arccosd(np.clip(vel_dot_z, -1, 1))  # In range (90 to 270)
            )
            ##### Compute the effective generalized angle of attack, which roughly accounts for self-downwash
            # effects (e.g., finite-wing effects on lift curve slope). Despite this being a tuned heuristic,
            # it is surprisingly accurate! (<20% lift coefficient error against wind tunnel experiment, even at as
            # low as AR = 0.5.)
            alpha_generalized_effective = (
                alpha_generalized -
                (1 - AR_3D_factor ** 0.8) * np.sind(2 * alpha_generalized) / 2 * (180 / np.pi)
                # TODO: "center" this scaling around alpha = alpha_{airfoil, Cl=0}, not around alpha = 0.
                # TODO Can estimate airfoil's alpha_{Cl=0} by camber + thin airfoil theory + viscous decambering knockdown.
            )  # Models finite-wing increase in alpha_{CL_max}.
            ##### Compute sweep angle
            xsec_a_quarter_chord = xsec_quarter_chords[sect_id]
            xsec_b_quarter_chord = xsec_quarter_chords[sect_id + 1]
            quarter_chord_vector_g = xsec_b_quarter_chord - xsec_a_quarter_chord
            quarter_chord_dir_g = quarter_chord_vector_g / np.linalg.norm(quarter_chord_vector_g)
            quarter_chord_dir_g = [  # Convert to list
                quarter_chord_dir_g[i]
                for i in range(3)
            ]
            vel_dir_dot_quarter_chord_dir = np.dot(
                vel_dir_g,
                quarter_chord_dir_g,
                manual=True
            )
            sweep_rad = np.arcsin(vel_dir_dot_quarter_chord_dir)
            ##### Compute Reynolds numbers
            Re_a = op_point.reynolds(xsec_a.chord)
            Re_b = op_point.reynolds(xsec_b.chord)
            ##### Compute Mach numbers
            # Mach component normal to the local quarter-chord line (simple-sweep treatment).
            mach_normal = mach * np.cos(sweep_rad)
            ##### Compute effective alpha due to control surface deflections
            symmetry_treated_control_surfaces = []
            for surf in xsec_a.control_surfaces:
                if mirror_across_XZ and not surf.symmetric:
                    # Antisymmetric surfaces (e.g., ailerons) deflect oppositely on the mirrored side.
                    surf = surf.copy()
                    surf.deflection = -surf.deflection
                symmetry_treated_control_surfaces.append(surf)
            ##### Compute sectional lift at cross-sections using lookup functions. Merge them linearly to get section CL.
            kwargs = dict(
                model_size="medium",
                alpha=alpha_generalized_effective,
                mach=mach_normal,
                control_surfaces=symmetry_treated_control_surfaces,
                control_surface_strategy="polar_modification"
            )
            xsec_a_airfoil_aero = xsec_a.airfoil.get_aero_from_neuralfoil(
                Re=Re_a,
                **kwargs
            )
            xsec_b_airfoil_aero = xsec_b.airfoil.get_aero_from_neuralfoil(
                Re=Re_b,
                **kwargs
            )
            xsec_a_Cl = xsec_a_airfoil_aero["CL"]
            xsec_b_Cl = xsec_b_airfoil_aero["CL"]
            sect_CL = (
                xsec_a_Cl * a_weight +
                xsec_b_Cl * b_weight
            ) * AR_3D_factor ** 0.2  # Models slight decrease in finite-wing CL_max.
            ##### Compute sectional drag at cross-sections using lookup functions. Merge them linearly to get section CD.
            xsec_a_Cdp = xsec_a_airfoil_aero["CD"]
            xsec_b_Cdp = xsec_b_airfoil_aero["CD"]
            sect_CDp = (
                (
                    xsec_a_Cdp * a_weight +
                    xsec_b_Cdp * b_weight
                )
            )
            ##### Compute sectional moment at cross-sections using lookup functions. Merge them linearly to get section CM.
            xsec_a_Cm = xsec_a_airfoil_aero["CM"]
            xsec_b_Cm = xsec_b_airfoil_aero["CM"]
            sect_CM = (
                xsec_a_Cm * a_weight +
                xsec_b_Cm * b_weight
            )
            ##### Compute induced drag from local CL and full-wing properties (AR, e)
            if include_induced_drag:
                sect_CDi = (
                    sect_CL ** 2 / (np.pi * AR_effective * oswalds_efficiency)
                )
                sect_CD = sect_CDp + sect_CDi
            else:
                sect_CD = sect_CDp
            ##### Go to dimensional quantities using the area.
            area = areas[sect_id]
            # Local dynamic pressure, using the local (freestream + rotation) velocity magnitude.
            q_local = 0.5 * op_point.atmosphere.density() * vel_mag_g ** 2
            sect_L = q_local * area * sect_CL
            sect_D = q_local * area * sect_CD
            sect_M = q_local * area * sect_CM * mean_chord
            ##### Compute the direction of the lift by projecting the section's normal vector into the plane orthogonal to the local freestream.
            L_direction_g_unnormalized = [
                zg_local[i] - vel_dot_z * vel_dir_g[i]
                for i in range(3)
            ]
            L_direction_g_unnormalized = [  # Handles the 90 degree to 270 degree cases
                np.where(
                    vel_dot_x > 0,
                    L_direction_g_unnormalized[i],
                    -1 * L_direction_g_unnormalized[i],
                )
                for i in range(3)
            ]
            L_direction_g_mag = np.sqrt(sum([comp ** 2 for comp in L_direction_g_unnormalized]))
            L_direction_g = [
                L_direction_g_unnormalized[i] / L_direction_g_mag
                for i in range(3)
            ]
            ##### Compute the direction of the drag by aligning the drag vector with the freestream vector.
            D_direction_g = vel_dir_g
            ##### Compute the force vector in geometry axes.
            sect_F_g = [
                sect_L * L_direction_g[i] + sect_D * D_direction_g[i]
                for i in range(3)
            ]
            ##### Compute the moment vector in geometry axes.
            # Moment contribution from the force acting at the section AC...
            M_g_lift = np.cross(
                sect_AC,
                sect_F_g,
                manual=True
            )
            # ...plus the sectional pitching moment, acting about the axis perpendicular
            # to both the lift and drag directions.
            M_direction_g = np.cross(L_direction_g, D_direction_g, manual=True)
            M_g_pitching_moment = [
                M_direction_g[i] * sect_M
                for i in range(3)
            ]
            sect_M_g = [
                M_g_lift[i] + M_g_pitching_moment[i]
                for i in range(3)
            ]
            return sect_F_g, sect_M_g
        ##### Iterate through all sections and add up all forces/moments.
        F_g = [0., 0., 0.]
        M_g = [0., 0., 0.]
        for sect_id in range(len(wing.xsecs) - 1):
            sect_F_g, sect_M_g = compute_section_aerodynamics(sect_id=sect_id)
            for i in range(3):
                F_g[i] += sect_F_g[i]
                M_g[i] += sect_M_g[i]
            if wing.symmetric:
                # Add the contribution of the mirrored (left-side) copy of this section.
                sect_F_g, sect_M_g = compute_section_aerodynamics(sect_id=sect_id, mirror_across_XZ=True)
                for i in range(3):
                    F_g[i] += sect_F_g[i]
                    M_g[i] += sect_M_g[i]
        ##### Convert F_g and M_g to body and wind axes for reporting.
        F_b = op_point.convert_axes(*F_g, from_axes="geometry", to_axes="body")
        F_w = op_point.convert_axes(*F_b, from_axes="body", to_axes="wind")
        M_b = op_point.convert_axes(*M_g, from_axes="geometry", to_axes="body")
        M_w = op_point.convert_axes(*M_b, from_axes="body", to_axes="wind")
        return {
            "F_g"               : F_g,
            "F_b"               : F_b,
            "F_w"               : F_w,
            "M_g"               : M_g,
            "M_b"               : M_b,
            "M_w"               : M_w,
            "L"                 : -F_w[2],
            "Y"                 : F_w[1],
            "D"                 : -F_w[0],
            "l_b"               : M_b[0],
            "m_b"               : M_b[1],
            "n_b"               : M_b[2],
            "y_span_effective"  : y_span_effective,
            "z_span_effective"  : z_span_effective,
            "oswalds_efficiency": oswalds_efficiency
        }
    def fuselage_aerodynamics(self,
                              fuselage: Fuselage,
                              include_induced_drag: bool = True
                              ) -> Dict[str, Any]:
        """
        Estimates the aerodynamic forces, moments, and derivatives on a fuselage in isolation.
        Assumes:
        * The fuselage is a body of revolution aligned with the x_b axis.
        * The angle between the nose and the freestream is less than 90 degrees.
        Moments are given with the reference at `self.xyz_ref` (in geometry axes).
        Uses methods from Jorgensen, Leland Howard. "Prediction of Static Aerodynamic Characteristics for Slender Bodies
        Alone and with Lifting Surfaces to Very High Angles of Attack". NASA TR R-474. 1977.
        Args:
            fuselage: A Fuselage object that you wish to analyze.
            include_induced_drag: If True, adds an induced-drag term computed from the fuselage's lift and
                sideforce and its effective spans. If False, only potential-flow, viscous-crossflow, and
                profile drag terms are included.
        Returns:
            A dictionary with keys:
                * "F_g", "F_b", "F_w": forces in geometry, body, and wind axes.
                * "M_g", "M_b", "M_w": moments in geometry, body, and wind axes, about `self.xyz_ref`.
                * "L", "Y", "D": lift, sideforce, and drag.
                * "l_b", "m_b", "n_b": rolling, pitching, and yawing moments in body axes.
                * "y_span_effective", "z_span_effective": effective spans used for induced-drag accounting.
                * "oswalds_efficiency": an assumed constant (0.95), for accounting consistency with wings.
        """
        ##### Alias a few things for convenience
        op_point = self.op_point
        length = fuselage.length()
        Re = op_point.reynolds(reference_length=length)
        fuse_options = self.get_options(fuselage)
        ##### Compute general fuselage properties
        q = op_point.dynamic_pressure()
        # Crossflow-drag correction factor (eta) from Jorgensen, a function of fineness ratio.
        eta = jorgensen_eta(fuselage.fineness_ratio())
        volume = fuselage.volume()
        # Effective spans for induced-drag accounting: smooth (scale-free softmax) maxima of the
        # cross-section widths and heights.
        y_span_effective = softmax_scalefree(
            [
                xsec.width
                for xsec in fuselage.xsecs
            ],
        )
        z_span_effective = softmax_scalefree(
            [
                xsec.height
                for xsec in fuselage.xsecs
            ],
        )
        def compute_section_aerodynamics(
                sect_id: int,
        ):
            """
            Computes the forces and moments about self.xyz_ref on a given fuselage section
            (the slice between cross sections sect_id and sect_id + 1), returned as a
            `(F_g, M_g)` tuple in geometry axes.
            """
            ##### Identify the fuselage cross sections adjacent to this fuselage section.
            xsec_a = fuselage.xsecs[sect_id]
            xsec_b = fuselage.xsecs[sect_id + 1]
            ### Some metrics, like effective force location, are area-weighted. Here, we compute those weights.
            r_a = xsec_a.equivalent_radius(
                preserve="area")  # TODO modify AeroBuildup for improved accuracy on non-circular fuses
            r_b = xsec_b.equivalent_radius(preserve="area")
            xyz_a = xsec_a.xyz_c
            xyz_b = xsec_b.xyz_c
            area_a = xsec_a.xsec_area()
            area_b = xsec_b.xsec_area()
            total_area = area_a + area_b
            a_weight = area_a / total_area
            b_weight = area_b / total_area
            mean_geometric_radius = (r_a + r_b) / 2
            mean_aerodynamic_radius = r_a * a_weight + r_b * b_weight
            ### Compute the key geometric properties of the centerline between the two sections.
            sect_length = np.sqrt(sum([(xyz_b[i] - xyz_a[i]) ** 2 for i in range(3)]))
            # Unit vector along the section's centerline; falls back to [1, 0, 0] for zero-length sections.
            xg_local = [
                np.where(
                    sect_length != 0,
                    (xyz_b[i] - xyz_a[i]) / (sect_length + 1e-100),
                    1 if i == 0 else 0  # Default to [1, 0, 0]
                )
                for i in range(3)
            ]
            ##### Compute the moment arm from the section AC
            sect_AC = [
                (xyz_a[i] + xyz_b[i]) / 2 - self.xyz_ref[i]
                for i in range(3)
            ]
            ##### Compute the generalized angle of attack that the section sees
            vel_direction_g = op_point.convert_axes(-1, 0, 0, from_axes="wind", to_axes="geometry")
            vel_dot_x = np.dot(
                vel_direction_g,
                xg_local,
                manual=True
            )
            def soft_norm(xyz):
                # Euclidean norm with a tiny epsilon, so its derivative is defined at zero.
                return (
                    sum([comp ** 2 for comp in xyz])
                    + 1e-100  # Keeps the derivative from NaNing
                ) ** 0.5
            # Half-angle identity: angle between the freestream direction and the section axis,
            # computed in a differentiation-friendly way.
            generalized_alpha = 2 * np.arctan2d(
                soft_norm([vel_direction_g[i] - xg_local[i] for i in range(3)]),
                soft_norm([vel_direction_g[i] + xg_local[i] for i in range(3)])
            )
            sin_generalized_alpha = np.sind(generalized_alpha)
            ##### Compute the normal-force and axial-force directions
            normal_direction_g_unnormalized = [
                vel_direction_g[i] - vel_dot_x * xg_local[i]
                for i in range(3)
            ]
            normal_direction_g_unnormalized[2] += 1e-100  # A hack that prevents NaN for 0-AoA case.
            normal_direction_g_mag = np.sqrt(sum([comp ** 2 for comp in normal_direction_g_unnormalized]))
            normal_direction_g = [
                normal_direction_g_unnormalized[i] / normal_direction_g_mag
                for i in range(3)
            ]
            axial_direction_g = xg_local
            ##### Inviscid Forces
            ### Jorgensen model
            ### Note the (N)ormal, (A)ligned coordinate system. (See Jorgensen for definitions.)
            force_potential_flow = q * (  # From Munk, via Jorgensen
                np.sind(2 * generalized_alpha) *
                (area_b - area_a)
            )  # Matches Drela, Flight Vehicle Aerodynamics Eqn. 6.75 in the small-alpha limit.
            # Note that no delta_x should be here; dA/dx * dx = dA.
            # Make the direction of the force perpendicular to the velocity vector
            force_potential_flow_g = op_point.convert_axes(
                0, 0, -force_potential_flow,
                from_axes="wind",
                to_axes="geometry",
            )
            ##### Viscous Forces
            ### Jorgensen model
            # Crossflow Reynolds and Mach numbers (components normal to the section axis).
            Re_n = sin_generalized_alpha * op_point.reynolds(reference_length=2 * mean_aerodynamic_radius)
            M_n = sin_generalized_alpha * op_point.mach()
            C_d_n = np.where(
                Re_n != 0,
                aerolib.Cd_cylinder(
                    Re_D=Re_n,
                    mach=M_n
                ),  # Replace with 1.20 from Jorgensen Table 1 if this isn't working well
                0,
            )
            force_viscous_crossflow = sect_length * q * (
                2 * eta * C_d_n *
                sin_generalized_alpha ** 2 *
                mean_geometric_radius
            )
            ##### Viscous crossflow acts exactly normal to fuselage section axis, definitionally.
            # (Axial viscous forces accounted for on a total-body basis)
            force_viscous_crossflow_g = [
                force_viscous_crossflow * normal_direction_g[i]
                for i in range(3)
            ]
            ##### Compute the force vector in geometry axes
            sect_F_g = [
                force_potential_flow_g[i] + force_viscous_crossflow_g[i]
                for i in range(3)
            ]
            ##### Compute the moment vector in geometry axes.
            sect_M_g = np.cross(
                sect_AC,
                sect_F_g,
                manual=True
            )
            return sect_F_g, sect_M_g
        ##### Iterate through all sections and add up all forces/moments.
        F_g = [0., 0., 0.]
        M_g = [0., 0., 0.]
        for sect_id in range(len(fuselage.xsecs) - 1):
            sect_F_g, sect_M_g = compute_section_aerodynamics(sect_id=sect_id)
            for i in range(3):
                F_g[i] += sect_F_g[i]
                M_g[i] += sect_M_g[i]
        ##### Add in profile drag: viscous drag forces and wave drag forces
        ### Base Drag
        base_drag_coefficient = fuselage_base_drag_coefficient(mach=op_point.mach())
        D_base = base_drag_coefficient * fuselage.area_base() * q
        ### Skin friction drag
        form_factor = fuselage_form_factor(
            fineness_ratio=fuselage.fineness_ratio(),
            ratio_of_corner_radius_to_body_width=0.5
        )
        C_f_ideal = (
            # From the same study as the `fuselage_form_factor` function above. This is done on purpose
            # as the form factor in this particular paper is a fit that correlates best using this precise
            # definition of C_f_ideal.
            3.46 * np.log10(Re) - 5.6
        ) ** -2
        C_f = C_f_ideal * form_factor
        D_skin = C_f * fuselage.area_wetted() * q
        ### Wave drag
        S_ref = 1  # Does not matter here, just for accounting.
        if self.include_wave_drag:
            # Scale the Sears-Haack (minimum-wave-drag body) result by the fuselage's wave-drag
            # efficiency factor from the analysis options.
            sears_haack_drag_area = transonic.sears_haack_drag_from_volume(
                volume=fuselage.volume(),
                length=fuselage.length()
            )  # Units of area
            sears_haack_C_D_wave = sears_haack_drag_area / S_ref
            C_D_wave = transonic.approximate_CD_wave(
                mach=op_point.mach(),
                mach_crit=critical_mach(
                    fineness_ratio_nose=fuse_options["nose_fineness_ratio"]
                ),
                CD_wave_at_fully_supersonic=fuse_options["E_wave_drag"] * sears_haack_C_D_wave,
            )
        else:
            C_D_wave = 0
        D_wave = C_D_wave * q * S_ref
        ### Sum up the profile drag
        D_profile = D_base + D_skin + D_wave
        D_profile_g = op_point.convert_axes(
            -D_profile, 0, 0,
            from_axes="wind",
            to_axes="geometry",
        )
        # Profile drag is applied at the last cross section's center.
        drag_moment_arm = [
            fuselage.xsecs[-1].xyz_c[i] - self.xyz_ref[i]  # TODO make this act at centroid
            for i in range(3)
        ]
        M_g_from_D_profile = np.cross(
            drag_moment_arm,
            D_profile_g,
            manual=True
        )
        for i in range(3):
            F_g[i] += D_profile_g[i]
            M_g[i] += M_g_from_D_profile[i]
        ### Compute the induced drag, if relevant
        if include_induced_drag:
            _, sideforce, lift = op_point.convert_axes(
                *F_g,
                from_axes="geometry",
                to_axes="wind"
            )
            # Induced drag from lift and sideforce, each normalized by its own effective span.
            D_induced = (
                lift ** 2 / (op_point.dynamic_pressure() * np.pi * y_span_effective ** 2) +
                sideforce ** 2 / (op_point.dynamic_pressure() * np.pi * z_span_effective ** 2)
            )
            D_induced_g = op_point.convert_axes(
                -D_induced, 0, 0,
                from_axes="wind",
                to_axes="geometry",
            )
            for i in range(3):
                F_g[i] += D_induced_g[i]
        ##### Convert F_g and M_g to body and wind axes for reporting.
        F_b = op_point.convert_axes(*F_g, from_axes="geometry", to_axes="body")
        F_w = op_point.convert_axes(*F_b, from_axes="body", to_axes="wind")
        M_b = op_point.convert_axes(*M_g, from_axes="geometry", to_axes="body")
        M_w = op_point.convert_axes(*M_b, from_axes="body", to_axes="wind")
        return {
            "F_g"               : F_g,
            "F_b"               : F_b,
            "F_w"               : F_w,
            "M_g"               : M_g,
            "M_b"               : M_b,
            "M_w"               : M_w,
            "L"                 : -F_w[2],
            "Y"                 : F_w[1],
            "D"                 : -F_w[0],
            "l_b"               : M_b[0],
            "m_b"               : M_b[1],
            "n_b"               : M_b[2],
            "y_span_effective"  : y_span_effective,
            "z_span_effective"  : z_span_effective,
            "oswalds_efficiency": 0.95,
        }
if __name__ == '__main__':
    # Demonstration / sanity-check script: runs AeroBuildup on a conventional test airplane,
    # then plots aerodynamic coefficients vs. alpha and over an (alpha, beta) grid.
    from aerosandbox.aerodynamics.aero_3D.test_aero_3D.geometries.conventional import airplane
    import os
    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p
    # Single-point run (exercises the nonzero-beta path).
    aero = AeroBuildup(
        airplane=airplane,
        op_point=OperatingPoint(alpha=0, beta=1),
    ).run()
    # Alpha sweep: plot CL, CD, Cm, and L/D vs. angle of attack.
    fig, ax = plt.subplots(2, 2)
    alpha = np.linspace(-20, 20, 1000)
    aero = AeroBuildup(
        airplane=airplane,
        op_point=OperatingPoint(
            velocity=10,
            alpha=alpha,
            beta=0
        ),
    ).run()
    plt.sca(ax[0, 0])
    plt.plot(alpha, aero["CL"])
    plt.xlabel(r"$\alpha$ [deg]")
    plt.ylabel(r"$C_L$")
    p.set_ticks(5, 1, 0.5, 0.1)
    plt.sca(ax[0, 1])
    plt.plot(alpha, aero["CD"])
    plt.xlabel(r"$\alpha$ [deg]")
    plt.ylabel(r"$C_D$")
    p.set_ticks(5, 1, 0.05, 0.01)
    plt.ylim(bottom=0)
    plt.sca(ax[1, 0])
    plt.plot(alpha, aero["Cm"])
    plt.xlabel(r"$\alpha$ [deg]")
    plt.ylabel(r"$C_m$")
    p.set_ticks(5, 1, 0.5, 0.1)
    plt.sca(ax[1, 1])
    plt.plot(alpha, aero["CL"] / aero["CD"])
    plt.xlabel(r"$\alpha$ [deg]")
    plt.ylabel(r"$C_L/C_D$")
    p.set_ticks(5, 1, 10, 2)
    p.show_plot(
        "`asb.AeroBuildup` Aircraft Aerodynamics"
    )
    # 2D sweep over (alpha, beta), each from -90 to +90 deg: contour plots of CL, CD, and L/D.
    Beta, Alpha = np.meshgrid(np.linspace(-90, 90, 200), np.linspace(-90, 90, 200))
    aero = AeroBuildup(
        airplane=airplane,
        op_point=OperatingPoint(
            velocity=10,
            alpha=Alpha.flatten(),
            beta=Beta.flatten()
        ),
    ).run()
    def show():
        # Shared axis styling/labels for the contour plots below.
        p.set_ticks(15, 5, 15, 5)
        p.equal()
        p.show_plot(
            "`asb.AeroBuildup` Aircraft Aerodynamics",
            r"Sideslip angle $\beta$ [deg]",
            r"Angle of Attack $\alpha$ [deg]"
        )
    fig, ax = plt.subplots(figsize=(6, 5))
    p.contour(
        Beta, Alpha, aero["CL"].reshape(Alpha.shape),
        colorbar_label="Lift Coefficient $C_L$ [-]",
        linelabels_format=lambda x: f"{x:.2f}",
        linelabels_fontsize=7,
        cmap="RdBu",
        alpha=0.6
    )
    # Symmetric color limits about zero, so the diverging colormap is centered.
    plt.clim(*np.array([-1, 1]) * np.max(np.abs(aero["CL"])))
    show()
    fig, ax = plt.subplots(figsize=(6, 5))
    p.contour(
        Beta, Alpha, aero["CD"].reshape(Alpha.shape),
        colorbar_label="Drag Coefficient $C_D$ [-]",
        linelabels_format=lambda x: f"{x:.2f}",
        linelabels_fontsize=7,
        z_log_scale=True,
        cmap="YlOrRd",
        alpha=0.6
    )
    show()
    fig, ax = plt.subplots(figsize=(6, 5))
    p.contour(
        Beta, Alpha, (aero["CL"] / aero["CD"]).reshape(Alpha.shape),
        levels=15,
        colorbar_label="$C_L / C_D$ [-]",
        linelabels_format=lambda x: f"{x:.0f}",
        linelabels_fontsize=7,
        cmap="RdBu",
        alpha=0.6
    )
    plt.clim(*np.array([-1, 1]) * np.max(np.abs(aero["CL"] / aero["CD"])))
show() | AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/aerodynamics/aero_3D/aero_buildup.py | aero_buildup.py |
from aerosandbox.common import ExplicitAnalysis
import aerosandbox.numpy as np
import subprocess
from pathlib import Path
from aerosandbox.geometry import Airplane, Wing, WingXSec, Fuselage, ControlSurface
from aerosandbox.performance import OperatingPoint
from typing import Union, List, Dict, Any
import copy
import tempfile
import warnings
class AVL(ExplicitAnalysis):
    """
    An interface to AVL, a 3D vortex lattice aerodynamics code developed by Mark Drela at MIT.
    Requires AVL to be on your computer; AVL is available here: https://web.mit.edu/drela/Public/web/avl/
    It is recommended (but not required) that you add AVL to your system PATH environment variable such that it can
    be called with the command `avl`. If this is not the case, you need to specify the path to your AVL
    executable using the `avl_command` argument of the constructor.
    Usage example:
        >>> avl = asb.AVL(
        >>>     airplane=my_airplane,
        >>>     op_point=asb.OperatingPoint(
        >>>         velocity=100, # m/s
        >>>         alpha=5, # deg
        >>>         beta=4, # deg
        >>>         p=0.01, # rad/sec
        >>>         q=0.02, # rad/sec
        >>>         r=0.03, # rad/sec
        >>>     )
        >>> )
        >>> outputs = avl.run()
    """
    # Default analysis options, keyed by geometry-object type. These can be overridden
    # per-object, and are retrieved during .avl file generation via `self.get_options(...)`.
    default_analysis_specific_options = {
        Airplane: dict(
            profile_drag_coefficient=0
        ),
        Wing    : dict(
            wing_level_spanwise_spacing=True,  # If True, spanwise spacing is specified once per wing rather than per cross section.
            spanwise_resolution=12,
            spanwise_spacing="cosine",
            chordwise_resolution=12,
            chordwise_spacing="cosine",
            component=None,  # This is an int
            no_wake=False,
            no_alpha_beta=False,
            no_load=False,
            drag_polar=dict(  # 3-point (CL, CD) profile-drag polar, as used by AVL's CDCL card.
                CL1=0,
                CD1=0,
                CL2=0,
                CD2=0,
                CL3=0,
                CD3=0,
            ),
        ),
        WingXSec: dict(
            spanwise_resolution=12,
            spanwise_spacing="cosine",
            cl_alpha_factor=None,  # This is a float
            drag_polar=dict(
                CL1=0,
                CD1=0,
                CL2=0,
                CD2=0,
                CL3=0,
                CD3=0,
            )
        ),
        Fuselage: dict(
            panel_resolution=24,
            panel_spacing="cosine"
        )
    }
    # Maps human-readable spacing names to AVL's integer spacing codes (Cspace/Sspace).
    AVL_spacing_parameters = {
        "uniform": 0,
        "cosine" : 1,
        "sine"   : 2,
        "-sine"  : -2,
        "equal"  : 0,  # "uniform" is preferred
    }
    def __init__(self,
                 airplane: Airplane,
                 op_point: OperatingPoint,
                 xyz_ref: List[float] = None,
                 avl_command: str = "avl",
                 verbose: bool = False,
                 timeout: Union[float, int, None] = 5,
                 working_directory: str = None,
                 ground_effect: bool = False,
                 ground_effect_height: float = 0
                 ):
        """
        Interface to AVL.
        Args:
            airplane: The airplane object you wish to analyze.
            op_point: The operating point you wish to analyze at.
            xyz_ref: The moment reference point, as [x, y, z]. Defaults to `airplane.xyz_ref`.
            avl_command: The command-line argument to call AVL.
                * If AVL is on your system PATH, then you can just leave this as "avl".
                * If AVL is not on your system PATH, then you should provide a filepath to the AVL executable.
                Note that AVL is not on your PATH by default. To tell if AVL is on your system PATH, open up a
                terminal and type "avl".
                    * If the AVL menu appears, it's on your PATH.
                    * If you get something like "'avl' is not recognized as an internal or external command..." or
                    "Command 'avl' not found, did you mean...", then it is not on your PATH and you'll need to
                    specify the location of your AVL executable as a string.
                To add AVL to your path, modify your system's environment variables. (Google how to do this for your OS.)
            verbose: Controls whether or not AVL output is printed to command line.
            timeout: Controls how long any individual AVL run is allowed to run before the
                process is killed. Given in units of seconds. To disable timeout, set this to None.
            working_directory: Controls which working directory is used for the AVL input and output files. By
                default, this is set to a TemporaryDirectory that is deleted after the run. However, you can set it to
                somewhere local for debugging purposes.
            ground_effect: If True, enables AVL's ground-plane (Z-symmetry) modeling; written to the
                IZsym field of the .avl file.
            ground_effect_height: The Zsym height of the ground plane used when `ground_effect` is True.
        """
        super().__init__()
        ### Set defaults
        if xyz_ref is None:
            xyz_ref = airplane.xyz_ref
        ### Initialize
        self.airplane = airplane
        self.op_point = op_point
        self.xyz_ref = xyz_ref
        self.avl_command = avl_command
        self.verbose = verbose
        self.timeout = timeout
        self.working_directory = working_directory
        self.ground_effect = ground_effect
        self.ground_effect_height = ground_effect_height
def __repr__(self):
return self.__class__.__name__ + "(\n\t" + "\n\t".join([
f"airplane={self.airplane}",
f"op_point={self.op_point}",
f"xyz_ref={self.xyz_ref}",
]) + "\n)"
    def open_interactive(self) -> None:
        """
        Opens a new terminal window and runs AVL interactively. This is useful for detailed analysis or debugging.
        Writes the airplane's .avl geometry file into the working directory first, so that the
        interactive session starts with the geometry already loaded.
        Currently implemented for Windows only; raises NotImplementedError on other platforms.
        Returns: None
        """
        with tempfile.TemporaryDirectory() as directory:
            directory = Path(directory)
            ### Alternatively, work in another directory:
            if self.working_directory is not None:
                directory = Path(self.working_directory)  # For debugging
            ### Handle the airplane file
            airplane_file = "airplane.avl"
            self.write_avl(directory / airplane_file)
            ### Open up AVL
            import sys, os
            if sys.platform == "win32":
                # Run AVL in a fresh console window, and block until the user quits it.
                print("Running AVL interactively in a new window, quit it to continue...")
                command = f'cmd /k "{self.avl_command} {airplane_file}"'
                process = subprocess.Popen(
                    command,
                    cwd=directory,
                    creationflags=subprocess.CREATE_NEW_CONSOLE,
                )
                process.wait()
            else:
                raise NotImplementedError(
                    "Ability to auto-launch interactive AVL sessions isn't yet implemented for non-Windows OSes."
                )
    def run(self,
            run_command: str = None,
            ) -> Dict[str, float]:
        """
        Runs AVL on this airplane/operating point, and parses the results.
        Args: run_command: A string with any AVL keystroke inputs that you'd like. By default, you start off within the OPER
            menu. All of the inputs indicated in the constructor have been set already, but you can override them here (
            for this run only) if you want.
        Returns: A dictionary containing all of your results.
        """
        with tempfile.TemporaryDirectory() as directory:
            directory = Path(directory)
            ### Alternatively, work in another directory:
            if self.working_directory is not None:
                directory = Path(self.working_directory)  # For debugging
            # Designate an intermediate file for file I/O
            output_filename = "output.txt"
            # Handle the airplane file
            airplane_file = "airplane.avl"
            self.write_avl(directory / airplane_file)
            # Handle the run file
            keystroke_file_contents = self._default_keystroke_file_contents()
            if run_command is not None:
                keystroke_file_contents += [run_command]
            # "x" executes the analysis; "st" writes the stability-derivative summary to the
            # output file; then back out of the menus and quit.
            keystroke_file_contents += [
                "x",
                "st",
                f"{output_filename}",
                "o",
                "",
                "",
                "quit"
            ]
            keystrokes = "\n".join(keystroke_file_contents)
            command = f'{self.avl_command} {airplane_file}'
            ### Execute
            # Keystrokes are fed to AVL via stdin; output is suppressed unless self.verbose.
            try:
                proc = subprocess.Popen(
                    command,
                    cwd=directory,
                    stdin=subprocess.PIPE,
                    stdout=None if self.verbose else subprocess.DEVNULL,
                    stderr=None if self.verbose else subprocess.DEVNULL,
                    text=True,
                    # shell=True,
                    # timeout=self.timeout,
                    # check=True
                )
                outs, errs = proc.communicate(
                    input=keystrokes,
                    timeout=self.timeout
                )
                return_code = proc.poll()
            except subprocess.TimeoutExpired:
                # Kill the hung AVL process, warn, and fall through to try parsing whatever
                # output (if any) was produced before the timeout.
                proc.kill()
                outs, errs = proc.communicate()
                warnings.warn(
                    "AVL run timed out!\n"
                    "If this was not expected, try increasing the `timeout` parameter\n"
                    "when you create this AeroSandbox AVL instance.",
                    stacklevel=2
                )
            ##### Parse the output file
            # Read the file
            try:
                with open(directory / output_filename, "r") as f:
                    output_data = f.read()
            except FileNotFoundError:
                raise FileNotFoundError(
                    "It appears AVL didn't produce an output file, probably because it crashed.\n"
                    "To troubleshoot, try some combination of the following:\n"
                    "\t - In the AVL constructor, verify that either AVL is on PATH or that the `avl_command` parameter is set.\n"
                    "\t - In the AVL constructor, run with `verbose=True`.\n"
                    "\t - In the AVL constructor, set the `working_directory` parameter to a known folder to see the AVL input and output files.\n"
                    "\t - In the AVL constructor, set the `timeout` parameter to a large number to see if AVL is just taking a long time to run.\n"
                    "\t - On Windows, use `avl.open_interactive()` to run AVL interactively in a new window.\n"
                )
            # Parse "<name> = <value>" pairs out of AVL's unformatted text output.
            res = self.parse_unformatted_data_output(output_data, data_identifier=" =", overwrite=False)
            ##### Clean up results
            # Lowercase a few keys for consistency with the rest of AeroSandbox.
            for key_to_lowerize in ["Alpha", "Beta", "Mach"]:
                res[key_to_lowerize.lower()] = res.pop(key_to_lowerize)
            # Strip AVL's "tot" suffix (e.g., "CLtot" -> "CL").
            for key in list(res.keys()):
                if "tot" in key:
                    res[key.replace("tot", "")] = res.pop(key)
            ##### Add in missing useful results
            q = self.op_point.dynamic_pressure()
            S = self.airplane.s_ref
            b = self.airplane.b_ref
            c = self.airplane.c_ref
            # Re-dimensionalize AVL's nondimensional rotation rates and coefficients.
            res["p"] = res["pb/2V"] * (2 * self.op_point.velocity / b)
            res["q"] = res["qc/2V"] * (2 * self.op_point.velocity / c)
            res["r"] = res["rb/2V"] * (2 * self.op_point.velocity / b)
            res["L"] = q * S * res["CL"]
            res["Y"] = q * S * res["CY"]
            res["D"] = q * S * res["CD"]
            res["l_b"] = q * S * b * res["Cl"]
            res["m_b"] = q * S * c * res["Cm"]
            res["n_b"] = q * S * b * res["Cn"]
            # Spiral-mode stability parameter (Clb*Cnr)/(Clr*Cnb); commonly, > 1 suggests spiral stability.
            try:
                res["Clb Cnr / Clr Cnb"] = res["Clb"] * res["Cnr"] / (res["Clr"] * res["Cnb"])
            except ZeroDivisionError:
                res["Clb Cnr / Clr Cnb"] = np.nan
            # Assemble force and moment vectors in wind, body, and geometry axes.
            res["F_w"] = [
                -res["D"], res["Y"], -res["L"]
            ]
            res["F_b"] = self.op_point.convert_axes(*res["F_w"], from_axes="wind", to_axes="body")
            res["F_g"] = self.op_point.convert_axes(*res["F_b"], from_axes="body", to_axes="geometry")
            res["M_b"] = [
                res["l_b"], res["m_b"], res["n_b"]
            ]
            res["M_g"] = self.op_point.convert_axes(*res["M_b"], from_axes="body", to_axes="geometry")
            res["M_w"] = self.op_point.convert_axes(*res["M_b"], from_axes="body", to_axes="wind")
            return res
def _default_keystroke_file_contents(self) -> List[str]:
run_file_contents = []
# Disable graphics
run_file_contents += [
"plop",
"g",
"",
]
# Enter oper mode
run_file_contents += [
"oper",
]
# Direct p, q, r to be in body axes, to match ASB convention
run_file_contents += [
"o",
"r",
"",
]
# Set parameters
run_file_contents += [
"m",
f"mn {float(self.op_point.mach())}",
f"v {float(self.op_point.velocity)}",
f"d {float(self.op_point.atmosphere.density())}",
"g 9.81",
""
]
# Set analysis state
p_bar = self.op_point.p * self.airplane.b_ref / (2 * self.op_point.velocity)
q_bar = self.op_point.q * self.airplane.c_ref / (2 * self.op_point.velocity)
r_bar = self.op_point.r * self.airplane.b_ref / (2 * self.op_point.velocity)
run_file_contents += [
f"a a {float(self.op_point.alpha)}",
f"b b {float(self.op_point.beta)}",
f"r r {float(p_bar)}",
f"p p {float(q_bar)}",
f"y y {float(r_bar)}"
]
# Set control surface deflections
run_file_contents += [
f"d1 d1 1"
]
return run_file_contents
def write_avl(self,
              filepath: Union[Path, str] = None,
              ) -> Union[str, None]:
    """
    Writes a .avl file corresponding to this airplane to a filepath.

    For use with the AVL vortex-lattice-method aerodynamics analysis tool by Mark Drela at MIT.
    AVL is available here: https://web.mit.edu/drela/Public/web/avl/

    Args:
        filepath: filepath (including the filename and .avl extension) [string]
            If None, this function returns the .avl file contents as a string instead of
            writing to disk. Note that a filepath is required whenever the airplane
            contains fuselages, since fuselage geometry must be written to auxiliary
            BFILE files on disk alongside the .avl file.

    Returns: The .avl file contents as a string if `filepath` is None; otherwise None.
    """

    def clean(s):
        """
        Removes leading and trailing whitespace from each line of a multi-line string.
        """
        return "\n".join([line.strip() for line in s.split("\n")])

    airplane = self.airplane

    avl_file = ""

    airplane_options = self.get_options(airplane)

    # File header: name, Mach (placeholder), symmetry flags, reference quantities,
    # moment-reference point, and profile drag coefficient.
    avl_file += clean(f"""\
    {airplane.name}
    #Mach
    0 ! AeroSandbox note: This is overwritten later to match the current OperatingPoint Mach during the AVL run.
    #IYsym IZsym Zsym
    0 {1 if self.ground_effect else 0} {self.ground_effect_height}
    #Sref Cref Bref
    {airplane.s_ref} {airplane.c_ref} {airplane.b_ref}
    #Xref Yref Zref
    {self.xyz_ref[0]} {self.xyz_ref[1]} {self.xyz_ref[2]}
    # CDp
    {airplane_options["profile_drag_coefficient"]}
    """)

    for wing in airplane.wings:

        wing_options = self.get_options(wing)

        spacing_line = f"{wing_options['chordwise_resolution']} {self.AVL_spacing_parameters[wing_options['chordwise_spacing']]}"
        if wing_options["wing_level_spanwise_spacing"]:  # Spanwise spacing defined wing-wide
            spacing_line += f" {wing_options['spanwise_resolution']} {self.AVL_spacing_parameters[wing_options['spanwise_spacing']]}"

        avl_file += clean(f"""\
    #{"=" * 50}
    SURFACE
    {wing.name}
    #Nchordwise Cspace [Nspanwise Sspace]
    {spacing_line}
    """)

        if wing_options["component"] is not None:
            avl_file += clean(f"""\
    COMPONENT
    {wing_options['component']}
    """)

        if wing.symmetric:
            avl_file += clean(f"""\
    YDUPLICATE
    0
    """)

        if wing_options["no_wake"]:
            avl_file += clean(f"""\
    NOWAKE
    """)

        if wing_options["no_alpha_beta"]:
            avl_file += clean(f"""\
    NOALBE
    """)

        if wing_options["no_load"]:
            avl_file += clean(f"""\
    NOLOAD
    """)

        polar = wing_options["drag_polar"]
        avl_file += clean(f"""\
    CDCL
    #CL1 CD1 CL2 CD2 CL3 CD3
    {polar["CL1"]} {polar["CD1"]} {polar["CL2"]} {polar["CD2"]} {polar["CL3"]} {polar["CD3"]}
    """)

        ### Build up a buffer of the control surface strings to write to each section
        control_surface_commands: List[List[str]] = [
            []
            for _ in wing.xsecs
        ]
        for i, xsec in enumerate(wing.xsecs[:-1]):
            for surf in xsec.control_surfaces:
                # AVL Xhinge sign convention: positive hinge location for
                # trailing-edge surfaces, negative for leading-edge surfaces.
                xhinge = surf.hinge_point if surf.trailing_edge else -surf.hinge_point
                xyz_hinge_vector = wing._compute_frame_of_section(i)[1]
                sign_dup = 1 if surf.symmetric else -1

                command = clean(f"""\
    CONTROL
    #name, gain, Xhinge, XYZhvec, SgnDup
    all_deflections {surf.deflection} {xhinge} {xyz_hinge_vector[0]} {xyz_hinge_vector[1]} {xyz_hinge_vector[2]} {sign_dup}
    """)
                # A control surface spans between two sections, so attach the
                # command to both its inboard and outboard sections.
                control_surface_commands[i].append(command)
                control_surface_commands[i + 1].append(command)

        ### Write the commands for each wing section
        for i, xsec in enumerate(wing.xsecs):
            xsec_options = self.get_options(xsec)
            xsec_def_line = f"{xsec.xyz_le[0]} {xsec.xyz_le[1]} {xsec.xyz_le[2]} {xsec.chord} {xsec.twist}"
            if not wing_options["wing_level_spanwise_spacing"]:  # Spanwise spacing defined per-section
                xsec_def_line += f" {xsec_options['spanwise_resolution']} {self.AVL_spacing_parameters[xsec_options['spanwise_spacing']]}"

            if xsec_options["cl_alpha_factor"] is None:
                claf_line = f"{1 + 0.77 * xsec.airfoil.max_thickness()} # Computed using rule from avl_doc.txt"
            else:
                claf_line = f"{xsec_options['cl_alpha_factor']}"

            avl_file += clean(f"""\
    #{"-" * 50}
    SECTION
    #Xle Yle Zle Chord Ainc [Nspanwise Sspace]
    {xsec_def_line}
    AIRFOIL
    {xsec.airfoil.repanel(50).write_dat(filepath=None, include_name=False)}
    CLAF
    {claf_line}
    """)

            polar = xsec_options["drag_polar"]
            avl_file += clean(f"""\
    CDCL
    #CL1 CD1 CL2 CD2 CL3 CD3
    {polar["CL1"]} {polar["CD1"]} {polar["CL2"]} {polar["CD2"]} {polar["CL3"]} {polar["CD3"]}
    """)

            for control_surface_command in control_surface_commands[i]:
                avl_file += control_surface_command

    # Bugfix: `Path(None)` raises a TypeError, so only convert when a filepath was given.
    if filepath is not None:
        filepath = Path(filepath)

    for i, fuse in enumerate(airplane.fuselages):
        if filepath is None:
            raise ValueError(
                "A `filepath` must be provided when the airplane contains fuselages, "
                "since fuselage geometry is written to auxiliary BFILE files on disk."
            )

        fuse_filepath = Path(str(filepath) + f".fuse{i}")
        self.write_avl_bfile(
            fuselage=fuse,
            filepath=fuse_filepath
        )
        fuse_options = self.get_options(fuse)

        avl_file += clean(f"""\
    #{"=" * 50}
    BODY
    {fuse.name}
    {fuse_options['panel_resolution']} {self.AVL_spacing_parameters[fuse_options['panel_spacing']]}
    BFIL
    {fuse_filepath}
    """)

    if filepath is not None:
        with open(filepath, "w+") as f:
            f.write(avl_file)
        return None
    else:
        return avl_file
@staticmethod
def write_avl_bfile(fuselage,
filepath: Union[Path, str] = None,
include_name: bool = True,
) -> str:
"""
Writes an AVL-compatible BFILE corresponding to this fuselage to a filepath.
For use with the AVL vortex-lattice-method aerodynamics analysis tool by Mark Drela at MIT.
AVL is available here: https://web.mit.edu/drela/Public/web/avl/
Args:
filepath: filepath (including the filename and .avl extension) [string]
If None, this function returns the would-be file contents as a string.
include_name: Should the name of the fuselage be included in the .dat file? (This should be True for use with AVL.)
Returns:
"""
filepath = Path(filepath)
contents = []
if include_name:
contents += [fuselage.name]
contents += [
f"{xyz_c[0]} {xyz_c[2] + r}"
for xyz_c, r in zip(
[xsec.xyz_c for xsec in fuselage.xsecs][::-1],
[xsec.equivalent_radius(preserve="area") for xsec in fuselage.xsecs][::-1]
)
] + [
f"{xyz_c[0]} {xyz_c[2] - r}"
for xyz_c, r in zip(
[xsec.xyz_c for xsec in fuselage.xsecs][1:],
[xsec.equivalent_radius(preserve="area") for xsec in fuselage.xsecs][1:]
)
]
string = "\n".join(contents)
if filepath is not None:
with open(filepath, "w+") as f:
f.write(string)
return string
@staticmethod
def parse_unformatted_data_output(
s: str,
data_identifier: str = " = ",
cast_outputs_to_float: bool = True,
overwrite: bool = None
) -> Dict[str, float]:
"""
Parses a (multiline) string of unformatted data into a nice and tidy dictionary.
The expected input string looks like what you might get as an output from AVL (or many other Drela codes),
which may list data in ragged order.
An example input `s` that you might want to parse could look like the following:
```
Standard axis orientation, X fwd, Z down
Run case: -unnamed-
Alpha = 0.43348 pb/2V = -0.00000 p'b/2V = -0.00000
Beta = 0.00000 qc/2V = 0.00000
Mach = 0.003 rb/2V = -0.00000 r'b/2V = -0.00000
CXtot = -0.02147 Cltot = 0.00000 Cl'tot = 0.00000
CYtot = 0.00000 Cmtot = 0.28149
CZtot = -1.01474 Cntot = -0.00000 Cn'tot = -0.00000
CLtot = 1.01454
CDtot = 0.02915
CDvis = 0.00000 CDind = 0.0291513
CLff = 1.00050 CDff = 0.0297201 | Trefftz
CYff = 0.00000 e = 0.9649 | Plane
```
Here, this function will go through this string and extract each key-value pair, as denoted by the data
identifier (by default, " = "). It will pull the next whole word without spaces to the left as the key,
and it will pull the next whole word without spaces to the right as the value. Together, these will be
returned as a Dict.
So, the output for the input above would be:
{
'Alpha' : 0.43348,
'pb/2V' : -0.00000,
'p'b/2V' : -0.00000,
'Beta' : 0.00000,
# and so on...
}
Args:
s: The input string to identify. Can be multiline.
data_identifier: The triggering substring for a new key-value pair. By default, it's " = ",
which is convention in many output files from Mark Drela's codes. Be careful if you decide to change this
to "=", as you could pick up on heading separators ('=======') in Markdown-like files.
cast_outputs_to_float: If this boolean flag is set true, the values of the key-value pairs are cast to
floating-point numbers before returning (as opposed to the default type, string). If a value can't be
cast, a NaN is returned (guaranteeing that you can do floating-point math with the outputs in downstream
applications.)
overwrite: Determines the behavior if you find a key that's already in the dictionary.
* By default, value is None. In this case, an error is raised.
* If you set it to True, the new value will overwrite the old one. Thus, your dictionary will have
the last matching value from the string.
* If you set it to False, the new value will be discarded. Thus, your dictionary will have the first
matching value from the string.
Returns: A dictionary of key-value pairs, corresponding to the unformatted data in the input string.
Keys are strings, values are floats if `cast_outputs_to_float` is True, otherwise also strings.
"""
items = {}
index = s.find(data_identifier)
while index != -1: # While there are still data identifiers:
key = "" # start with a blank key, which we will build up as we read
i = index - 1 # Starting from the left of the identifier
while s[i] == " " and i >= 0:
# First, skip any blanks
i -= 1
while s[i] != " " and s[i] != "\n" and i >= 0:
# Then, read the key in backwards order until you get to a blank or newline
key = s[i] + key
i -= 1
value = "" # start with a blank value, which we will build up as we read
i = index + len(data_identifier) # Starting from the right of the identifier
while s[i] == " " and i <= len(s):
# First, skip any blanks
i += 1
while s[i] != " " and s[i] != "\n" and i <= len(s):
# Then, read the key in forward order until you get to a blank or newline
value += s[i]
i += 1
if cast_outputs_to_float:
try: # Try to convert the value into a float. If you can't, return a NaN
value = float(value)
except Exception:
value = np.nan
if key in items.keys(): # If you already have this key
if overwrite is None: # If the `overwrite` parameter wasn't explicitly defined True/False, raise an error
raise ValueError(
f"Key \"{key}\" is being overwritten, and no behavior has been specified here (Default behavior is to error).\n"
f"Check that the output file doesn't have a duplicate here.\n"
f"Alternatively, set the `overwrite` parameter of this function to True or False (rather than the default None).",
)
else:
if overwrite:
items[key] = value # Assign (and overwrite) the key-value pair to the output we're writing
else:
pass
else:
items[key] = value # Assign the key-value pair to the output we're writing
s = s[index + len(data_identifier):] # Trim the string by starting to read from the next point.
index = s.find(data_identifier)
return items
if __name__ == '__main__':
    ### Import Vanilla Airplane
    import aerosandbox as asb
    from aerosandbox.aerodynamics.aero_3D.test_aero_3D.geometries.vanilla import airplane as vanilla

    # Attach AVL-specific analysis options to the test airplane (read back via
    # `self.get_options(...)` when writing the .avl file).
    vanilla.analysis_specific_options[AVL] = dict(
        profile_drag_coefficient=0.1
    )
    # Add a flap to the root section, so the run also exercises CONTROL-surface output.
    vanilla.wings[0].xsecs[0].control_surfaces.append(
        ControlSurface(
            name="Flap",
            trailing_edge=True,
            hinge_point=0.75,
            symmetric=True,
            deflection=10
        )
    )

    ### Do the AVL run
    avl = AVL(
        airplane=vanilla,
        op_point=OperatingPoint(
            atmosphere=asb.Atmosphere(altitude=0),
            velocity=1,
            alpha=0.433476,
            beta=0,
            p=0,
            q=0,
            r=0,
        ),
        working_directory=str(Path.home() / "Downloads" / "test"),
        verbose=True
    )

    res = avl.run()

    # Pretty-print the results dictionary, right-aligning the keys.
    for k, v in res.items():
        print(f"{str(k).rjust(10)} : {v}")
import aerosandbox.numpy as np
from aerosandbox import ExplicitAnalysis, AeroSandboxObject
from aerosandbox.geometry import *
from aerosandbox.performance import OperatingPoint
from aerosandbox.aerodynamics.aero_3D.singularities.uniform_strength_horseshoe_singularities import \
calculate_induced_velocity_horseshoe
from typing import Dict, Any, List, Callable, Optional, Union, Tuple
import copy
from functools import cached_property, lru_cache, partial
from collections import namedtuple
from dataclasses import dataclass
from abc import abstractmethod, ABC
### Define some helper functions that take a vector and make it a Nx1 or 1xN, respectively.
# Useful for broadcasting with matrices later.
def tall(array):
    """Reshape `array` into an Nx1 column vector, for broadcasting against matrices."""
    column_shape = (-1, 1)
    return np.reshape(array, column_shape)
def wide(array):
    """Reshape `array` into a 1xN row vector, for broadcasting against matrices."""
    row_shape = (1, -1)
    return np.reshape(array, row_shape)
immutable_dataclass = partial(dataclass, frozen=True, repr=False)
class LinearPotentialFlow(ExplicitAnalysis):
    """
    A linear potential flow aerodynamics analysis.

    NOTE: Work in progress — a UserWarning is raised on construction, and most of
    the solution methods (`run`, `AIC`, etc.) are not yet implemented.
    """

    def __init__(self,
                 airplane: Airplane,
                 op_point: OperatingPoint,
                 xyz_ref: List[float] = None,
                 run_symmetric_if_possible: bool = False,
                 verbose: bool = False,
                 wing_model: Union[str, Dict[Wing, str]] = "vortex_lattice_all_horseshoe",
                 fuselage_model: Union[str, Dict[Fuselage, str]] = "none",
                 wing_options: Union[Dict[str, Any], Dict[Wing, Dict[str, Any]]] = None,
                 fuselage_options: Union[Dict[str, Any], Dict[Fuselage, Dict[str, Any]]] = None,
                 ):
        """
        Initializes a LinearPotentialFlow analysis.

        Args:
            airplane: An Airplane object that you want to analyze.
            op_point: The OperatingPoint that you want to analyze the Airplane at.
            xyz_ref: The moment reference point. Defaults to `airplane.xyz_ref`.
            run_symmetric_if_possible: Not yet implemented; must be left False.
            verbose: If True, prints status information.
            wing_model: Either a single model name applied to all wings, or a
                `{Wing: model_name}` dictionary. Valid names: "none",
                "vortex_lattice_all_horseshoe", "vortex_lattice_ring", "lifting_line".
            fuselage_model: Either a single model name applied to all fuselages, or a
                `{Fuselage: model_name}` dictionary. Valid names: "none",
                "prescribed_source_line".
            wing_options: Either `{str: value}` (applied to all wings) or
                `{Wing: {str: value}}` (per-wing). Merged over the model's defaults.
            fuselage_options: Either `{str: value}` (applied to all fuselages) or
                `{Fuselage: {str: value}}` (per-fuselage). Merged over the model's defaults.
        """
        import warnings
        warnings.warn("LinearPotentialFlow is under active development and is not yet ready for use.", UserWarning)

        super().__init__()

        ##### Set defaults
        if xyz_ref is None:
            xyz_ref = airplane.xyz_ref
        if wing_options is None:
            wing_options = {}
        if fuselage_options is None:
            fuselage_options = {}

        ##### Initialize
        self.airplane = airplane
        self.op_point = op_point
        self.xyz_ref = xyz_ref
        self.verbose = verbose

        ##### Set up the modeling methods
        # Normalize a single model name into a per-component dictionary.
        if isinstance(wing_model, str):
            wing_model = {wing: wing_model for wing in self.airplane.wings}
        if isinstance(fuselage_model, str):
            fuselage_model = {fuselage: fuselage_model for fuselage in self.airplane.fuselages}

        self.wing_model: Dict[Wing, str] = wing_model
        self.fuselage_model: Dict[Fuselage, str] = fuselage_model

        ##### Set up the modeling options
        ### Check the format of the wing options
        # Bugfix: the nested form uses Wing *instances* as keys, so the check must be
        # `isinstance`, not `issubclass` (which raises TypeError on instances).
        if not (
                all([isinstance(k, str) for k in wing_options.keys()]) or
                all([isinstance(k, Wing) for k in wing_options.keys()])
        ):
            raise ValueError("`wing_options` must be either:\n"
                             " - A dictionary of the form `{str: value}`, which is applied to all Wings\n"
                             " - A nested dictionary of the form `{Wing: {str: value}}`, which is applied to the corresponding Wings\n"
                             )
        elif all([isinstance(k, str) for k in wing_options.keys()]):
            wing_options = {wing: wing_options for wing in self.airplane.wings}

        ### Check the format of the fuselage options
        if not (
                all([isinstance(k, str) for k in fuselage_options.keys()]) or
                all([isinstance(k, Fuselage) for k in fuselage_options.keys()])
        ):
            raise ValueError("`fuselage_options` must be either:\n"
                             " - A dictionary of the form `{str: value}`, which is applied to all Fuselages\n"
                             " - A nested dictionary of the form `{Fuselage: {str: value}}`, which is applied to the corresponding Fuselages\n"
                             )
        elif all([isinstance(k, str) for k in fuselage_options.keys()]):
            fuselage_options = {fuselage: fuselage_options for fuselage in self.airplane.fuselages}

        ### Set user-specified values
        self.wing_options: Dict[Wing, Dict[str, Any]] = wing_options
        self.fuselage_options: Dict[Fuselage, Dict[str, Any]] = fuselage_options

        ### Set default values
        wing_model_default_options = {
            "none"                        : {},
            "vortex_lattice_all_horseshoe": {
                "spanwise_resolution"              : 10,
                "spanwise_spacing_function"        : np.cosspace,
                "chordwise_resolution"             : 10,
                "chordwise_spacing_function"       : np.cosspace,
                "vortex_core_radius"               : 1e-8,
                "align_trailing_vortices_with_wind": False,
            },
            "vortex_lattice_ring"         : {
                "spanwise_resolution"              : 10,
                "spanwise_spacing_function"        : np.cosspace,
                "chordwise_resolution"             : 10,
                "chordwise_spacing_function"       : np.cosspace,
                "vortex_core_radius"               : 1e-8,
                "align_trailing_vortices_with_wind": False,
            },
            "lifting_line"                : {
                "sectional_data_source": "neuralfoil",
            },
        }

        # Merge user-specified options over the defaults for each wing's model.
        for wing in self.airplane.wings:
            if self.wing_model[wing] in wing_model_default_options.keys():
                self.wing_options[wing] = {
                    **wing_model_default_options[self.wing_model[wing]],
                    **self.wing_options[wing],
                }
            else:
                raise ValueError(f"Invalid wing model specified: \"{self.wing_model[wing]}\"\n"
                                 f"Must be one of: {list(wing_model_default_options.keys())}")

        fuselage_model_default_options = {
            "none"                  : {},
            "prescribed_source_line": {
                "lengthwise_resolution"      : 1,
                "lengthwise_spacing_function": np.cosspace,
            },
        }

        # Merge user-specified options over the defaults for each fuselage's model.
        for fuselage in self.airplane.fuselages:
            if self.fuselage_model[fuselage] in fuselage_model_default_options.keys():
                self.fuselage_options[fuselage] = {
                    **fuselage_model_default_options[self.fuselage_model[fuselage]],
                    **self.fuselage_options[fuselage],
                }
            else:
                raise ValueError(f"Invalid fuselage model specified: \"{self.fuselage_model[fuselage]}\"\n"
                                 f"Must be one of: {list(fuselage_model_default_options.keys())}")

        ### Determine whether you should run the problem as symmetric
        self.run_symmetric = False
        if run_symmetric_if_possible:
            raise NotImplementedError("LinearPotentialFlow with symmetry detection not yet implemented!")
            # try:
            #     self.run_symmetric = (  # Satisfies assumptions
            #             self.op_point.beta == 0 and
            #             self.op_point.p == 0 and
            #             self.op_point.r == 0 and
            #             self.airplane.is_entirely_symmetric()
            #     )
            # except RuntimeError:  # Required because beta, p, r, etc. may be non-numeric (e.g. opti variables)
            #     pass

    def __repr__(self):
        return self.__class__.__name__ + "(\n" + "\n".join([
            f"\tairplane={self.airplane}",
            f"\top_point={self.op_point}",
            f"\txyz_ref={self.xyz_ref}",
        ]) + "\n)"

    @immutable_dataclass
    class Elements(ABC):
        """
        Abstract base for a collection of singularity elements generated from one
        component. Elements occupy the half-open index range [start_index, end_index)
        within the overall system.
        """
        parent_component: AeroSandboxObject  # The Wing/Fuselage these elements model
        start_index: int  # First global element index of this collection
        end_index: int  # One-past-last global element index of this collection

        def __repr__(self):
            return self.__class__.__name__ + "(\n" + "\n".join([
                f"\tparent_component={self.parent_component}",
                f"\tstart_index={self.start_index}",
                f"\tend_index={self.end_index}",
                f"\tlength={len(self)}",
            ]) + "\n)"

        @abstractmethod
        def __len__(self):
            pass

        # @abstractmethod
        # def get_induced_velocity_at_points(self,
        #                                    points: np.ndarray
        #                                    ) -> np.ndarray:
        #     pass

    @immutable_dataclass
    class PanelElements(Elements, ABC):
        """
        Elements defined on quadrilateral panels, described by their four corners.
        """
        front_left_vertices: np.ndarray  # Nx3 array of panel corners
        back_left_vertices: np.ndarray  # Nx3 array of panel corners
        back_right_vertices: np.ndarray  # Nx3 array of panel corners
        front_right_vertices: np.ndarray  # Nx3 array of panel corners

        def __len__(self):
            return np.length(self.front_left_vertices)

        @cached_property
        def crosses(self):
            # Cross product of the two panel diagonals; normal to each panel,
            # with magnitude equal to twice the panel area.
            diag1 = self.front_right_vertices - self.back_left_vertices
            diag2 = self.front_left_vertices - self.back_right_vertices
            return np.cross(diag1, diag2)

        @cached_property
        def cross_norms(self):
            return np.linalg.norm(self.crosses, axis=1)

        @cached_property
        def areas(self) -> np.ndarray:
            return self.cross_norms / 2

        @cached_property
        def normal_directions(self):
            # Unit normal of each panel.
            return self.crosses / tall(self.cross_norms)

    @immutable_dataclass
    class WingHorseshoeVortexElements(PanelElements):
        """
        Horseshoe-vortex elements: bound leg at the panel quarter-chord, trailing
        legs extending in `trailing_vortex_direction`.
        """
        trailing_vortex_direction: np.ndarray  # Nx3 array of trailing vortex directions
        vortex_core_radius: float  # [meters]

        @cached_property
        def left_vortex_vertices(self):
            return 0.75 * self.front_left_vertices + 0.25 * self.back_left_vertices

        @cached_property
        def right_vortex_vertices(self):
            return 0.75 * self.front_right_vertices + 0.25 * self.back_right_vertices

        @cached_property
        def vortex_centers(self):
            return (self.left_vortex_vertices + self.right_vortex_vertices) / 2

        @cached_property
        def vortex_bound_legs(self):
            return self.right_vortex_vertices - self.left_vortex_vertices

        @cached_property
        def collocation_points(self):
            # Panel midspan, at the three-quarter-chord point.
            return (
                    0.5 * (0.25 * self.front_left_vertices + 0.75 * self.back_left_vertices) +
                    0.5 * (0.25 * self.front_right_vertices + 0.75 * self.back_right_vertices)
            )

        def get_induced_velocity_at_points(self,
                                           points: np.ndarray,
                                           vortex_strengths: np.ndarray,
                                           sum_across_elements: bool = True
                                           ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
            """
            Computes the velocity induced by these horseshoe vortices at the given points.

            Args:
                points: Nx3 array of field points, in geometry axes.
                vortex_strengths: Circulation strength of each element.
                sum_across_elements: If True, sums the contributions of all elements
                    at each point; otherwise returns per-element contributions.

            Returns: A (u, v, w) tuple of induced-velocity components.
            """
            u_induced, v_induced, w_induced = calculate_induced_velocity_horseshoe(
                x_field=tall(points[:, 0]),
                y_field=tall(points[:, 1]),
                z_field=tall(points[:, 2]),
                x_left=wide(self.left_vortex_vertices[:, 0]),
                y_left=wide(self.left_vortex_vertices[:, 1]),
                z_left=wide(self.left_vortex_vertices[:, 2]),
                x_right=wide(self.right_vortex_vertices[:, 0]),
                y_right=wide(self.right_vortex_vertices[:, 1]),
                z_right=wide(self.right_vortex_vertices[:, 2]),
                trailing_vortex_direction=self.trailing_vortex_direction,
                gamma=wide(vortex_strengths),
                vortex_core_radius=self.vortex_core_radius
            )
            if sum_across_elements:
                u_induced = np.sum(u_induced, axis=1)
                v_induced = np.sum(v_induced, axis=1)
                w_induced = np.sum(w_induced, axis=1)
            return u_induced, v_induced, w_induced

    @immutable_dataclass
    class WingLiftingLineElements(WingHorseshoeVortexElements):
        """
        Lifting-line elements: horseshoe vortices augmented with sectional lift data.
        """
        CL0: np.ndarray  # length N array of zero-angle-of-attack lift coefficients
        CLa: np.ndarray  # length N array of lift slopes (1/rad)

        @property
        def collocation_points(self):
            raise NotImplementedError

    @immutable_dataclass
    class FuselagePrescribedSourceLineElements(Elements):
        """
        Source-line elements with prescribed strengths, used to model fuselages.
        """
        front_vertices: np.ndarray
        back_vertices: np.ndarray
        strength: np.ndarray

        def __len__(self):
            return np.length(self.front_vertices)

    @cached_property
    def discretization(self):
        """
        Discretizes the airplane into singularity-element collections.

        Returns: A list of element collections, one per modeled component. Each item
        is one of:

        * `LinearPotentialFlow.WingHorseshoeVortexElements`
        * `LinearPotentialFlow.WingLiftingLineElements`
        * `LinearPotentialFlow.FuselagePrescribedSourceLineElements`
        """
        ### Initialize
        discretization = []
        index = 0

        ### Wings
        for wing in self.airplane.wings:
            element_type: str = self.wing_model[wing]
            options = self.wing_options[wing]

            if element_type == "none":
                continue
            elif element_type == "vortex_lattice_all_horseshoe":
                if options["spanwise_resolution"] > 1:
                    subdivided_wing = wing.subdivide_sections(
                        ratio=options["spanwise_resolution"],
                        spacing_function=options["spanwise_spacing_function"],
                    )
                else:
                    # Robustness fix: a resolution of 1 means "use the wing's
                    # sections as-is" (previously left `subdivided_wing` undefined).
                    subdivided_wing = wing

                points, faces = subdivided_wing.mesh_thin_surface(
                    method="quad",
                    chordwise_resolution=options["chordwise_resolution"],
                    chordwise_spacing_function=options["chordwise_spacing_function"],
                    add_camber=True
                )

                if options["align_trailing_vortices_with_wind"]:
                    raise NotImplementedError("align_trailing_vortices_with_wind not yet implemented!")
                else:
                    trailing_vortex_direction = np.array([1, 0, 0])

                discretization.append(
                    self.WingHorseshoeVortexElements(
                        parent_component=wing,
                        start_index=index,
                        end_index=(index := index + len(faces)),
                        front_left_vertices=points[faces[:, 0], :],
                        back_left_vertices=points[faces[:, 1], :],
                        back_right_vertices=points[faces[:, 2], :],
                        front_right_vertices=points[faces[:, 3], :],
                        trailing_vortex_direction=trailing_vortex_direction,
                        vortex_core_radius=options["vortex_core_radius"],
                    )
                )
            elif element_type == "vortex_lattice_ring":
                raise NotImplementedError("vortex_lattice_ring not yet implemented!")
            elif element_type == "lifting_line":
                raise NotImplementedError("lifting_line not yet implemented!")
            else:
                raise ValueError(f"Invalid wing model specified: \"{element_type}\"")

        ### Fuselages
        for fuselage in self.airplane.fuselages:
            element_type: str = self.fuselage_model[fuselage]
            options = self.fuselage_options[fuselage]

            if element_type == "none":
                continue
            elif element_type == "prescribed_source_line":
                raise NotImplementedError("prescribed_source_line not yet implemented!")
            else:
                raise ValueError(f"Invalid fuselage model specified: \"{element_type}\"")

        return discretization

    @cached_property
    def N_elements(self):
        # Total number of singularity elements across all collections.
        return sum([len(element_collection) for element_collection in self.discretization])

    @cached_property
    def AIC(self):
        # Aerodynamic influence coefficient matrix (not yet implemented for any
        # element type); initialized to NaN so unfilled entries are conspicuous.
        A = np.empty((self.N_elements, self.N_elements)) * np.nan

        for element_collection in self.discretization:
            if isinstance(element_collection, self.WingHorseshoeVortexElements):
                raise NotImplementedError("AIC not yet implemented for horseshoe vortices.")
            elif isinstance(element_collection, self.WingLiftingLineElements):
                raise NotImplementedError("AIC not yet implemented for lifting lines.")
            elif isinstance(element_collection, self.FuselagePrescribedSourceLineElements):
                raise NotImplementedError("AIC not yet implemented for fuselages.")
            else:
                raise ValueError(f"Invalid element type: {type(element_collection)}")

        return A

    def run(self) -> Dict[str, Any]:
        """
        Computes the aerodynamic forces.

        Returns a dictionary with keys:

            - 'F_g' : an [x, y, z] list of forces in geometry axes [N]
            - 'F_b' : an [x, y, z] list of forces in body axes [N]
            - 'F_w' : an [x, y, z] list of forces in wind axes [N]
            - 'M_g' : an [x, y, z] list of moments about geometry axes [Nm]
            - 'M_b' : an [x, y, z] list of moments about body axes [Nm]
            - 'M_w' : an [x, y, z] list of moments about wind axes [Nm]
            - 'L' : the lift force [N]. Definitionally, this is in wind axes.
            - 'Y' : the side force [N]. This is in wind axes.
            - 'D' : the drag force [N]. Definitionally, this is in wind axes.
            - 'l_b', the rolling moment, in body axes [Nm]. Positive is roll-right.
            - 'm_b', the pitching moment, in body axes [Nm]. Positive is pitch-up.
            - 'n_b', the yawing moment, in body axes [Nm]. Positive is nose-right.
            - 'CL', the lift coefficient [-]. Definitionally, this is in wind axes.
            - 'CY', the sideforce coefficient [-]. This is in wind axes.
            - 'CD', the drag coefficient [-]. Definitionally, this is in wind axes.
            - 'Cl', the rolling coefficient [-], in body axes
            - 'Cm', the pitching coefficient [-], in body axes
            - 'Cn', the yawing coefficient [-], in body axes

        Nondimensional values are nondimensionalized using reference values in the LinearPotentialFlow.airplane object.
        """
        raise NotImplementedError

    def get_induced_velocity_at_points(self,
                                       points: np.ndarray
                                       ) -> np.ndarray:
        raise NotImplementedError

    def get_velocity_at_points(self,
                               points: np.ndarray
                               ) -> np.ndarray:
        raise NotImplementedError

    def get_streamlines(self,
                        seed_points: np.ndarray = None,
                        n_steps: int = 300,
                        length: float = None,
                        ):
        raise NotImplementedError

    def draw(self,
             c: np.ndarray = None,
             cmap: str = None,
             colorbar_label: str = None,
             show: bool = True,
             show_kwargs: Dict = None,
             draw_streamlines=True,
             recalculate_streamlines=False,
             backend: str = "pyvista"
             ):
        raise NotImplementedError

    def draw_three_view(self):
        raise NotImplementedError
if __name__ == '__main__':
    ### Import Vanilla Airplane
    import aerosandbox as asb
    from pathlib import Path

    geometry_folder = Path(__file__).parent / "test_aero_3D" / "geometries"

    import sys

    # Make the test-geometry folder importable. This must precede the `vanilla`
    # import below — do not reorder.
    sys.path.insert(0, str(geometry_folder))

    from vanilla import airplane as vanilla

    ### Do the AVL run
    lpf = LinearPotentialFlow(
        airplane=vanilla,
        op_point=asb.OperatingPoint(
            atmosphere=asb.Atmosphere(altitude=0),
            velocity=10,
            alpha=0,
            beta=0,
            p=0,
            q=0,
            r=0,
        ),
    )

    dis = lpf.discretization

    # NOTE: `run()` currently raises NotImplementedError (see LinearPotentialFlow.run).
    res = lpf.run()

    for k, v in res.items():
        print(f"{str(k).rjust(10)} : {v}")
from aerosandbox import Opti, ImplicitAnalysis
from aerosandbox.geometry import *
from aerosandbox.performance import OperatingPoint
from aerosandbox.aerodynamics.aero_3D.singularities.uniform_strength_horseshoe_singularities import \
calculate_induced_velocity_horseshoe
from typing import Dict
### Define some helper functions that take a vector and make it a Nx1 or 1xN, respectively.
# Useful for broadcasting with matrices later.
def tall(array):
    """Return `array` reshaped as an Nx1 column vector (for matrix broadcasting)."""
    n_rows, n_cols = -1, 1
    return np.reshape(array, (n_rows, n_cols))
def wide(array):
    """Return `array` reshaped as a 1xN row vector (for matrix broadcasting)."""
    n_rows, n_cols = 1, -1
    return np.reshape(array, (n_rows, n_cols))
class LiftingLine(ImplicitAnalysis):
"""
An implicit aerodynamics analysis based on lifting line theory, with modifications for nonzero sweep
and dihedral + multiple wings.
Nonlinear, and includes viscous effects based on 2D data.
Usage example:
>>> analysis = asb.LiftingLine(
>>> airplane=my_airplane,
>>> op_point=asb.OperatingPoint(
>>> velocity=100, # m/s
>>> alpha=5, # deg
>>> beta=4, # deg
>>> p=0.01, # rad/sec
>>> q=0.02, # rad/sec
>>> r=0.03, # rad/sec
>>> )
>>> )
>>> outputs = analysis.run()
"""
@ImplicitAnalysis.initialize
def __init__(self,
             airplane: Airplane,
             op_point: OperatingPoint,
             run_symmetric_if_possible=True,
             verbose=True,
             spanwise_resolution=8,
             spanwise_spacing="cosine",
             vortex_core_radius: float = 1e-8,
             ):
    """
    Initializes and conducts a LiftingLine analysis.

    Args:

        airplane: An Airplane object that you want to analyze.

        op_point: The OperatingPoint that you want to analyze the Airplane at.

        run_symmetric_if_possible: If this flag is True and the problem formulation is XZ-symmetric, the solver will
        attempt to exploit the symmetry. This results in roughly half the number of governing equations.

        verbose: If True, prints status messages (e.g., "Meshing...") during the analysis.

        spanwise_resolution: Number of spanwise points per wing section at which
        sectional aerodynamic data is sampled (see `setup_mesh`).

        spanwise_spacing: Spanwise point distribution within each section: "uniform"
        or "cosine" (see `setup_mesh`).

        vortex_core_radius: Finite vortex-core radius [m] passed to the
        horseshoe-vortex induced-velocity kernel.

        opti: An asb.Opti environment. (Handled by the `@ImplicitAnalysis.initialize`
        decorator — TODO confirm.)

            If provided, adds the governing equations to that instance. Does not solve the equations (you need to
            call `sol = opti.solve()` to do that).

            If not provided, creates and solves the governing equations in a new instance.

    """
    super().__init__()

    ### Initialize
    self.airplane = airplane
    self.op_point = op_point
    self.run_symmetric_if_possible = run_symmetric_if_possible
    self.verbose = verbose
    self.spanwise_resolution = spanwise_resolution
    self.spanwise_spacing = spanwise_spacing
    self.vortex_core_radius = vortex_core_radius

    ### Determine whether you should run the problem as symmetric
    self.run_symmetric = False
    if self.run_symmetric_if_possible:
        try:
            self.run_symmetric = (  # Satisfies assumptions
                    self.op_point.beta == 0 and
                    self.op_point.p == 0 and
                    self.op_point.r == 0 and
                    self.airplane.is_entirely_symmetric()
            )
        except RuntimeError:  # Required because beta, p, r, etc. may be non-numeric (e.g. opti variables)
            pass
def run(self) -> Dict:
    # NOTE(review): incomplete implementation — this currently only builds the mesh,
    # and implicitly returns None despite the `-> Dict` annotation. The aerodynamic
    # solve that would produce the results dictionary has not been written yet.
    self.setup_mesh()
def setup_mesh(self) -> None:
    """
    Discretizes the airplane into spanwise panels and precomputes geometric and
    freestream quantities, storing the results on `self` for later use.

    Populates (among others): panel vertex arrays, per-panel sectional CL/CD/CM
    interpolation callables, panel normals and areas, vortex vertex/center/bound-leg
    arrays, chord data, and the freestream velocity at each panel.
    """
    if self.verbose:
        print("Meshing...")

    ##### Make Panels
    front_left_vertices = []
    back_left_vertices = []
    back_right_vertices = []
    front_right_vertices = []
    CL_functions = []
    CD_functions = []
    CM_functions = []
    wing_ids = []

    for wing_id, wing in enumerate(self.airplane.wings):  # Iterate through wings
        for xsec_a, xsec_b in zip(
                wing.xsecs[:-1],
                wing.xsecs[1:]
        ):  # Iterate through pairs of wing cross sections
            # Build a single-section wing spanning just this pair of cross sections,
            # so it can be meshed independently.
            wing_section = Wing(
                xsecs=[
                    xsec_a,  # Inside cross section
                    xsec_b  # Outside cross section
                ],
                symmetric=wing.symmetric
            )
            points, faces = wing_section.mesh_thin_surface(
                method="quad",
                chordwise_resolution=1,
                add_camber=False
            )
            front_left_vertices.append(points[faces[:, 0], :])
            back_left_vertices.append(points[faces[:, 1], :])
            back_right_vertices.append(points[faces[:, 2], :])
            front_right_vertices.append(points[faces[:, 3], :])
            wing_ids.append(wing_id * np.ones(len(faces)))

            # Nondimensional spanwise stations (0 at inner xsec, 1 at outer xsec)
            # at which sectional aerodynamic data is sampled.
            if self.spanwise_spacing == 'uniform':
                y_nondim_vertices = np.linspace(0, 1, self.spanwise_resolution + 1)
            elif self.spanwise_spacing == 'cosine':
                y_nondim_vertices = np.cosspace(0, 1, self.spanwise_resolution + 1)
            else:
                raise Exception("Bad value of `LiftingLine.spanwise_spacing`!")

            y_nondim = (y_nondim_vertices[:-1] + y_nondim_vertices[1:]) / 2
            if wing_section.symmetric:
                y_nondim = np.concatenate([y_nondim, y_nondim])
            # NOTE(review): this assumes `mesh_thin_surface` produces exactly
            # `len(y_nondim)` spanwise panels for this section — confirm.

            for y_nondim_i in y_nondim:
                # Sectional coefficient callables: linear blend between the two
                # bounding cross sections' airfoil data. Default arguments bind the
                # loop variables at definition time (avoids late-binding bugs).
                CL_functions.append(
                    lambda alpha, Re, mach,
                           xsec_a=xsec_a, xsec_b=xsec_b, y_nondim=y_nondim_i:
                    xsec_a.airfoil.CL_function(alpha, Re, mach) * (1 - y_nondim) +
                    xsec_b.airfoil.CL_function(alpha, Re, mach) * (y_nondim)
                )
                CD_functions.append(
                    lambda alpha, Re, mach,
                           xsec_a=xsec_a, xsec_b=xsec_b, y_nondim=y_nondim_i:
                    xsec_a.airfoil.CD_function(alpha, Re, mach) * (1 - y_nondim) +
                    xsec_b.airfoil.CD_function(alpha, Re, mach) * (y_nondim)
                )
                CM_functions.append(
                    lambda alpha, Re, mach,
                           xsec_a=xsec_a, xsec_b=xsec_b, y_nondim=y_nondim_i:
                    xsec_a.airfoil.CM_function(alpha, Re, mach) * (1 - y_nondim) +
                    xsec_b.airfoil.CM_function(alpha, Re, mach) * (y_nondim)
                )

    front_left_vertices = np.concatenate(front_left_vertices)
    back_left_vertices = np.concatenate(back_left_vertices)
    back_right_vertices = np.concatenate(back_right_vertices)
    front_right_vertices = np.concatenate(front_right_vertices)
    wing_ids = np.concatenate(wing_ids)

    ### Compute panel statistics
    # Panel normal (unnormalized) from the cross product of the two diagonals;
    # its magnitude is twice the panel area.
    diag1 = front_right_vertices - back_left_vertices
    diag2 = front_left_vertices - back_right_vertices
    cross = np.cross(diag1, diag2)
    cross_norm = np.linalg.norm(cross, axis=1)
    normal_directions = cross / tall(cross_norm)
    areas = cross_norm / 2

    # Compute the location of points of interest on each panel
    # (bound vortex legs at the quarter-chord line).
    left_vortex_vertices = 0.75 * front_left_vertices + 0.25 * back_left_vertices
    right_vortex_vertices = 0.75 * front_right_vertices + 0.25 * back_right_vertices
    vortex_centers = (left_vortex_vertices + right_vortex_vertices) / 2
    vortex_bound_leg = right_vortex_vertices - left_vortex_vertices
    chord_vectors = (
            (back_left_vertices + back_right_vertices) / 2 -
            (front_left_vertices + front_right_vertices) / 2
    )
    chords = np.linalg.norm(chord_vectors, axis=1)

    ### Save things to the instance for later access
    self.front_left_vertices = front_left_vertices
    self.back_left_vertices = back_left_vertices
    self.back_right_vertices = back_right_vertices
    self.front_right_vertices = front_right_vertices
    self.CL_functions = CL_functions  # type: list # of callables
    self.CD_functions = CD_functions  # type: list # of callables
    self.CM_functions = CM_functions  # type: list # of callables
    self.wing_ids = wing_ids
    self.normal_directions = normal_directions
    self.areas = areas
    self.left_vortex_vertices = left_vortex_vertices
    self.right_vortex_vertices = right_vortex_vertices
    self.vortex_centers = vortex_centers
    self.vortex_bound_leg = vortex_bound_leg
    self.chord_vectors = chord_vectors
    self.chords = chords

    if self.verbose:
        print("Calculating the freestream influence...")
    steady_freestream_velocity = self.op_point.compute_freestream_velocity_geometry_axes()  # Direction the wind is GOING TO, in geometry axes coordinates
    steady_freestream_direction = steady_freestream_velocity / np.linalg.norm(steady_freestream_velocity)
    rotation_freestream_velocities = self.op_point.compute_rotation_velocity_geometry_axes(
        vortex_centers)
    freestream_velocities = wide(steady_freestream_velocity) + rotation_freestream_velocities
    # Nx3, represents the freestream velocity at each panel collocation point (c)
    freestream_influences = np.sum(freestream_velocities * normal_directions, axis=1)
    # NOTE(review): `freestream_influences` is computed but never stored on `self`
    # or returned — confirm whether it should be saved like the other quantities.

    ### Save things to the instance for later access
    self.steady_freestream_velocity = steady_freestream_velocity
    self.steady_freestream_direction = steady_freestream_direction
    self.freestream_velocities = freestream_velocities
def get_induced_velocity_at_points(self,
                                   points: np.ndarray,
                                   vortex_strengths: np.ndarray = None
                                   ) -> np.ndarray:
    """
    Computes the induced velocity at a set of points in the flowfield.

    Args:
        points: A Nx3 array of points that you would like to know the induced velocities at. Given in geometry axes.
        vortex_strengths: The strength of each horseshoe vortex. Defaults to the solved
            `self.vortex_strengths` if available.

    Returns: A Nx3 of the induced velocity at those points. Given in geometry axes.
    """
    if vortex_strengths is None:
        if not hasattr(self, "vortex_strengths"):
            raise ValueError(
                "`LiftingLine.vortex_strengths` doesn't exist, so you need to pass in the `vortex_strengths` parameter.")
        vortex_strengths = self.vortex_strengths

    left = self.left_vortex_vertices
    right = self.right_vortex_vertices

    # One row per field point, one column per horseshoe vortex.
    velocity_components = calculate_induced_velocity_horseshoe(
        x_field=tall(points[:, 0]),
        y_field=tall(points[:, 1]),
        z_field=tall(points[:, 2]),
        x_left=wide(left[:, 0]),
        y_left=wide(left[:, 1]),
        z_left=wide(left[:, 2]),
        x_right=wide(right[:, 0]),
        y_right=wide(right[:, 1]),
        z_right=wide(right[:, 2]),
        trailing_vortex_direction=self.steady_freestream_direction,
        gamma=wide(vortex_strengths),
        vortex_core_radius=self.vortex_core_radius
    )

    # Sum every vortex's contribution (axis 1) at each field point (axis 0),
    # then assemble the three velocity components into an Nx3 array.
    return np.stack(
        [np.sum(component, axis=1) for component in velocity_components],
        axis=1
    )
def get_velocity_at_points(self,
                           points: np.ndarray,
                           vortex_strengths: np.ndarray = None,
                           ) -> np.ndarray:
    """
    Computes the velocity at a set of points in the flowfield.

    Args:
        points: A Nx3 array of points that you would like to know the velocities at. Given in geometry axes.
        vortex_strengths: The strength of each horseshoe vortex; forwarded to
            `get_induced_velocity_at_points`.

    Returns: A Nx3 of the velocity at those points. Given in geometry axes.
    """
    # Total velocity = (vortex-induced) + (steady freestream) + (apparent velocity from body rotation).
    induced = self.get_induced_velocity_at_points(
        points=points,
        vortex_strengths=vortex_strengths,
    )
    rotational = self.op_point.compute_rotation_velocity_geometry_axes(points)
    return induced + self.steady_freestream_velocity + rotational
def compute_solution_quantities(self, vortex_strengths: np.ndarray) -> Dict:
    """
    Given a candidate set of vortex strengths, computes the local flow quantities at each panel
    (effective angle of attack, Reynolds number, Mach number, sectional coefficients) along with
    the residual of the lifting-line governing equation.

    Args:
        vortex_strengths: Candidate vortex strengths, one per panel.

    Returns: A dict of solution quantities, keyed by name.
    """
    velocities = self.get_velocity_at_points(
        points=self.vortex_centers,
        vortex_strengths=vortex_strengths
    )
    velocity_magnitudes = np.linalg.norm(velocities, axis=1)
    velocity_directions = velocities / tall(velocity_magnitudes)

    # Local angle of attack: the complement of the angle between the local flow
    # direction and the panel normal, in degrees.
    alphas = 90 - np.arccosd(
        np.sum(velocity_directions * self.normal_directions, axis=1)
    )

    Res = (
            velocity_magnitudes *
            self.chords /
            self.op_point.atmosphere.kinematic_viscosity()
    )  # TODO add multiply by cos_sweeps
    machs = velocity_magnitudes / self.op_point.atmosphere.speed_of_sound()

    def evaluate_polars(polar_functions):
        # Evaluates each panel's (callable) polar at that panel's local flow conditions.
        return np.array([
            polar_function(
                alpha=alphas[i],
                Re=Res[i],
                mach=machs[i],
            )
            for i, polar_function in enumerate(polar_functions)
        ])

    CLs = evaluate_polars(self.CL_functions)
    CDs = evaluate_polars(self.CD_functions)
    CMs = evaluate_polars(self.CM_functions)

    # Residual: Kutta-Joukowski sectional lift vs. the polar's sectional lift, per panel.
    Vi_cross_li = np.cross(velocities, self.vortex_bound_leg, axis=1)
    Vi_cross_li_magnitudes = np.linalg.norm(Vi_cross_li, axis=1)
    residuals = (
            vortex_strengths * Vi_cross_li_magnitudes * 2 / velocity_magnitudes ** 2 / self.areas - CLs
    )

    return {
        "residuals": residuals,
        "alphas"   : alphas,
        "Res"      : Res,
        "machs"    : machs,
        "CLs"      : CLs,
        "CDs"      : CDs,
        "CMs"      : CMs,
    }
def calculate_streamlines(self,
                          seed_points: np.ndarray = None,
                          n_steps: int = 300,
                          length: float = None
                          ) -> np.ndarray:
    """
    Computes streamlines, starting at specific seed points.

    After running this function, a new instance variable `VortexLatticeFilaments.streamlines` is computed.

    Uses simple forward-Euler integration with a fixed spatial stepsize (i.e., velocity vectors are
    normalized before ODE integration). Fancier ODE integration (adaptive schemes, RK substepping)
    is not worthwhile here, due to the near-singular conditions near vortex filaments.

    Args:
        seed_points: A Nx3 ndarray of points where streamlines are started. Auto-calculated
            (trailing-edge midpoints) if not specified.
        n_steps: The number of individual streamline steps to trace. Minimum of 2.
        length: The approximate total length of the streamlines desired, in meters.
            Auto-calculated if not specified.

    Returns:
        streamlines: a 3D array with dimensions: (n_seed_points) x (3) x (n_steps).
        Result is also saved as an instance variable, VortexLatticeMethod.streamlines.
    """
    if self.verbose:
        print("Calculating streamlines...")

    if length is None:
        length = self.airplane.c_ref * 5

    if seed_points is None:
        # Seed streamlines along the trailing edges, spreading ~200 seeds across the TE panels.
        left_TE = self.back_left_vertices[self.is_trailing_edge]
        right_TE = self.back_right_vertices[self.is_trailing_edge]

        N_streamlines_target = 200
        seeds_per_panel = np.maximum(1, N_streamlines_target // len(left_TE))
        node_fractions = np.linspace(0, 1, seeds_per_panel + 1)
        # Seed at the midpoints of the spanwise subdivisions, not at the nodes.
        midpoint_fractions = (node_fractions[1:] + node_fractions[:-1]) / 2
        seed_points = np.concatenate([
            frac * left_TE + (1 - frac) * right_TE
            for frac in midpoint_fractions
        ])

    step_size = length / n_steps

    streamlines = np.empty((len(seed_points), 3, n_steps))
    streamlines[:, :, 0] = seed_points
    for step in range(1, n_steps):
        last_points = streamlines[:, :, step - 1]
        V = self.get_velocity_at_points(last_points)
        # Normalize velocity so each step advances a fixed spatial distance.
        streamlines[:, :, step] = (
                last_points +
                step_size * V / tall(np.linalg.norm(V, axis=1))
        )

    self.streamlines = streamlines

    if self.verbose:
        print("Streamlines calculated.")

    return streamlines
def draw(self,
         c: np.ndarray = None,
         cmap: str = None,
         colorbar_label: str = None,
         show: bool = True,
         show_kwargs: Dict = None,
         draw_streamlines=True,
         recalculate_streamlines=False,
         backend: str = "pyvista"
         ):
    """
    Draws the solution. Note: Must be called on a SOLVED AeroProblem object.
    To solve an AeroProblem, use opti.solve(). To substitute a solved solution, use ap = ap.substitute_solution(sol).

    Args:
        c: Per-panel scalar values to color the mesh by. Defaults to the solved vortex strengths.
        cmap: Colormap name, passed through to the pyvista backend.
        colorbar_label: Label for the colorbar.
        show: Whether to display the figure interactively.
        show_kwargs: NOTE(review): accepted but never used in this body — confirm intent.
        draw_streamlines: Whether to trace and draw streamlines.
        recalculate_streamlines: Forces recomputation of streamlines even if cached.
        backend: Either "plotly" or "pyvista".

    :return: The plotly Figure3D draw result, or the pyvista Plotter, depending on `backend`.
    """
    if c is None:
        # Default coloring: the solved vortex strengths.
        c = self.vortex_strengths
        colorbar_label = "Vortex Strengths"

    if draw_streamlines:
        # Streamlines are cached on the instance; recompute only if absent or forced.
        if (not hasattr(self, 'streamlines')) or recalculate_streamlines:
            self.calculate_streamlines()

    if backend == "plotly":
        from aerosandbox.visualization.plotly_Figure3D import Figure3D
        fig = Figure3D()

        # One colored quad per panel, in FL -> BL -> BR -> FR order.
        for i in range(len(self.front_left_vertices)):
            fig.add_quad(
                points=[
                    self.front_left_vertices[i, :],
                    self.back_left_vertices[i, :],
                    self.back_right_vertices[i, :],
                    self.front_right_vertices[i, :],
                ],
                intensity=c[i],
                outline=True,
            )

        if draw_streamlines:
            # self.streamlines is (n_seed_points) x (3) x (n_steps); each slice is one polyline.
            for i in range(self.streamlines.shape[0]):
                fig.add_streamline(self.streamlines[i, :, :].T)

        return fig.draw(
            show=show,
            colorbar_title=colorbar_label
        )
    elif backend == "pyvista":
        import pyvista as pv
        plotter = pv.Plotter()
        plotter.title = "ASB VortexLatticeMethod"
        plotter.add_axes()
        plotter.show_grid(color='gray')

        ### Draw the airplane mesh
        # Stack all four vertex groups; panel i's vertices then live at rows
        # i, i+N, i+2N, i+3N, which is exactly what `faces` encodes below.
        points = np.concatenate([
            self.front_left_vertices,
            self.back_left_vertices,
            self.back_right_vertices,
            self.front_right_vertices
        ])
        N = len(self.front_left_vertices)
        range_N = np.arange(N)
        faces = tall(range_N) + wide(np.array([0, 1, 2, 3]) * N)

        mesh = pv.PolyData(
            *mesh_utils.convert_mesh_to_polydata_format(points, faces)
        )
        scalar_bar_args = {}
        if colorbar_label is not None:
            scalar_bar_args["title"] = colorbar_label
        plotter.add_mesh(
            mesh=mesh,
            scalars=c,
            show_edges=True,
            show_scalar_bar=c is not None,
            scalar_bar_args=scalar_bar_args,
            cmap=cmap,
        )

        ### Draw the streamlines
        if draw_streamlines:
            import aerosandbox.tools.pretty_plots as p
            for i in range(self.streamlines.shape[0]):
                plotter.add_mesh(
                    pv.Spline(self.streamlines[i, :, :].T),
                    color=p.adjust_lightness("#7700FF", 1.5),
                    opacity=0.7,
                    line_width=1
                )

        if show:
            plotter.show()
        return plotter
    else:
        raise ValueError("Bad value of `backend`!")
def _setup_geometry(self):
    """
    Computes the geometry-dependent pieces of the problem: the velocity influence
    matrices (vortex -> vortex center) and the fuselage source influences.
    """
    if self.verbose:
        print("Calculating the vortex center velocity influence matrix...")
    self.Vij_x, self.Vij_y, self.Vij_z = self.calculate_Vij(self.vortex_centers)

    if self.verbose:
        print("Calculating fuselage influences...")
    # Prandtl-Glauert compressibility factor: beta = sqrt(1 - M^2).
    # BUGFIX: was previously sqrt(1 - M), which omits the square on the Mach number.
    # (Currently only consumed by commented-out compressibility scaling in
    # `calculate_fuselage_influences`.)
    self.beta = np.sqrt(1 - self.op_point.mach() ** 2)
    self.fuselage_velocities = self.calculate_fuselage_influences(self.vortex_centers)
    # TODO do this
def _setup_operating_point(self):
    """
    Computes the freestream velocity (steady + rotational) at each vortex center.
    """
    if self.verbose:
        print("Calculating the freestream influence...")
    # Direction the wind is GOING TO, in geometry axes coordinates.
    steady = self.op_point.compute_freestream_velocity_geometry_axes()
    rotational = self.op_point.compute_rotation_velocity_geometry_axes(self.vortex_centers)

    self.steady_freestream_velocity = steady
    self.rotation_freestream_velocities = rotational
    # Broadcast the steady (3x1) freestream across all N vortex centers via a
    # double transpose; result is Nx3, the freestream velocity at each vortex center.
    self.freestream_velocities = cas.transpose(steady + cas.transpose(rotational))
def _calculate_vortex_strengths(self):
    """
    Sets up the implicit nonlinear system that determines the vortex strengths, and
    registers the governing equation as a constraint on the Opti problem.
    """
    if self.verbose:
        print("Calculating vortex strengths...")
    # Set up implicit solve (explicit is not possible for general nonlinear problem)
    self.vortex_strengths = self.opti.variable(self.n_panels)
    self.opti.set_initial(self.vortex_strengths, 0)

    # Find velocities
    # Induced velocity at each vortex center: matrix-vector products with the influence matrices.
    self.induced_velocities = cas.horzcat(
        self.Vij_x @ self.vortex_strengths,
        self.Vij_y @ self.vortex_strengths,
        self.Vij_z @ self.vortex_strengths,
    )
    self.velocities = self.induced_velocities + self.freestream_velocities + self.fuselage_velocities  # TODO just a reminder, fuse added here
    # Effective perpendicular angle of attack, in degrees: atan2 of (velocity along the
    # panel normal) over (velocity along the negative local-forward direction).
    self.alpha_eff_perpendiculars = cas.atan2(
        (
                self.velocities[:, 0] * self.normal_directions[:, 0] +
                self.velocities[:, 1] * self.normal_directions[:, 1] +
                self.velocities[:, 2] * self.normal_directions[:, 2]
        ),
        (
                self.velocities[:, 0] * -self.local_forward_directions[:, 0] +
                self.velocities[:, 1] * -self.local_forward_directions[:, 1] +
                self.velocities[:, 2] * -self.local_forward_directions[:, 2]
        )
    ) * (180 / cas.pi)
    self.velocity_magnitudes = np.sqrt(
        self.velocities[:, 0] ** 2 +
        self.velocities[:, 1] ** 2 +
        self.velocities[:, 2] ** 2
    )
    self.Res = self.op_point.density * self.velocity_magnitudes * self.chords / self.op_point.viscosity
    # NOTE(review): `self.op_point.mach` is not called here, while `_setup_geometry` uses
    # `self.op_point.mach()` — confirm which API this OperatingPoint version exposes.
    self.machs = [self.op_point.mach] * self.n_panels  # TODO incorporate sweep effects here!

    # Get perpendicular parameters
    # cos(local sweep): projection of the unit local flow onto the negative local-forward direction.
    self.cos_sweeps = (
            self.velocities[:, 0] * -self.local_forward_directions[:, 0] +
            self.velocities[:, 1] * -self.local_forward_directions[:, 1] +
            self.velocities[:, 2] * -self.local_forward_directions[:, 2]
    ) / self.velocity_magnitudes
    self.chord_perpendiculars = self.chords * self.cos_sweeps
    self.velocity_magnitude_perpendiculars = self.velocity_magnitudes * self.cos_sweeps
    self.Res_perpendicular = self.Res * self.cos_sweeps
    # NOTE(review): `self.machs` is a Python list here, so `list * cos_sweeps` relies on the
    # right operand's multiplication semantics — confirm this yields the intended elementwise product.
    self.machs_perpendicular = self.machs * self.cos_sweeps

    # Evaluate each panel's sectional polar (a callable) at its local effective conditions.
    CL_locals = [
        self.CL_functions[i](
            alpha=self.alpha_eff_perpendiculars[i],
            Re=self.Res_perpendicular[i],
            mach=self.machs_perpendicular[i],
        ) for i in range(self.n_panels)
    ]
    CDp_locals = [
        self.CD_functions[i](
            alpha=self.alpha_eff_perpendiculars[i],
            Re=self.Res_perpendicular[i],
            mach=self.machs_perpendicular[i],
        ) for i in range(self.n_panels)
    ]
    Cm_locals = [
        self.CM_functions[i](
            alpha=self.alpha_eff_perpendiculars[i],
            Re=self.Res_perpendicular[i],
            mach=self.machs_perpendicular[i],
        ) for i in range(self.n_panels)
    ]
    self.CL_locals = cas.vertcat(*CL_locals)
    self.CDp_locals = cas.vertcat(*CDp_locals)
    self.Cm_locals = cas.vertcat(*Cm_locals)

    # Cross product of the local velocity with the bound vortex leg, per panel (expanded by hand).
    self.Vi_cross_li = cas.horzcat(
        self.velocities[:, 1] * self.vortex_bound_leg[:, 2] - self.velocities[:, 2] * self.vortex_bound_leg[:, 1],
        self.velocities[:, 2] * self.vortex_bound_leg[:, 0] - self.velocities[:, 0] * self.vortex_bound_leg[:, 2],
        self.velocities[:, 0] * self.vortex_bound_leg[:, 1] - self.velocities[:, 1] * self.vortex_bound_leg[:, 0],
    )
    Vi_cross_li_magnitudes = np.sqrt(
        self.Vi_cross_li[:, 0] ** 2 +
        self.Vi_cross_li[:, 1] ** 2 +
        self.Vi_cross_li[:, 2] ** 2
    )

    # self.opti.subject_to([
    #     self.vortex_strengths * Vi_cross_li_magnitudes ==
    #     0.5 * self.velocity_magnitude_perpendiculars ** 2 * self.CL_locals * self.areas
    # ])
    # Governing equation (rearranged form of the commented version above):
    # the Kutta-Joukowski lift of each panel must equal its sectional-polar lift.
    self.opti.subject_to([
        self.vortex_strengths * Vi_cross_li_magnitudes * 2 / self.velocity_magnitude_perpendiculars ** 2 / self.areas ==
        self.CL_locals
    ])
def _calculate_forces(self):
    """
    Computes total forces and moments (inviscid/induced + viscous profile contributions),
    in geometry and wind axes, plus nondimensional coefficients.

    Results are stored on the instance: `lift_force`, `drag_force`, `side_force`,
    `CL`, `CD`, `CDi`, `CDp`, `CY`, `Cl`, `Cm`, `Cn`, `CL_over_CD`, and intermediates.
    """
    if self.verbose:
        print("Calculating induced forces...")
    # Kutta-Joukowski theorem: F = rho * (V x l) * gamma, per panel, in geometry axes.
    self.forces_inviscid_geometry = self.op_point.density * self.Vi_cross_li * self.vortex_strengths
    force_total_inviscid_geometry = cas.vertcat(
        cas.sum1(self.forces_inviscid_geometry[:, 0]),
        cas.sum1(self.forces_inviscid_geometry[:, 1]),
        cas.sum1(self.forces_inviscid_geometry[:, 2]),
    )  # Remember, this is in GEOMETRY AXES, not WIND AXES or BODY AXES.
    if self.run_symmetric:
        # Add the mirrored (unmodeled) half's contribution, for panels flagged symmetric.
        forces_inviscid_geometry_from_symmetry = cas.if_else(
            self.use_symmetry,
            reflect_over_XZ_plane(self.forces_inviscid_geometry),
            0
        )
        force_total_inviscid_geometry_from_symmetry = cas.vertcat(
            cas.sum1(forces_inviscid_geometry_from_symmetry[:, 0]),
            cas.sum1(forces_inviscid_geometry_from_symmetry[:, 1]),
            cas.sum1(forces_inviscid_geometry_from_symmetry[:, 2]),
        )
        force_total_inviscid_geometry += force_total_inviscid_geometry_from_symmetry
    self.force_total_inviscid_wind = cas.transpose(
        self.op_point.compute_rotation_matrix_wind_to_geometry()) @ force_total_inviscid_geometry

    if self.verbose:
        print("Calculating induced moments...")
    # Moment about the reference point: r x F, per panel.
    self.moments_inviscid_geometry = cas.cross(
        cas.transpose(cas.transpose(self.vortex_centers) - self.airplane.xyz_ref),
        self.forces_inviscid_geometry
    )
    moment_total_inviscid_geometry = cas.vertcat(
        cas.sum1(self.moments_inviscid_geometry[:, 0]),
        cas.sum1(self.moments_inviscid_geometry[:, 1]),
        cas.sum1(self.moments_inviscid_geometry[:, 2]),
    )  # Remember, this is in GEOMETRY AXES, not WIND AXES or BODY AXES.
    if self.run_symmetric:
        # Moments of the mirrored half flip sign under XZ-plane reflection.
        moments_inviscid_geometry_from_symmetry = cas.if_else(
            self.use_symmetry,
            -reflect_over_XZ_plane(self.moments_inviscid_geometry),
            0
        )
        moment_total_inviscid_geometry_from_symmetry = cas.vertcat(
            cas.sum1(moments_inviscid_geometry_from_symmetry[:, 0]),
            cas.sum1(moments_inviscid_geometry_from_symmetry[:, 1]),
            cas.sum1(moments_inviscid_geometry_from_symmetry[:, 2]),
        )
        moment_total_inviscid_geometry += moment_total_inviscid_geometry_from_symmetry
    self.moment_total_inviscid_wind = cas.transpose(
        self.op_point.compute_rotation_matrix_wind_to_geometry()) @ moment_total_inviscid_geometry

    if self.verbose:
        print("Calculating profile forces...")
    # Profile (viscous) drag: directed along the local velocity at each panel.
    self.forces_profile_geometry = (
            (0.5 * self.op_point.density * self.velocity_magnitudes * self.velocities)
            * self.CDp_locals * self.areas
    )
    force_total_profile_geometry = cas.vertcat(
        cas.sum1(self.forces_profile_geometry[:, 0]),
        cas.sum1(self.forces_profile_geometry[:, 1]),
        cas.sum1(self.forces_profile_geometry[:, 2]),
    )
    if self.run_symmetric:
        forces_profile_geometry_from_symmetry = cas.if_else(
            self.use_symmetry,
            reflect_over_XZ_plane(self.forces_profile_geometry),
            0
        )
        force_total_profile_geometry_from_symmetry = cas.vertcat(
            cas.sum1(forces_profile_geometry_from_symmetry[:, 0]),
            cas.sum1(forces_profile_geometry_from_symmetry[:, 1]),
            cas.sum1(forces_profile_geometry_from_symmetry[:, 2]),
        )
        force_total_profile_geometry += force_total_profile_geometry_from_symmetry
    self.force_total_profile_wind = cas.transpose(
        self.op_point.compute_rotation_matrix_wind_to_geometry()) @ force_total_profile_geometry

    if self.verbose:
        print("Calculating profile moments...")
    self.moments_profile_geometry = cas.cross(
        cas.transpose(cas.transpose(self.vortex_centers) - self.airplane.xyz_ref),
        self.forces_profile_geometry
    )
    moment_total_profile_geometry = cas.vertcat(
        cas.sum1(self.moments_profile_geometry[:, 0]),
        cas.sum1(self.moments_profile_geometry[:, 1]),
        cas.sum1(self.moments_profile_geometry[:, 2]),
    )
    if self.run_symmetric:
        moments_profile_geometry_from_symmetry = cas.if_else(
            self.use_symmetry,
            -reflect_over_XZ_plane(self.moments_profile_geometry),
            0
        )
        moment_total_profile_geometry_from_symmetry = cas.vertcat(
            cas.sum1(moments_profile_geometry_from_symmetry[:, 0]),
            cas.sum1(moments_profile_geometry_from_symmetry[:, 1]),
            cas.sum1(moments_profile_geometry_from_symmetry[:, 2]),
        )
        moment_total_profile_geometry += moment_total_profile_geometry_from_symmetry
    self.moment_total_profile_wind = cas.transpose(
        self.op_point.compute_rotation_matrix_wind_to_geometry()) @ moment_total_profile_geometry

    if self.verbose:
        print("Calculating pitching moments...")
    # Sectional pitching moment acts along the bound leg's projection onto the YZ plane.
    # BUGFIX: copy before zeroing the x-component. The previous code did
    # `bound_leg_YZ = self.vortex_bound_leg`, which ALIASES the attribute, so the in-place
    # assignment below silently corrupted `self.vortex_bound_leg` as a side effect.
    # Multiplying by 1 produces a fresh object for both numpy and CasADi operands.
    bound_leg_YZ = 1 * self.vortex_bound_leg
    bound_leg_YZ[:, 0] = 0
    self.moments_pitching_geometry = (
            (0.5 * self.op_point.density * self.velocity_magnitudes ** 2) *
            self.Cm_locals * self.chords ** 2 * bound_leg_YZ
    )
    moment_total_pitching_geometry = cas.vertcat(
        cas.sum1(self.moments_pitching_geometry[:, 0]),
        cas.sum1(self.moments_pitching_geometry[:, 1]),
        cas.sum1(self.moments_pitching_geometry[:, 2]),
    )
    if self.run_symmetric:
        moments_pitching_geometry_from_symmetry = cas.if_else(
            self.use_symmetry,
            -reflect_over_XZ_plane(self.moments_pitching_geometry),
            0
        )
        moment_total_pitching_geometry_from_symmetry = cas.vertcat(
            cas.sum1(moments_pitching_geometry_from_symmetry[:, 0]),
            cas.sum1(moments_pitching_geometry_from_symmetry[:, 1]),
            cas.sum1(moments_pitching_geometry_from_symmetry[:, 2]),
        )
        moment_total_pitching_geometry += moment_total_pitching_geometry_from_symmetry
    self.moment_total_pitching_wind = cas.transpose(
        self.op_point.compute_rotation_matrix_wind_to_geometry()) @ moment_total_pitching_geometry

    if self.verbose:
        print("Calculating total forces and moments...")
    self.force_total_wind = self.force_total_inviscid_wind + self.force_total_profile_wind
    # NOTE(review): `moment_total_pitching_wind` is computed above but NOT included in
    # `moment_total_wind` — confirm whether this omission is intentional.
    self.moment_total_wind = self.moment_total_inviscid_wind + self.moment_total_profile_wind

    # Calculate dimensional forces
    self.lift_force = -self.force_total_wind[2]
    self.drag_force = -self.force_total_wind[0]
    self.drag_force_induced = -self.force_total_inviscid_wind[0]
    self.drag_force_profile = -self.force_total_profile_wind[0]
    self.side_force = self.force_total_wind[1]

    # Calculate nondimensional forces
    q = self.op_point.dynamic_pressure()
    s_ref = self.airplane.s_ref
    b_ref = self.airplane.b_ref
    c_ref = self.airplane.c_ref
    self.CL = self.lift_force / q / s_ref
    self.CD = self.drag_force / q / s_ref
    self.CDi = self.drag_force_induced / q / s_ref
    self.CDp = self.drag_force_profile / q / s_ref
    self.CY = self.side_force / q / s_ref
    self.Cl = self.moment_total_wind[0] / q / s_ref / b_ref
    self.Cm = self.moment_total_wind[1] / q / s_ref / c_ref
    self.Cn = self.moment_total_wind[2] / q / s_ref / b_ref

    # Solves divide by zero error
    self.CL_over_CD = cas.if_else(self.CD == 0, 0, self.CL / self.CD)
def calculate_Vij(self,
                  points,  # type: cas.MX
                  align_trailing_vortices_with_freestream=True,  # Otherwise, aligns with x-axis
                  ):
    """
    Calculates Vij, the velocity influence matrix (first index is collocation point number,
    second index is vortex number).

    Args:
        points: the list of points (Nx3) to calculate the velocity influence at.
        align_trailing_vortices_with_freestream: If True, the trailing vortex legs point along
            the freestream direction; otherwise, along the x-axis.

    Returns: (Vij_x, Vij_y, Vij_z), each (n_points x n_vortices).
    """
    n_points = points.shape[0]

    ### Trailing-leg direction u (shared by both the real and mirrored vortex systems).
    if align_trailing_vortices_with_freestream:
        freestream_direction = self.op_point.compute_freestream_direction_geometry_axes()
        u_x = freestream_direction[0]
        u_y = freestream_direction[1]
        u_z = freestream_direction[2]
    else:
        u_x = 1
        u_y = 0
        u_z = 0

    def horseshoe_influences(a_vertices, b_vertices, mirror):
        """
        Influence of one set of horseshoe vortices (defined by left/`a` and right/`b`
        vertices) on all `points`. If `mirror` is True, the vertices' y-coordinates are
        negated (reflection across the XZ plane).

        (This helper replaces two previously-duplicated ~55-line copies of the same
        arithmetic; the mirrored branch differed only in vertex choice and y-sign.)
        """
        y_sign = -1 if mirror else 1
        # Make a and b vectors.
        # a: Vector from all collocation points to all horseshoe vortex left vertices.
        # b: Vector from all collocation points to all horseshoe vortex right vertices.
        #     First index is collocation point #, second is vortex #.
        a_x = points[:, 0] - cas.repmat(cas.transpose(a_vertices[:, 0]), n_points, 1)
        a_y = points[:, 1] - cas.repmat(cas.transpose(y_sign * a_vertices[:, 1]), n_points, 1)
        a_z = points[:, 2] - cas.repmat(cas.transpose(a_vertices[:, 2]), n_points, 1)
        b_x = points[:, 0] - cas.repmat(cas.transpose(b_vertices[:, 0]), n_points, 1)
        b_y = points[:, 1] - cas.repmat(cas.transpose(y_sign * b_vertices[:, 1]), n_points, 1)
        b_z = points[:, 2] - cas.repmat(cas.transpose(b_vertices[:, 2]), n_points, 1)

        # Do some useful arithmetic
        a_cross_b_x = a_y * b_z - a_z * b_y
        a_cross_b_y = a_z * b_x - a_x * b_z
        a_cross_b_z = a_x * b_y - a_y * b_x
        a_dot_b = a_x * b_x + a_y * b_y + a_z * b_z
        a_cross_u_x = a_y * u_z - a_z * u_y
        a_cross_u_y = a_z * u_x - a_x * u_z
        a_cross_u_z = a_x * u_y - a_y * u_x
        a_dot_u = a_x * u_x + a_y * u_y + a_z * u_z
        b_cross_u_x = b_y * u_z - b_z * u_y
        b_cross_u_y = b_z * u_x - b_x * u_z
        b_cross_u_z = b_x * u_y - b_y * u_x
        b_dot_u = b_x * u_x + b_y * u_y + b_z * u_z
        norm_a = np.sqrt(a_x ** 2 + a_y ** 2 + a_z ** 2)
        norm_b = np.sqrt(b_x ** 2 + b_y ** 2 + b_z ** 2)
        norm_a_inv = 1 / norm_a
        norm_b_inv = 1 / norm_b

        # Handle the special case where the collocation point is along a vortex leg:
        # nudge the dot product to avoid a singular denominator.
        a_cross_b_squared = (
                a_cross_b_x ** 2 +
                a_cross_b_y ** 2 +
                a_cross_b_z ** 2
        )
        a_dot_b = cas.if_else(a_cross_b_squared < 1e-8, a_dot_b + 1, a_dot_b)
        a_cross_u_squared = (
                a_cross_u_x ** 2 +
                a_cross_u_y ** 2 +
                a_cross_u_z ** 2
        )
        a_dot_u = cas.if_else(a_cross_u_squared < 1e-8, a_dot_u + 1, a_dot_u)
        b_cross_u_squared = (
                b_cross_u_x ** 2 +
                b_cross_u_y ** 2 +
                b_cross_u_z ** 2
        )
        b_dot_u = cas.if_else(b_cross_u_squared < 1e-8, b_dot_u + 1, b_dot_u)

        # Calculate Vij
        term1 = (norm_a_inv + norm_b_inv) / (norm_a * norm_b + a_dot_b)
        term2 = norm_a_inv / (norm_a - a_dot_u)
        term3 = norm_b_inv / (norm_b - b_dot_u)

        Vx = 1 / (4 * np.pi) * (
                a_cross_b_x * term1 +
                a_cross_u_x * term2 -
                b_cross_u_x * term3
        )
        Vy = 1 / (4 * np.pi) * (
                a_cross_b_y * term1 +
                a_cross_u_y * term2 -
                b_cross_u_y * term3
        )
        Vz = 1 / (4 * np.pi) * (
                a_cross_b_z * term1 +
                a_cross_u_z * term2 -
                b_cross_u_z * term3
        )
        return Vx, Vy, Vz

    Vij_x, Vij_y, Vij_z = horseshoe_influences(
        self.left_vortex_vertices, self.right_vortex_vertices, mirror=False
    )

    if self.run_symmetric:  # If it's a symmetric problem, you've got to add the other side's influence.
        # The mirror image of a horseshoe vortex swaps its left/right vertices and negates y.
        Vij_x_from_symmetry, Vij_y_from_symmetry, Vij_z_from_symmetry = horseshoe_influences(
            self.right_vortex_vertices, self.left_vortex_vertices, mirror=True
        )
        # Only add the mirrored influence for vortices flagged as symmetric.
        Vij_x += cas.transpose(cas.if_else(self.use_symmetry, cas.transpose(Vij_x_from_symmetry), 0))
        Vij_y += cas.transpose(cas.if_else(self.use_symmetry, cas.transpose(Vij_y_from_symmetry), 0))
        Vij_z += cas.transpose(cas.if_else(self.use_symmetry, cas.transpose(Vij_z_from_symmetry), 0))

    return Vij_x, Vij_y, Vij_z
def calculate_fuselage_influences(self,
                                  points,  # type: cas.MX
                                  ):
    """
    Computes the velocity influence of all fuselages at the given points, modeling each
    fuselage segment as a single point source at its midpoint.

    Args:
        points: the list of points (Nx3) to calculate the fuselage-induced velocity at.

    Returns: an Nx3 array of induced velocities at those points.
    """
    n_points = points.shape[0]

    fuselage_influences_x = cas.GenDM_zeros(n_points, 1)
    fuselage_influences_y = cas.GenDM_zeros(n_points, 1)
    fuselage_influences_z = cas.GenDM_zeros(n_points, 1)

    for fuse_num in range(len(self.airplane.fuselages)):
        this_fuse_centerline_points = self.fuse_centerline_points[fuse_num]
        this_fuse_radii = self.fuse_radii[fuse_num]

        # Vectors from each field point (rows) to each fuselage centerline point (columns).
        dx = points[:, 0] - cas.repmat(cas.transpose(this_fuse_centerline_points[:, 0]), n_points, 1)
        dy = points[:, 1] - cas.repmat(cas.transpose(this_fuse_centerline_points[:, 1]), n_points, 1)
        dz = points[:, 2] - cas.repmat(cas.transpose(this_fuse_centerline_points[:, 2]), n_points, 1)

        # # Compressibility
        # dy *= self.beta
        # dz *= self.beta

        # For now, we're just putting a point source at the middle... # TODO make an actual line source
        # Midpoints of adjacent centerline stations, relative to each field point.
        source_x = (dx[:, 1:] + dx[:, :-1]) / 2
        source_y = (dy[:, 1:] + dy[:, :-1]) / 2
        source_z = (dz[:, 1:] + dz[:, :-1]) / 2

        areas = cas.pi * this_fuse_radii ** 2
        freestream_x_component = self.op_point.compute_freestream_velocity_geometry_axes()[
            0]  # TODO add in rotation corrections, add in doublets for alpha
        # Source strength per segment: the freestream x-velocity times the change in
        # cross-sectional area between adjacent stations.
        strengths = freestream_x_component * cas.diff(areas)

        # Point-source induced velocity: q / (4*pi) * r / |r|^3 for each (point, segment) pair.
        denominator = 4 * cas.pi * (source_x ** 2 + source_y ** 2 + source_z ** 2) ** 1.5
        u = cas.transpose(strengths * cas.transpose(source_x / denominator))
        v = cas.transpose(strengths * cas.transpose(source_y / denominator))
        w = cas.transpose(strengths * cas.transpose(source_z / denominator))

        # Accumulate each segment's contribution (sum over columns) at every field point.
        fuselage_influences_x += cas.sum2(u)
        fuselage_influences_y += cas.sum2(v)
        fuselage_influences_z += cas.sum2(w)

    fuselage_influences = cas.horzcat(
        fuselage_influences_x,
        fuselage_influences_y,
        fuselage_influences_z
    )

    return fuselage_influences
def get_induced_velocity_at_point(self, point):
    """
    Computes the vortex-system-induced velocity at the given point(s), substituting in the
    solved vortex strengths via the Opti debug interface.

    Should only be called after the problem has been solved.
    """
    if self.verbose and not self.opti.return_status() == 'Solve_Succeeded':
        print("WARNING: This method should only be used after a solution has been found!!!\n"
              "Running anyway for debugging purposes - this is likely to not work.")

    Vij_x, Vij_y, Vij_z = self.calculate_Vij(point)

    get = lambda x: self.opti.debug.value(x)
    # Matrix-vector product of each influence matrix with the (solved) vortex strengths,
    # evaluated numerically, then stacked into an Nx3 array.
    components = [
        get(Vij @ self.vortex_strengths)
        for Vij in (Vij_x, Vij_y, Vij_z)
    ]
    return np.vstack(components).T
def get_velocity_at_point(self, point):
    """
    Input: a Nx3 numpy array of points that you would like to know the velocities at.
    Output: a Nx3 numpy array of the velocities at those points.
    """
    # Induced velocity from the vortex system plus the fuselage source influences.
    Vi = (
            self.get_induced_velocity_at_point(point) +
            self.calculate_fuselage_influences(point)  # TODO just a reminder, fuse added here
    )
    freestream = self.op_point.compute_freestream_velocity_geometry_axes()
    # Broadcast the (3x1) freestream across all rows via a double transpose.
    return cas.transpose(cas.transpose(Vi) + freestream)
def calculate_streamlines(self,
                          seed_points=None,  # will be auto-calculated if not specified
                          n_steps=100,  # minimum of 2
                          length=None  # will be auto-calculated if not specified
                          ):
    """
    Traces streamlines via forward-Euler integration with a fixed spatial step size.

    Stores the result in `self.streamlines` as a list of Nx3 arrays, one per timestep.
    """
    if length is None:
        length = self.airplane.c_ref * 5
    if seed_points is None:
        # Seed at the midpoint of every panel's trailing edge.
        seed_points = (self.back_left_vertices + self.back_right_vertices) / 2

    # Resolution: each step advances a fixed spatial distance.
    step_size = length / n_steps

    streamlines = [seed_points]
    for _ in range(1, n_steps):
        velocity = self.get_velocity_at_point(streamlines[-1])
        speed = np.sqrt(
            velocity[:, 0] ** 2 + velocity[:, 1] ** 2 + velocity[:, 2] ** 2)
        # Normalize the velocity so the step length is uniform along the streamline.
        streamlines.append(streamlines[-1] + step_size * velocity / speed)

    self.streamlines = streamlines
def draw(self,
         data_to_plot=None,
         data_name=None,
         show=True,
         draw_streamlines=True,
         recalculate_streamlines=False
         ):
    """
    Draws the solution. Note: Must be called on a SOLVED AeroProblem object.
    To solve an AeroProblem, use opti.solve(). To substitute a solved solution, use ap = ap.substitute_solution(sol).

    Args:
        data_to_plot: Per-panel scalar values to color the mesh by. Defaults to the local
            normalized lift loading (Cl * c / c_ref).
        data_name: Title for the colorbar.
        show: Whether to display the figure interactively.
        draw_streamlines: Whether to trace and draw streamlines.
        recalculate_streamlines: Forces recomputation of streamlines even if cached.

    :return:
    """
    # TODO rewrite me
    if self.verbose:
        print("Drawing...")

    if self.verbose and not self.opti.return_status() == 'Solve_Succeeded':
        print("WARNING: This method should only be used after a solution has been found!\n"
              "Running anyway for debugging purposes - this is likely to not work...")

    # Do substitutions
    # Pull numeric values of the (symbolic) geometry and solution out of the Opti problem.
    get = lambda x: self.opti.debug.value(x)
    front_left_vertices = get(self.front_left_vertices)
    front_right_vertices = get(self.front_right_vertices)
    back_left_vertices = get(self.back_left_vertices)
    back_right_vertices = get(self.back_right_vertices)
    left_vortex_vertices = get(self.left_vortex_vertices)
    right_vortex_vertices = get(self.right_vortex_vertices)
    self.vortex_strengths = get(self.vortex_strengths)
    try:
        data_to_plot = get(data_to_plot)
    except NotImplementedError:
        # data_to_plot was already numeric (or None); leave it as-is.
        pass

    if data_to_plot is None:
        # Default quantity: local lift loading, normalized by the reference chord.
        CL_locals = get(self.CL_locals)
        chords = get(self.chords)
        c_ref = get(self.airplane.c_ref)
        data_name = "Cl * c / c_ref"
        data_to_plot = CL_locals * chords / c_ref

    fig = Figure3D()

    # Wing panels (colored quads) and their bound vortex legs (lines).
    for index in range(len(front_left_vertices)):
        fig.add_quad(
            points=[
                front_left_vertices[index, :],
                front_right_vertices[index, :],
                back_right_vertices[index, :],
                back_left_vertices[index, :],
            ],
            intensity=data_to_plot[index],
            outline=True,
            mirror=self.run_symmetric and self.use_symmetry[index]
        )
        fig.add_line(
            points=[
                left_vortex_vertices[index],
                right_vortex_vertices[index]
            ],
            mirror=self.run_symmetric and self.use_symmetry[index]
        )

    # Fuselages
    for fuse_id in range(len(self.airplane.fuselages)):
        fuse = self.airplane.fuselages[fuse_id]  # type: Fuselage

        for xsec_id in range(len(fuse.xsecs) - 1):
            xsec_1 = fuse.xsecs[xsec_id]  # type: FuselageXSec
            xsec_2 = fuse.xsecs[xsec_id + 1]  # type: FuselageXSec

            r1 = xsec_1.equivalent_radius(preserve="area")
            r2 = xsec_2.equivalent_radius(preserve="area")
            points_1 = np.zeros((fuse.circumferential_panels, 3))
            points_2 = np.zeros((fuse.circumferential_panels, 3))
            # Generate the two cross-section rings by rotating a radius vector about the x-axis.
            for point_index in range(fuse.circumferential_panels):
                rot = rotation_matrix_angle_axis(
                    2 * cas.pi * point_index / fuse.circumferential_panels,
                    [1, 0, 0],
                    True
                ).toarray()
                points_1[point_index, :] = rot @ np.array([0, 0, r1])
                points_2[point_index, :] = rot @ np.array([0, 0, r2])
            # Translate each ring to its cross-section's center.
            points_1 = points_1 + np.array(xsec_1.xyz_c).reshape(-1)
            points_2 = points_2 + np.array(xsec_2.xyz_c).reshape(-1)

            # Tile the segment between the two rings with quads (modulo to wrap around).
            for point_index in range(fuse.circumferential_panels):
                fig.add_quad(points=[
                    points_1[(point_index) % fuse.circumferential_panels, :],
                    points_1[(point_index + 1) % fuse.circumferential_panels, :],
                    points_2[(point_index + 1) % fuse.circumferential_panels, :],
                    points_2[(point_index) % fuse.circumferential_panels, :],
                ],
                    intensity=0,
                )

    if draw_streamlines:
        # Streamlines are cached on the instance; recompute only if absent or forced.
        if (not hasattr(self, 'streamlines')) or recalculate_streamlines:
            if self.verbose:
                print("Calculating streamlines...")
            # Seed at the midpoints of the panels' trailing edges.
            seed_points = (back_left_vertices + back_right_vertices) / 2
            self.calculate_streamlines(seed_points=seed_points)

        if self.verbose:
            print("Parsing streamline data...")
        # self.streamlines is a list of (n_streamlines x 3) arrays, one per timestep.
        n_streamlines = self.streamlines[0].shape[0]
        n_timesteps = len(self.streamlines)

        # Re-slice the per-timestep arrays into one polyline per streamline.
        for streamlines_num in range(n_streamlines):
            streamline = [self.streamlines[ts][streamlines_num, :] for ts in range(n_timesteps)]
            fig.add_streamline(
                points=streamline,
                mirror=self.run_symmetric
            )

    return fig.draw(
        show=show,
        colorbar_title=data_name
    )
import aerosandbox.numpy as np
from aerosandbox import ExplicitAnalysis
from aerosandbox.geometry import *
from aerosandbox.performance import OperatingPoint
from aerosandbox.aerodynamics.aero_3D.singularities.uniform_strength_horseshoe_singularities import \
calculate_induced_velocity_horseshoe
from typing import Dict, Any, List, Callable
import copy
### Define some helper functions that take a vector and make it a Nx1 or 1xN, respectively.
# Useful for broadcasting with matrices later.
def tall(array):
    """Reshape `array` into a column vector (N x 1), for broadcasting against matrices."""
    column_shape = (-1, 1)
    return np.reshape(array, column_shape)
def wide(array):
    """Reshape `array` into a row vector (1 x N), for broadcasting against matrices."""
    row_shape = (1, -1)
    return np.reshape(array, row_shape)
class VortexLatticeMethod(ExplicitAnalysis):
    """
    An explicit (linear) vortex-lattice-method aerodynamics analysis.

    Wings are meshed as thin (cambered) surfaces; each panel carries one horseshoe vortex,
    and a linear system is solved for the vortex strengths that enforce flow tangency at
    the collocation points (see `run()`).

    Usage example:
        >>> analysis = asb.VortexLatticeMethod(
        >>>     airplane=my_airplane,
        >>>     op_point=asb.OperatingPoint(
        >>>         velocity=100, # m/s
        >>>         alpha=5, # deg
        >>>         beta=4, # deg
        >>>         p=0.01, # rad/sec
        >>>         q=0.02, # rad/sec
        >>>         r=0.03, # rad/sec
        >>>     )
        >>> )
        >>> aero_data = analysis.run()
        >>> analysis.draw()
    """
    def __init__(self,
                 airplane: Airplane,
                 op_point: OperatingPoint,
                 xyz_ref: List[float] = None,
                 run_symmetric_if_possible: bool = False,
                 verbose: bool = False,
                 spanwise_resolution: int = 10,
                 spanwise_spacing_function: Callable[[float, float, float], np.ndarray] = np.cosspace,
                 chordwise_resolution: int = 10,
                 chordwise_spacing_function: Callable[[float, float, float], np.ndarray] = np.cosspace,
                 vortex_core_radius: float = 1e-8,
                 align_trailing_vortices_with_wind: bool = False,
                 ):
        """
        Initializes a new VortexLatticeMethod analysis.

        Args:
            airplane: The Airplane geometry to analyze.
            op_point: The OperatingPoint (flight condition) to analyze at.
            xyz_ref: Reference point for moments [m]. Defaults to `airplane.xyz_ref`.
            run_symmetric_if_possible: If True, attempt to exploit geometric symmetry.
                Currently raises NotImplementedError if set.
            verbose: If True, print progress messages during `run()`.
            spanwise_resolution: Spanwise subdivision ratio applied to each wing section.
            spanwise_spacing_function: Spacing function `(start, stop, n) -> ndarray` used spanwise.
            chordwise_resolution: Number of chordwise panels per wing.
            chordwise_spacing_function: Spacing function `(start, stop, n) -> ndarray` used chordwise.
            vortex_core_radius: Finite vortex-core radius [m] that regularizes the induced-velocity
                singularity near vortex filaments.
            align_trailing_vortices_with_wind: If True, trailing vortex legs point along the
                freestream direction; otherwise along the geometry +x axis.
        """
        super().__init__()

        ### Set defaults
        if xyz_ref is None:
            xyz_ref = airplane.xyz_ref

        self.airplane = airplane
        self.op_point = op_point
        self.xyz_ref = xyz_ref
        self.verbose = verbose
        self.spanwise_resolution = spanwise_resolution
        self.spanwise_spacing_function = spanwise_spacing_function
        self.chordwise_resolution = chordwise_resolution
        self.chordwise_spacing_function = chordwise_spacing_function
        self.vortex_core_radius = vortex_core_radius
        self.align_trailing_vortices_with_wind = align_trailing_vortices_with_wind

        ### Determine whether you should run the problem as symmetric
        self.run_symmetric = False
        if run_symmetric_if_possible:
            raise NotImplementedError("VLM with symmetry detection not yet implemented!")
            # try:
            #     self.run_symmetric = (  # Satisfies assumptions
            #             self.op_point.beta == 0 and
            #             self.op_point.p == 0 and
            #             self.op_point.r == 0 and
            #             self.airplane.is_entirely_symmetric()
            #     )
            # except RuntimeError:  # Required because beta, p, r, etc. may be non-numeric (e.g. opti variables)
            #     pass
def __repr__(self):
return self.__class__.__name__ + "(\n\t" + "\n\t".join([
f"airplane={self.airplane}",
f"op_point={self.op_point}",
f"xyz_ref={self.xyz_ref}",
]) + "\n)"
    def run(self) -> Dict[str, Any]:
        """
        Computes the aerodynamic forces.

        Returns a dictionary with keys:

            - 'F_g' : an [x, y, z] list of forces in geometry axes [N]
            - 'F_b' : an [x, y, z] list of forces in body axes [N]
            - 'F_w' : an [x, y, z] list of forces in wind axes [N]
            - 'M_g' : an [x, y, z] list of moments about geometry axes [Nm]
            - 'M_b' : an [x, y, z] list of moments about body axes [Nm]
            - 'M_w' : an [x, y, z] list of moments about wind axes [Nm]
            - 'L' : the lift force [N]. Definitionally, this is in wind axes.
            - 'Y' : the side force [N]. This is in wind axes.
            - 'D' : the drag force [N]. Definitionally, this is in wind axes.
            - 'l_b', the rolling moment, in body axes [Nm]. Positive is roll-right.
            - 'm_b', the pitching moment, in body axes [Nm]. Positive is pitch-up.
            - 'n_b', the yawing moment, in body axes [Nm]. Positive is nose-right.
            - 'CL', the lift coefficient [-]. Definitionally, this is in wind axes.
            - 'CY', the sideforce coefficient [-]. This is in wind axes.
            - 'CD', the drag coefficient [-]. Definitionally, this is in wind axes.
            - 'Cl', the rolling coefficient [-], in body axes
            - 'Cm', the pitching coefficient [-], in body axes
            - 'Cn', the yawing coefficient [-], in body axes

        Nondimensional values are nondimensionalized using reference values in the VortexLatticeMethod.airplane object.
        """
        if self.verbose:
            print("Meshing...")

        ##### Make Panels
        front_left_vertices = []
        back_left_vertices = []
        back_right_vertices = []
        front_right_vertices = []
        is_trailing_edge = []

        for wing in self.airplane.wings:
            if self.spanwise_resolution > 1:
                wing = wing.subdivide_sections(
                    ratio=self.spanwise_resolution,
                    spacing_function=self.spanwise_spacing_function
                )

            points, faces = wing.mesh_thin_surface(
                method="quad",
                chordwise_resolution=self.chordwise_resolution,
                chordwise_spacing_function=self.chordwise_spacing_function,
                add_camber=True
            )
            front_left_vertices.append(points[faces[:, 0], :])
            back_left_vertices.append(points[faces[:, 1], :])
            back_right_vertices.append(points[faces[:, 2], :])
            front_right_vertices.append(points[faces[:, 3], :])
            # Every `chordwise_resolution`-th face (1-indexed) is flagged as a trailing-edge panel.
            is_trailing_edge.append(
                (np.arange(len(faces)) + 1) % self.chordwise_resolution == 0
            )

        front_left_vertices = np.concatenate(front_left_vertices)
        back_left_vertices = np.concatenate(back_left_vertices)
        back_right_vertices = np.concatenate(back_right_vertices)
        front_right_vertices = np.concatenate(front_right_vertices)
        is_trailing_edge = np.concatenate(is_trailing_edge)

        ### Compute panel statistics
        # The cross product of a quad's diagonals has magnitude equal to twice its area,
        # and points along the panel normal.
        diag1 = front_right_vertices - back_left_vertices
        diag2 = front_left_vertices - back_right_vertices
        cross = np.cross(diag1, diag2)
        cross_norm = np.linalg.norm(cross, axis=1)
        normal_directions = cross / tall(cross_norm)
        areas = cross_norm / 2

        # Compute the location of points of interest on each panel
        # Bound vortex leg at the panel quarter-chord; collocation point at the three-quarter-chord.
        left_vortex_vertices = 0.75 * front_left_vertices + 0.25 * back_left_vertices
        right_vortex_vertices = 0.75 * front_right_vertices + 0.25 * back_right_vertices
        vortex_centers = (left_vortex_vertices + right_vortex_vertices) / 2
        vortex_bound_leg = right_vortex_vertices - left_vortex_vertices
        collocation_points = (
                0.5 * (0.25 * front_left_vertices + 0.75 * back_left_vertices) +
                0.5 * (0.25 * front_right_vertices + 0.75 * back_right_vertices)
        )

        ### Save things to the instance for later access
        self.front_left_vertices = front_left_vertices
        self.back_left_vertices = back_left_vertices
        self.back_right_vertices = back_right_vertices
        self.front_right_vertices = front_right_vertices
        self.is_trailing_edge = is_trailing_edge
        self.normal_directions = normal_directions
        self.areas = areas
        self.left_vortex_vertices = left_vortex_vertices
        self.right_vortex_vertices = right_vortex_vertices
        self.vortex_centers = vortex_centers
        self.vortex_bound_leg = vortex_bound_leg
        self.collocation_points = collocation_points

        ##### Setup Operating Point
        if self.verbose:
            print("Calculating the freestream influence...")
        steady_freestream_velocity = self.op_point.compute_freestream_velocity_geometry_axes()  # Direction the wind is GOING TO, in geometry axes coordinates
        steady_freestream_direction = steady_freestream_velocity / np.linalg.norm(steady_freestream_velocity)
        rotation_freestream_velocities = self.op_point.compute_rotation_velocity_geometry_axes(
            collocation_points)

        freestream_velocities = np.add(wide(steady_freestream_velocity), rotation_freestream_velocities)
        # Nx3, represents the freestream velocity at each panel collocation point (c)

        # Normal component of the freestream at each collocation point; this is the RHS forcing term.
        freestream_influences = np.sum(freestream_velocities * normal_directions, axis=1)

        ### Save things to the instance for later access
        self.steady_freestream_velocity = steady_freestream_velocity
        self.steady_freestream_direction = steady_freestream_direction
        self.freestream_velocities = freestream_velocities

        ##### Setup Geometry
        ### Calculate AIC matrix
        if self.verbose:
            print("Calculating the collocation influence matrix...")
        # Unit-strength induced velocities of every horseshoe vortex (columns) at every
        # collocation point (rows).
        u_collocations_unit, v_collocations_unit, w_collocations_unit = calculate_induced_velocity_horseshoe(
            x_field=tall(collocation_points[:, 0]),
            y_field=tall(collocation_points[:, 1]),
            z_field=tall(collocation_points[:, 2]),
            x_left=wide(left_vortex_vertices[:, 0]),
            y_left=wide(left_vortex_vertices[:, 1]),
            z_left=wide(left_vortex_vertices[:, 2]),
            x_right=wide(right_vortex_vertices[:, 0]),
            y_right=wide(right_vortex_vertices[:, 1]),
            z_right=wide(right_vortex_vertices[:, 2]),
            trailing_vortex_direction=(
                steady_freestream_direction
                if self.align_trailing_vortices_with_wind else
                np.array([1, 0, 0])
            ),
            gamma=1.,
            vortex_core_radius=self.vortex_core_radius
        )

        # Project onto panel normals to form the aerodynamic influence coefficient (AIC) matrix.
        AIC = (
                u_collocations_unit * tall(normal_directions[:, 0]) +
                v_collocations_unit * tall(normal_directions[:, 1]) +
                w_collocations_unit * tall(normal_directions[:, 2])
        )

        ##### Calculate Vortex Strengths
        if self.verbose:
            print("Calculating vortex strengths...")

        # Enforce flow tangency at each collocation point: AIC @ gamma = -freestream_influences.
        self.vortex_strengths = np.linalg.solve(AIC, -freestream_influences)

        ##### Calculate forces
        ### Calculate Near-Field Forces and Moments
        # Governing Equation: The force on a straight, small vortex filament is F = rho * cross(V, l) * gamma,
        # where rho is density, V is the velocity vector, cross() is the cross product operator,
        # l is the vector of the filament itself, and gamma is the circulation.

        if self.verbose:
            print("Calculating forces on each panel...")
        # Calculate the induced velocity at the center of each bound leg
        V_centers = self.get_velocity_at_points(vortex_centers)

        # Calculate forces_inviscid_geometry, the force on the ith panel. Note that this is in GEOMETRY AXES,
        # not WIND AXES or BODY AXES.
        Vi_cross_li = np.cross(V_centers, vortex_bound_leg, axis=1)
        forces_geometry = self.op_point.atmosphere.density() * Vi_cross_li * tall(self.vortex_strengths)

        # Moments are taken about the reference point `self.xyz_ref`.
        moments_geometry = np.cross(
            np.add(vortex_centers, -wide(np.array(self.xyz_ref))),
            forces_geometry
        )

        # Calculate total forces and moments
        force_geometry = np.sum(forces_geometry, axis=0)
        moment_geometry = np.sum(moments_geometry, axis=0)

        force_body = self.op_point.convert_axes(
            force_geometry[0], force_geometry[1], force_geometry[2],
            from_axes="geometry",
            to_axes="body"
        )
        force_wind = self.op_point.convert_axes(
            force_body[0], force_body[1], force_body[2],
            from_axes="body",
            to_axes="wind"
        )
        moment_body = self.op_point.convert_axes(
            moment_geometry[0], moment_geometry[1], moment_geometry[2],
            from_axes="geometry",
            to_axes="body"
        )
        moment_wind = self.op_point.convert_axes(
            moment_body[0], moment_body[1], moment_body[2],
            from_axes="body",
            to_axes="wind"
        )

        ### Save things to the instance for later access
        self.forces_geometry = forces_geometry
        self.moments_geometry = moments_geometry
        self.force_geometry = force_geometry
        self.force_body = force_body
        self.force_wind = force_wind
        self.moment_geometry = moment_geometry
        self.moment_body = moment_body
        self.moment_wind = moment_wind

        # Calculate dimensional forces
        # Wind-axes sign convention: lift is -z, drag is -x, sideforce is +y.
        L = -force_wind[2]
        D = -force_wind[0]
        Y = force_wind[1]
        l_b = moment_body[0]
        m_b = moment_body[1]
        n_b = moment_body[2]

        # Calculate nondimensional forces
        q = self.op_point.dynamic_pressure()
        s_ref = self.airplane.s_ref
        b_ref = self.airplane.b_ref
        c_ref = self.airplane.c_ref
        CL = L / q / s_ref
        CD = D / q / s_ref
        CY = Y / q / s_ref
        Cl = l_b / q / s_ref / b_ref
        Cm = m_b / q / s_ref / c_ref
        Cn = n_b / q / s_ref / b_ref

        return {
            "F_g": force_geometry,
            "F_b": force_body,
            "F_w": force_wind,
            "M_g": moment_geometry,
            "M_b": moment_body,
            "M_w": moment_wind,
            "L"  : L,
            "D"  : D,
            "Y"  : Y,
            "l_b": l_b,
            "m_b": m_b,
            "n_b": n_b,
            "CL" : CL,
            "CD" : CD,
            "CY" : CY,
            "Cl" : Cl,
            "Cm" : Cm,
            "Cn" : Cn,
        }
    def run_with_stability_derivatives(self,
                                       alpha=True,
                                       beta=True,
                                       p=True,
                                       q=True,
                                       r=True,
                                       ):
        """
        Computes the aerodynamic forces and moments on the airplane, and the stability derivatives.

        Arguments essentially determine which stability derivatives are computed. If a stability derivative is not
        needed, leaving it False will speed up the computation.

        Args:
            - alpha (bool): If True, compute the stability derivatives with respect to the angle of attack (alpha).
            - beta (bool): If True, compute the stability derivatives with respect to the sideslip angle (beta).
            - p (bool): If True, compute the stability derivatives with respect to the body-axis roll rate (p).
            - q (bool): If True, compute the stability derivatives with respect to the body-axis pitch rate (q).
            - r (bool): If True, compute the stability derivatives with respect to the body-axis yaw rate (r).

        Returns: a dictionary with keys:

            - 'F_g' : an [x, y, z] list of forces in geometry axes [N]
            - 'F_b' : an [x, y, z] list of forces in body axes [N]
            - 'F_w' : an [x, y, z] list of forces in wind axes [N]
            - 'M_g' : an [x, y, z] list of moments about geometry axes [Nm]
            - 'M_b' : an [x, y, z] list of moments about body axes [Nm]
            - 'M_w' : an [x, y, z] list of moments about wind axes [Nm]
            - 'L' : the lift force [N]. Definitionally, this is in wind axes.
            - 'Y' : the side force [N]. This is in wind axes.
            - 'D' : the drag force [N]. Definitionally, this is in wind axes.
            - 'l_b', the rolling moment, in body axes [Nm]. Positive is roll-right.
            - 'm_b', the pitching moment, in body axes [Nm]. Positive is pitch-up.
            - 'n_b', the yawing moment, in body axes [Nm]. Positive is nose-right.
            - 'CL', the lift coefficient [-]. Definitionally, this is in wind axes.
            - 'CY', the sideforce coefficient [-]. This is in wind axes.
            - 'CD', the drag coefficient [-]. Definitionally, this is in wind axes.
            - 'Cl', the rolling coefficient [-], in body axes
            - 'Cm', the pitching coefficient [-], in body axes
            - 'Cn', the yawing coefficient [-], in body axes

        Along with additional keys, depending on the value of the `alpha`, `beta`, `p`, `q`, and `r` arguments. For
        example, if `alpha=True`, then the following additional keys will be present:

            - 'CLa', the lift coefficient derivative with respect to alpha [1/rad]
            - 'CDa', the drag coefficient derivative with respect to alpha [1/rad]
            - 'CYa', the sideforce coefficient derivative with respect to alpha [1/rad]
            - 'Cla', the rolling moment coefficient derivative with respect to alpha [1/rad]
            - 'Cma', the pitching moment coefficient derivative with respect to alpha [1/rad]
            - 'Cna', the yawing moment coefficient derivative with respect to alpha [1/rad]
            - 'x_np', the neutral point location in the x direction [m]

        (Similarly, `beta=True` also adds 'x_np_lateral', a lateral analogue computed from 'Cnb' and 'CYb'.)

        Nondimensional values are nondimensionalized using reference values in the
        VortexLatticeMethod.airplane object.

        Data types:
            - The "L", "Y", "D", "l_b", "m_b", "n_b", "CL", "CY", "CD", "Cl", "Cm", and "Cn" keys are:

                - floats if the OperatingPoint object is not vectorized (i.e., if all attributes of OperatingPoint
                are floats, not arrays).

                - arrays if the OperatingPoint object is vectorized (i.e., if any attribute of OperatingPoint is an
                array).

            - The "F_g", "F_b", "F_w", "M_g", "M_b", and "M_w" keys are always lists, which will contain either
            floats or arrays, again depending on whether the OperatingPoint object is vectorized or not.
        """
        abbreviations = {
            "alpha": "a",
            "beta" : "b",
            "p"    : "p",
            "q"    : "q",
            "r"    : "r",
        }
        # Finite-difference step sizes; rates are stepped in their nondimensional form
        # (e.g. pb/2V), hence the (2V / ref length) factors.
        finite_difference_amounts = {
            "alpha": 0.001,
            "beta" : 0.001,
            "p"    : 0.001 * (2 * self.op_point.velocity) / self.airplane.b_ref,
            "q"    : 0.001 * (2 * self.op_point.velocity) / self.airplane.c_ref,
            "r"    : 0.001 * (2 * self.op_point.velocity) / self.airplane.b_ref,
        }
        # Converts the raw finite differences into the conventional nondimensional derivatives
        # (per radian for angles; per nondimensional rate for p, q, r).
        scaling_factors = {
            "alpha": np.degrees(1),
            "beta" : np.degrees(1),
            "p"    : (2 * self.op_point.velocity) / self.airplane.b_ref,
            "q"    : (2 * self.op_point.velocity) / self.airplane.c_ref,
            "r"    : (2 * self.op_point.velocity) / self.airplane.b_ref,
        }

        original_op_point = self.op_point

        # Compute the point analysis, which returns a dictionary that we will later add key:value pairs to.
        run_base = self.run()

        # Note for the loops below: here, "derivative numerator" and "... denominator" refer to the quantity being
        # differentiated and the variable of differentiation, respectively. In other words, in the expression df/dx,
        # the "numerator" is f, and the "denominator" is x. I realize that this would make a mathematician cry (as a
        # partial derivative is not a fraction), but the reality is that there seems to be no commonly-accepted name
        # for these terms. (Curiously, this contrasts with integration, where there is an "integrand" and a "variable
        # of integration".)

        for derivative_denominator in abbreviations.keys():
            # NOTE(review): looks up the same-named function argument via locals(); this relies on no
            # local variable shadowing `alpha`/`beta`/`p`/`q`/`r` before this point.
            if not locals()[derivative_denominator]:  # Basically, if the parameter from the function input is not True,
                continue  # Skip this run.
            # This way, you can (optionally) speed up this routine if you only need static derivatives,
            # or longitudinal derivatives, etc.

            # These lines make a copy of the original operating point, incremented by the finite difference amount
            # along the variable defined by derivative_denominator.
            # (copy.copy is shallow; only the attribute set below is changed on the copy.)
            incremented_op_point = copy.copy(original_op_point)
            incremented_op_point.__setattr__(
                derivative_denominator,
                original_op_point.__getattribute__(derivative_denominator) + finite_difference_amounts[
                    derivative_denominator]
            )

            vlm_incremented = copy.copy(self)
            vlm_incremented.op_point = incremented_op_point
            run_incremented = vlm_incremented.run()

            for derivative_numerator in [
                "CL",
                "CD",
                "CY",
                "Cl",
                "Cm",
                "Cn",
            ]:
                derivative_name = derivative_numerator + abbreviations[derivative_denominator]  # Gives "CLa"
                run_base[derivative_name] = (
                    (  # Finite-difference out the derivatives
                            run_incremented[derivative_numerator] - run_base[
                        derivative_numerator]
                    ) / finite_difference_amounts[derivative_denominator]
                    * scaling_factors[derivative_denominator]
                )

            ### Try to compute and append neutral point, if possible
            if derivative_denominator == "alpha":
                run_base["x_np"] = self.xyz_ref[0] - (
                        run_base["Cma"] * (self.airplane.c_ref / run_base["CLa"])
                )
            if derivative_denominator == "beta":
                run_base["x_np_lateral"] = self.xyz_ref[0] - (
                        run_base["Cnb"] * (self.airplane.b_ref / run_base["CYb"])
                )

        return run_base
def get_induced_velocity_at_points(self,
points: np.ndarray,
) -> np.ndarray:
"""
Computes the induced velocity at a set of points in the flowfield.
Args:
points: A Nx3 array of points that you would like to know the induced velocities at. Given in geometry axes.
Returns: A Nx3 of the induced velocity at those points. Given in geometry axes.
"""
u_induced, v_induced, w_induced = calculate_induced_velocity_horseshoe(
x_field=tall(points[:, 0]),
y_field=tall(points[:, 1]),
z_field=tall(points[:, 2]),
x_left=wide(self.left_vortex_vertices[:, 0]),
y_left=wide(self.left_vortex_vertices[:, 1]),
z_left=wide(self.left_vortex_vertices[:, 2]),
x_right=wide(self.right_vortex_vertices[:, 0]),
y_right=wide(self.right_vortex_vertices[:, 1]),
z_right=wide(self.right_vortex_vertices[:, 2]),
trailing_vortex_direction=self.steady_freestream_direction if self.align_trailing_vortices_with_wind else np.array(
[1, 0, 0]),
gamma=wide(self.vortex_strengths),
vortex_core_radius=self.vortex_core_radius
)
u_induced = np.sum(u_induced, axis=1)
v_induced = np.sum(v_induced, axis=1)
w_induced = np.sum(w_induced, axis=1)
V_induced = np.stack([
u_induced, v_induced, w_induced
], axis=1)
return V_induced
def get_velocity_at_points(self,
points: np.ndarray
) -> np.ndarray:
"""
Computes the velocity at a set of points in the flowfield.
Args:
points: A Nx3 array of points that you would like to know the velocities at. Given in geometry axes.
Returns: A Nx3 of the velocity at those points. Given in geometry axes.
"""
V_induced = self.get_induced_velocity_at_points(points)
rotation_freestream_velocities = self.op_point.compute_rotation_velocity_geometry_axes(
points
)
freestream_velocities = np.add(wide(self.steady_freestream_velocity), rotation_freestream_velocities)
V = V_induced + freestream_velocities
return V
def calculate_streamlines(self,
seed_points: np.ndarray = None,
n_steps: int = 300,
length: float = None,
) -> np.ndarray:
"""
Computes streamlines, starting at specific seed points.
After running this function, a new instance variable `VortexLatticeFilaments.streamlines` is computed
Uses simple forward-Euler integration with a fixed spatial stepsize (i.e., velocity vectors are normalized
before ODE integration). After investigation, it's not worth doing fancier ODE integration methods (adaptive
schemes, RK substepping, etc.), due to the near-singular conditions near vortex filaments.
Args:
seed_points: A Nx3 ndarray that contains a list of points where streamlines are started. Will be
auto-calculated if not specified.
n_steps: The number of individual streamline steps to trace. Minimum of 2.
length: The approximate total length of the streamlines desired, in meters. Will be auto-calculated if
not specified.
Returns:
streamlines: a 3D array with dimensions: (n_seed_points) x (3) x (n_steps).
Consists of streamlines data.
Result is also saved as an instance variable, VortexLatticeMethod.streamlines.
"""
if self.verbose:
print("Calculating streamlines...")
if length is None:
length = self.airplane.c_ref * 5
if seed_points is None:
left_TE_vertices = self.back_left_vertices[self.is_trailing_edge.astype(bool)]
right_TE_vertices = self.back_right_vertices[self.is_trailing_edge.astype(bool)]
N_streamlines_target = 200
seed_points_per_panel = np.maximum(1, N_streamlines_target // len(left_TE_vertices))
nondim_node_locations = np.linspace(0, 1, seed_points_per_panel + 1)
nondim_seed_locations = (nondim_node_locations[1:] + nondim_node_locations[:-1]) / 2
seed_points = np.concatenate([
x * left_TE_vertices + (1 - x) * right_TE_vertices
for x in nondim_seed_locations
])
streamlines = np.empty((len(seed_points), 3, n_steps))
streamlines[:, :, 0] = seed_points
for i in range(1, n_steps):
V = self.get_velocity_at_points(streamlines[:, :, i - 1])
streamlines[:, :, i] = (
streamlines[:, :, i - 1] +
length / n_steps * V / tall(np.linalg.norm(V, axis=1))
)
self.streamlines = streamlines
if self.verbose:
print("Streamlines calculated.")
return streamlines
    def draw(self,
             c: np.ndarray = None,
             cmap: str = None,
             colorbar_label: str = None,
             show: bool = True,
             show_kwargs: Dict = None,
             draw_streamlines=True,
             recalculate_streamlines=False,
             backend: str = "pyvista"
             ):
        """
        Draws the solution. Must be called on a VortexLatticeMethod object whose `run()` has already
        been executed, so that `self.vortex_strengths` and the mesh attributes exist.

        Args:
            c: A per-panel scalar used to color the mesh. Defaults to the computed vortex strengths.
            cmap: Colormap name, forwarded to the plotting backend.
            colorbar_label: Label for the colorbar / scalar bar.
            show: Whether to display the figure immediately.
            show_kwargs: Extra keyword arguments forwarded to the backend's show/draw call.
            draw_streamlines: Whether to compute (if not cached) and draw streamlines.
            recalculate_streamlines: Force recomputation of streamlines even if cached.
            backend: "plotly" or "pyvista".

        Returns: the backend's figure/plotter object.
        """
        if show_kwargs is None:
            show_kwargs = {}

        if c is None:
            c = self.vortex_strengths
            colorbar_label = "Vortex Strengths"

        if draw_streamlines:
            if (not hasattr(self, 'streamlines')) or recalculate_streamlines:
                self.calculate_streamlines()

        if backend == "plotly":
            from aerosandbox.visualization.plotly_Figure3D import Figure3D
            fig = Figure3D()
            for i in range(len(self.front_left_vertices)):
                fig.add_quad(
                    points=[
                        self.front_left_vertices[i, :],
                        self.back_left_vertices[i, :],
                        self.back_right_vertices[i, :],
                        self.front_right_vertices[i, :],
                    ],
                    intensity=c[i],
                    outline=True,
                )

            if draw_streamlines:
                for i in range(self.streamlines.shape[0]):
                    fig.add_streamline(self.streamlines[i, :, :].T)

            return fig.draw(
                show=show,
                colorbar_title=colorbar_label,
                **show_kwargs,
            )
        elif backend == "pyvista":
            import pyvista as pv
            plotter = pv.Plotter()
            plotter.title = "ASB VortexLatticeMethod"
            plotter.add_axes()
            plotter.show_grid(color='gray')

            ### Draw the airplane mesh
            # Vertices are concatenated group-by-group (FL, BL, BR, FR), so panel i's corners
            # live at indices i, i+N, i+2N, i+3N.
            points = np.concatenate([
                self.front_left_vertices,
                self.back_left_vertices,
                self.back_right_vertices,
                self.front_right_vertices
            ])
            N = len(self.front_left_vertices)
            range_N = np.arange(N)
            faces = tall(range_N) + wide(np.array([0, 1, 2, 3]) * N)

            # NOTE(review): `mesh_utils` is not imported explicitly in this file; presumably it
            # arrives via the star-import from aerosandbox.geometry — confirm.
            mesh = pv.PolyData(
                *mesh_utils.convert_mesh_to_polydata_format(points, faces)
            )
            scalar_bar_args = {}
            if colorbar_label is not None:
                scalar_bar_args["title"] = colorbar_label
            plotter.add_mesh(
                mesh=mesh,
                scalars=c,
                show_edges=True,
                show_scalar_bar=c is not None,
                scalar_bar_args=scalar_bar_args,
                cmap=cmap,
            )

            ### Draw the streamlines
            if draw_streamlines:
                import aerosandbox.tools.pretty_plots as p
                for i in range(self.streamlines.shape[0]):
                    plotter.add_mesh(
                        pv.Spline(self.streamlines[i, :, :].T),
                        color=p.adjust_lightness("#7700FF", 1.5),
                        opacity=0.7,
                        line_width=1
                    )

            if show:
                plotter.show(**show_kwargs)
            return plotter
        else:
            raise ValueError("Bad value of `backend`!")
if __name__ == '__main__':
    # Make the test-geometry folder importable, then pull in the sample airplane.
    import sys
    from pathlib import Path

    import aerosandbox as asb

    geometry_folder = Path(__file__).parent / "test_aero_3D" / "geometries"
    sys.path.insert(0, str(geometry_folder))

    from vanilla import airplane as vanilla

    # Run a VLM analysis on the sample airplane at a simple sea-level operating point.
    analysis = VortexLatticeMethod(
        airplane=vanilla,
        op_point=asb.OperatingPoint(
            atmosphere=asb.Atmosphere(altitude=0),
            velocity=10,
            alpha=0,
            beta=0,
            p=0,
            q=0,
            r=0,
        ),
        spanwise_resolution=12,
        chordwise_resolution=12,
    )
    outputs = analysis.run()

    for key, value in outputs.items():
        print(f"{str(key).rjust(10)} : {value}")
import aerosandbox as asb
import aerosandbox.numpy as np
import pytest
# Build a simple glider geometry + operating point, used below to cross-validate
# AeroBuildup against AVL and the VLM.
wing_airfoil = asb.Airfoil("naca0010")  # asb.Airfoil("sd7037")
tail_airfoil = asb.Airfoil("naca0010")

### Define the 3D geometry you want to analyze/optimize.
# Here, all distances are in meters and all angles are in degrees.
airplane = asb.Airplane(
    name="Peter's Glider",
    xyz_ref=[0.18 * 0.32, 0, 0],  # CG location
    wings=[
        asb.Wing(
            name="Main Wing",
            symmetric=True,  # Should this wing be mirrored across the XZ plane?
            xsecs=[  # The wing's cross ("X") sections
                asb.WingXSec(  # Root
                    xyz_le=[0, 0, 0],  # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
                    chord=0.18,
                    twist=0,  # degrees
                    airfoil=wing_airfoil,  # Airfoils are blended between a given XSec and the next one.
                ),
                asb.WingXSec(  # Mid
                    xyz_le=[0.01, 0.5, 0],
                    chord=0.16,
                    twist=0,
                    airfoil=wing_airfoil,
                ),
                asb.WingXSec(  # Tip
                    xyz_le=[0.08, 1, 0.1],
                    chord=0.08,
                    twist=-0,
                    airfoil=wing_airfoil,
                ),
            ]
        ),
        asb.Wing(
            name="Horizontal Stabilizer",
            symmetric=True,
            xsecs=[
                asb.WingXSec(  # root
                    xyz_le=[0, 0, 0],
                    chord=0.1,
                    twist=-10,
                    airfoil=tail_airfoil,
                ),
                asb.WingXSec(  # tip
                    xyz_le=[0.02, 0.17, 0],
                    chord=0.08,
                    twist=-10,
                    airfoil=tail_airfoil
                )
            ]
        ).translate([0.6, 0, 0.06]),
        asb.Wing(
            name="Vertical Stabilizer",
            symmetric=False,
            xsecs=[
                asb.WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=0.1,
                    twist=0,
                    airfoil=tail_airfoil,
                ),
                asb.WingXSec(
                    xyz_le=[0.04, 0, 0.15],
                    chord=0.06,
                    twist=0,
                    airfoil=tail_airfoil
                )
            ]
        ).translate([0.6, 0, 0.07])
    ],
    fuselages=[
        # Fuselage intentionally left out of this comparison; kept here for easy re-enabling.
        # asb.Fuselage(
        #     name="Fuselage",
        #     xsecs=[
        #         asb.FuselageXSec(
        #             xyz_c=[0.8 * xi - 0.1, 0, 0.1 * xi - 0.03],
        #             radius=0.6 * asb.Airfoil("dae51").local_thickness(x_over_c=xi)
        #         )
        #         for xi in np.cosspace(0, 1, 30)
        #     ]
        # )
    ]
)

op_point = asb.OperatingPoint(
    velocity=100,
    alpha=5,
    beta=0,
)
# Run the same airplane + operating point through three different aerodynamics solvers.
ab = asb.AeroBuildup(
    airplane,
    op_point,
).run_with_stability_derivatives()

av = asb.AVL(
    airplane,
    op_point
).run()

vl = asb.VortexLatticeMethod(
    airplane,
    op_point
).run_with_stability_derivatives()

# Union of the output keys from AeroBuildup and AVL, sorted for a stable table order.
keys = set()
keys.update(ab.keys())
keys.update(av.keys())
keys = list(keys)
keys.sort()
# Column headers for the comparison table. Trailing spaces pad the column widths.
titles = [
    'Output',
    'AeroBuildup',
    'AVL ',
    'VLM ',
    'AB & AVL Significantly Different?'
]


def println(*data):
    """Print one table row: strings are left-justified, numbers formatted to each column's header width."""
    cells = []
    for cell, header in zip(data, titles):
        width = len(header)
        if isinstance(cell, str):
            cells.append(cell.ljust(width))
        else:
            cells.append(f"{{0:{width}.3g}}".format(cell))
    print(" | ".join(cells))
println(*titles)
print("-" * 80)

# Compare AeroBuildup vs. AVL key-by-key, printing VLM alongside when available.
for k in keys:
    try:
        # Tolerances for pytest.approx. Moment-related coefficients (keys containing
        # 'l', 'm', or 'n') are noisier, so they get looser bounds.
        # Renamed from `rel`/`abs` so the `abs` builtin is not shadowed.
        rel_tol = 0.20
        abs_tol = 0.01
        if 'l' in k or 'm' in k or 'n' in k:
            rel_tol = 0.5
            abs_tol = 0.05

        differences = ab[k] != pytest.approx(av[k], rel=rel_tol, abs=abs_tol)

        differences_text = '*' if differences else ''
        if differences and ('D' in k):
            differences_text = 'Expected'  # Drag predictions are expected to disagree between methods.

        println(
            k,
            ab[k],
            av[k],
            vl[k] if k in vl.keys() else ' ' * 5 + '-',
            differences_text
        )
    except (KeyError, TypeError):
        # Some keys exist in only one solver's output, or hold non-numeric values; skip those rows.
        pass
import aerosandbox as asb
import aerosandbox.numpy as np
# Sample "conventional" (wing + tail + fuselage) glider geometry used by the 3D aero tests.
wing_airfoil = asb.Airfoil("sd7037")
tail_airfoil = asb.Airfoil("naca0010")

### Define the 3D geometry you want to analyze/optimize.
# Here, all distances are in meters and all angles are in degrees.
airplane = asb.Airplane(
    name="Peter's Glider",
    xyz_ref=[0, 0, 0],  # CG location
    wings=[
        asb.Wing(
            name="Main Wing",
            symmetric=True,  # Should this wing be mirrored across the XZ plane?
            xsecs=[  # The wing's cross ("X") sections
                asb.WingXSec(  # Root
                    xyz_le=[0, 0, 0],  # Coordinates of the XSec's leading edge, relative to the wing's leading edge.
                    chord=0.18,
                    twist=2,  # degrees
                    airfoil=wing_airfoil,  # Airfoils are blended between a given XSec and the next one.
                ),
                asb.WingXSec(  # Mid
                    xyz_le=[0.01, 0.5, 0],
                    chord=0.16,
                    twist=0,
                    airfoil=wing_airfoil,
                ),
                asb.WingXSec(  # Tip
                    xyz_le=[0.08, 1, 0.1],
                    chord=0.08,
                    twist=-2,
                    airfoil=wing_airfoil,
                ),
            ]
        ),
        asb.Wing(
            name="Horizontal Stabilizer",
            symmetric=True,
            xsecs=[
                asb.WingXSec(  # root
                    xyz_le=[0, 0, 0],
                    chord=0.1,
                    twist=-10,
                    airfoil=tail_airfoil,
                ),
                asb.WingXSec(  # tip
                    xyz_le=[0.02, 0.17, 0],
                    chord=0.08,
                    twist=-10,
                    airfoil=tail_airfoil
                )
            ]
        ).translate([0.6, 0, 0.06]),
        asb.Wing(
            name="Vertical Stabilizer",
            symmetric=False,
            xsecs=[
                asb.WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=0.1,
                    twist=0,
                    airfoil=tail_airfoil,
                ),
                asb.WingXSec(
                    xyz_le=[0.04, 0, 0.15],
                    chord=0.06,
                    twist=0,
                    airfoil=tail_airfoil
                )
            ]
        ).translate([0.6, 0, 0.07])
    ],
    fuselages=[
        asb.Fuselage(
            name="Fuselage",
            xsecs=[
                # Fuselage radius follows the thickness distribution of the DAE51 airfoil.
                asb.FuselageXSec(
                    xyz_c=[0.8 * xi - 0.1, 0, 0.1 * xi - 0.03],
                    radius=0.6 * asb.Airfoil("dae51").local_thickness(x_over_c=xi)
                )
                for xi in np.cosspace(0, 1, 30)
            ]
        )
    ]
)

if __name__ == '__main__':
    airplane.draw()
import aerosandbox as asb
import aerosandbox.numpy as np
# "Vanilla" sample airplane: a simple wing / H-stab / V-stab / fuselage geometry with
# explicit reference quantities, used by the 3D aero tests.
sd7037 = asb.Airfoil("sd7037")

airplane = asb.Airplane(
    name="Vanilla",
    xyz_ref=[0.5, 0, 0],  # moment reference point
    s_ref=9,
    c_ref=0.9,
    b_ref=10,
    wings=[
        asb.Wing(
            name="Wing",
            symmetric=True,
            xsecs=[
                asb.WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=1,
                    twist=2,
                    airfoil=sd7037,
                ),
                asb.WingXSec(
                    xyz_le=[0.2, 5, 1],
                    chord=0.6,
                    twist=2,
                    airfoil=sd7037,
                )
            ]
        ),
        asb.Wing(
            name="H-stab",
            symmetric=True,
            xsecs=[
                asb.WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=0.7,
                    airfoil=asb.Airfoil("naca0012")
                ),
                asb.WingXSec(
                    xyz_le=[0.14, 1.25, 0],
                    chord=0.42,
                    airfoil=asb.Airfoil("naca0012")
                ),
            ]
        ).translate([4, 0, 0]),
        asb.Wing(
            name="V-stab",
            xsecs=[
                asb.WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=0.7,
                    airfoil=asb.Airfoil("naca0012")
                ),
                asb.WingXSec(
                    xyz_le=[0.14, 0, 1],
                    chord=0.42,
                    airfoil=asb.Airfoil("naca0012")
                )
            ]
        ).translate([4, 0, 0])
    ],
    fuselages=[
        asb.Fuselage(
            name="Fuselage",
            xsecs=[
                # Fuselage radius follows the thickness distribution of the NACA 0024 airfoil.
                asb.FuselageXSec(
                    xyz_c=[xi * 5 - 0.5, 0, 0],
                    radius=asb.Airfoil("naca0024").local_thickness(x_over_c=xi)
                )
                for xi in np.cosspace(0, 1, 30)
            ]
        )
    ]
)

if __name__ == '__main__':
    airplane.draw()
import aerosandbox.numpy as np
def critical_mach(fineness_ratio_nose: float) -> float:
    """
    Returns the transonic critical Mach number for a streamlined fuselage.

    Fitted to data from Raymer "Aircraft Design: A Conceptual Approach" 2nd Ed., Fig. 12.28.
    See figure + study + fit in: /studies/FuselageCriticalMach/

    Args:
        fineness_ratio_nose: The fineness ratio of the nose section of the fuselage.

            Specifically, fineness_ratio_nose = 2 * L_n / d, where:

                * L_n is the length from the nose to the longitudinal location at which the fuselage cross section
                becomes essentially constant, and:

                * d is the body diameter at that location.

    Returns: The critical Mach number
    """
    ### Fit constants for the drag-divergence Mach number as a function of nose fineness ratio.
    a = 11.087202397070559
    b = 13.469755774708842
    c = 4.034476257077558

    mach_dd = 1 - (a / (2 * fineness_ratio_nose + b)) ** c

    ### The following approximate relation is derived in W.H. Mason, "Configuration Aerodynamics", Chapter 7. Transonic Aerodynamics of Airfoils and Wings.
    ### Equation 7-8 on Page 7-19.
    ### This is in turn based on Lock's proposed empirically-derived shape of the drag rise, from Hilton, W.F., High Speed Aerodynamics, Longmans, Green & Co., London, 1952, pp. 47-49
    return mach_dd - (0.1 / 80) ** (1 / 3)
def jorgensen_eta(fineness_ratio: float) -> float:
    """
    A fit for the eta parameter (crossflow lift multiplier) of a fuselage, as described in:

    Jorgensen, Leland Howard. "Prediction of Static Aerodynamic Characteristics for Slender Bodies
    Alone and with Lifting Surfaces to Very High Angles of Attack". NASA TR R-474. 1977.

    Fits performed in /studies/FuselageJorgensenEtaFitting/

    Args:
        fineness_ratio: The fineness ratio of the fuselage. (length / diameter)

    Returns: An estimate of eta.
    """
    ### Fit constants: two rational-decay terms in the fineness ratio.
    scale_1, center_1 = 23.009059965179222, -122.76900250914575
    scale_2, center_2 = 13.006453125841258, -24.367562906887436

    fr = fineness_ratio
    return 1 - scale_1 / (fr - center_1) - (scale_2 / (fr - center_2)) ** 2
def fuselage_base_drag_coefficient(mach: float) -> float:
    """
    Estimates the base drag coefficient of a cylindrical fuselage, via a fit to:

    MIL-HDBK-762: DESIGN OF AERODYNAMICALLY STABILIZED FREE ROCKETS:
        * Section 5-5.3.1 Body-of-Revolution Base Drag, Rocket Jet Plume-Off
        * Figure 5-140: Effects of Mach Number and Reynolds Number on Base Pressure

    (Fits in /studies/FuselageBaseDragCoefficient)

    Args:
        mach: Mach number [-]

    Returns: Fuselage base drag coefficient [-]
    """
    ### Fit coefficients
    a = 0.18024110740341143
    center_sup = -0.21737019935624047
    m_trans = 0.9985447737532848
    pc_sub = 0.15922582283573747
    pc_sup = 0.04698820458826384
    scale_sup = 0.34978926411193456
    trans_str = 9.999987483414937

    ### Supersonic branch: a constant plus a Gaussian bump in Mach number.
    cd_supersonic = pc_sup + a * np.exp(-(scale_sup * (mach - center_sup)) ** 2)

    ### Smoothly blend from the (constant) subsonic level to the supersonic branch
    ### across the transonic regime, centered at m_trans.
    return np.blend(
        trans_str * (mach - m_trans),
        cd_supersonic,
        pc_sub,
    )
def fuselage_form_factor(
        fineness_ratio: float,
        ratio_of_corner_radius_to_body_width: float = 0.5
):
    """
    Estimates the form factor of a fuselage, for use in parasite-drag buildups of the form:
        C_D = C_f * form_factor * (S_wet / S_ref)

    The body cross section is modeled as a rounded square with constant-radius-of-curvature
    fillets, so it can vary continuously from a true square to a true circle.

    Methodology from:
        Götten, Falk; Havermann, Marc; Braun, Carsten; Marino, Matthew; Bil, Cees.
        "Improved Form Factor for Drag Estimation of Fuselages with Various Cross Sections."
        AIAA Journal of Aircraft, 2021. DOI: 10.2514/1.C036032
        https://arc.aiaa.org/doi/10.2514/1.C036032

    Assumes fully-turbulent flow; the coefficient of determination found in the paper
    above was 0.95.

    Note: the value returned does not account for any base separation (other than minor
    aft-closure separation). The equations were also fit to relatively-shape-optimized
    fuselages, and will be overly-optimistic for unoptimized shapes.

    Args:
        fineness_ratio: The fineness ratio of the body (length / diameter).
        ratio_of_corner_radius_to_body_width: Cross-section shape parameter; the ratio of
            the fillet corner radius to the body width.
            * A value of 0 corresponds to a true square.
            * A value of 0.5 (default) corresponds to a true circle.

    Returns: The form factor of the body [-]
    """
    # Normalized corner-radius parameter: 0 -> square cross section, 1 -> circle.
    r_norm = 2 * ratio_of_corner_radius_to_body_width

    # Cross-section-shape coefficients of the fit (Götten et al., 2021).
    c_scale = 4.0001 - 0.825885 * r_norm ** 0.411795
    c_power = -2.27920 - 0.340977 * r_norm ** 7.54327
    c_offset = 1.11029 - 0.013846 * r_norm ** 1.34253

    # Power-law dependence on fineness ratio.
    return c_scale * fineness_ratio ** c_power + c_offset
import aerosandbox.numpy as np
from typing import Union
def calculate_induced_velocity_horseshoe(
        x_field: Union[float, np.ndarray],
        y_field: Union[float, np.ndarray],
        z_field: Union[float, np.ndarray],
        x_left: Union[float, np.ndarray],
        y_left: Union[float, np.ndarray],
        z_left: Union[float, np.ndarray],
        x_right: Union[float, np.ndarray],
        y_right: Union[float, np.ndarray],
        z_right: Union[float, np.ndarray],
        gamma: Union[float, np.ndarray] = 1,
        trailing_vortex_direction: np.ndarray = None,
        vortex_core_radius: float = 0,
) -> [Union[float, np.ndarray], Union[float, np.ndarray], Union[float, np.ndarray]]:
    """
    Calculates the induced velocity at a point:
        [x_field, y_field, z_field]
    in a 3D potential-flow flowfield.
    In this flowfield, the following singularity elements are assumed:
        * A single horseshoe vortex consisting of a bound leg and two trailing legs
    This function consists entirely of scalar, elementwise NumPy ufunc operations - so it can be vectorized as
    desired assuming input dimensions/broadcasting are compatible.
    Args:
        x_field: x-coordinate of the field point
        y_field: y-coordinate of the field point
        z_field: z-coordinate of the field point
        x_left: x-coordinate of the left vertex of the bound vortex
        y_left: y-coordinate of the left vertex of the bound vortex
        z_left: z-coordinate of the left vertex of the bound vortex
        x_right: x-coordinate of the right vertex of the bound vortex
        y_right: y-coordinate of the right vertex of the bound vortex
        z_right: z-coordinate of the right vertex of the bound vortex
        gamma: The strength of the horseshoe vortex filament.
        trailing_vortex_direction: The direction that the trailing legs of the horseshoe vortex extend. Usually,
            this is modeled as the direction of the freestream. Defaults to +x, i.e. [1, 0, 0].
        vortex_core_radius: To prevent a vortex singularity, here we use a Kaufmann vortex model. This parameter
            governs the radius of this vortex model. It should be significantly smaller (e.g., at least an order of
            magnitude smaller) than the smallest bound leg in the analysis in question.
    Returns: u, v, and w:
        The x-, y-, and z-direction induced velocities.
    """
    if trailing_vortex_direction is None:
        trailing_vortex_direction = np.array([1, 0, 0])
    # Check that the field-point coordinate inputs are shape-consistent with each other.
    np.assert_equal_shape({
        "x_field": x_field,
        "y_field": y_field,
        "z_field": z_field,
    })
    # Check that the vortex-vertex coordinate inputs are shape-consistent with each other.
    np.assert_equal_shape({
        "x_left" : x_left,
        "y_left" : y_left,
        "z_left" : z_left,
        "x_right": x_right,
        "y_right": y_right,
        "z_right": z_right,
    })
    # a: displacement vector from the left vertex of the bound leg to the field point.
    a_x = np.add(x_field, -x_left)
    a_y = np.add(y_field, -y_left)
    a_z = np.add(z_field, -z_left)
    # b: displacement vector from the right vertex of the bound leg to the field point.
    b_x = np.add(x_field, -x_right)
    b_y = np.add(y_field, -y_right)
    b_z = np.add(z_field, -z_right)
    # u: direction along which the two trailing legs extend to infinity.
    u_x = trailing_vortex_direction[0]
    u_y = trailing_vortex_direction[1]
    u_z = trailing_vortex_direction[2]
    # Handle the special case where the field point is on one of the legs (either bound or trailing)
    def smoothed_inv(x):
        "Approximates 1/x with a function that sharply goes to 0 in the x -> 0 limit."
        # Only pay for the regularized form if a nonzero core radius was requested;
        # otherwise fall back to the exact (singular) 1/x.
        if not np.all(vortex_core_radius == 0):
            return x / (x ** 2 + vortex_core_radius ** 2)
        else:
            return 1 / x
    ### Do some useful arithmetic
    # Hand-expanded cross products and dot products among a, b, and u.
    a_cross_b_x = a_y * b_z - a_z * b_y
    a_cross_b_y = a_z * b_x - a_x * b_z
    a_cross_b_z = a_x * b_y - a_y * b_x
    a_dot_b = a_x * b_x + a_y * b_y + a_z * b_z
    a_cross_u_x = a_y * u_z - a_z * u_y
    a_cross_u_y = a_z * u_x - a_x * u_z
    a_cross_u_z = a_x * u_y - a_y * u_x
    a_dot_u = a_x * u_x + a_y * u_y + a_z * u_z
    b_cross_u_x = b_y * u_z - b_z * u_y
    b_cross_u_y = b_z * u_x - b_x * u_z
    b_cross_u_z = b_x * u_y - b_y * u_x
    b_dot_u = b_x * u_x + b_y * u_y + b_z * u_z
    norm_a = (a_x ** 2 + a_y ** 2 + a_z ** 2) ** 0.5
    norm_b = (b_x ** 2 + b_y ** 2 + b_z ** 2) ** 0.5
    norm_a_inv = smoothed_inv(norm_a)
    norm_b_inv = smoothed_inv(norm_b)
    ### Calculate Vij
    # term1: Biot-Savart contribution of the (finite) bound leg.
    # term2, term3: contributions of the left and right semi-infinite trailing legs.
    term1 = (norm_a_inv + norm_b_inv) * smoothed_inv(norm_a * norm_b + a_dot_b)
    term2 = norm_a_inv * smoothed_inv(norm_a - a_dot_u)
    term3 = norm_b_inv * smoothed_inv(norm_b - b_dot_u)
    constant = gamma / (4 * np.pi)
    u = np.multiply(
        constant,
        (
                a_cross_b_x * term1 +
                a_cross_u_x * term2 -
                b_cross_u_x * term3
        )
    )
    v = np.multiply(
        constant,
        (
                a_cross_b_y * term1 +
                a_cross_u_y * term2 -
                b_cross_u_y * term3
        )
    )
    w = np.multiply(
        constant,
        (
                a_cross_b_z * term1 +
                a_cross_u_z * term2 -
                b_cross_u_z * term3
        )
    )
    return u, v, w
if __name__ == '__main__':
    ##### Check single vortex
    # Induced velocity at the origin from a unit-strength horseshoe whose bound leg
    # spans (-1, -1, 0) to (-1, 1, 0); trailing legs extend in +x (the default).
    u, v, w = calculate_induced_velocity_horseshoe(
        x_field=0,
        y_field=0,
        z_field=0,
        x_left=-1,
        y_left=-1,
        z_left=0,
        x_right=-1,
        y_right=1,
        z_right=0,
        gamma=1,
    )
    print(u, v, w)
    ##### Plot grid of single vortex
    # Build a uniform 30x30x30 grid of field points spanning [-2, 2] in each axis.
    args = (-2, 2, 30)
    x = np.linspace(*args)
    y = np.linspace(*args)
    z = np.linspace(*args)
    X, Y, Z = np.meshgrid(x, y, z)
    Xf = X.flatten()
    Yf = Y.flatten()
    Zf = Z.flatten()
    # Bound-leg endpoints of the horseshoe being visualized.
    left = [0, -1, 0]
    right = [0, 1, 0]
    Uf, Vf, Wf = calculate_induced_velocity_horseshoe(
        x_field=Xf,
        y_field=Yf,
        z_field=Zf,
        x_left=left[0],
        y_left=left[1],
        z_left=left[2],
        x_right=right[0],
        y_right=right[1],
        z_right=right[2],
        gamma=1,
    )
    pos = np.stack((Xf, Yf, Zf)).T
    dir = np.stack((Uf, Vf, Wf)).T
    # Rescale arrow lengths sublinearly (|V| -> |V|^0.2) so the plot remains
    # readable near the (nearly singular) vortex filament.
    dir_norm = np.reshape(np.linalg.norm(dir, axis=1), (-1, 1))
    dir = dir / dir_norm * dir_norm ** 0.2
    import pyvista as pv
    pv.set_plot_theme('dark')
    plotter = pv.Plotter()
    plotter.add_arrows(
        cent=pos,
        direction=dir,
        mag=0.15
    )
    # Draw the horseshoe itself: trailing leg, bound leg, trailing leg.
    plotter.add_lines(
        lines=np.array([
            [Xf.max(), left[1], left[2]],
            left,
            right,
            [Xf.max(), right[1], right[2]]
        ])
    )
    plotter.show_grid()
    plotter.show()
    ##### Check multiple vortices
    # Same grid as above, but now superposing two adjacent horseshoes of
    # differing strengths (vectorized via broadcasting below).
    args = (-2, 2, 30)
    x = np.linspace(*args)
    y = np.linspace(*args)
    z = np.linspace(*args)
    X, Y, Z = np.meshgrid(x, y, z)
    Xf = X.flatten()
    Yf = Y.flatten()
    Zf = Z.flatten()
    left = [0, -1, 0]
    center = [0, 0, 0]
    right = [0, 1, 0]
    lefts = np.array([left, center])
    rights = np.array([center, right])
    strengths = np.array([2, 1])
    # Reshape helpers: (1, n_field) x (n_vortices, 1) broadcasting produces one row
    # of induced velocities per vortex.
    def wide(array):
        return np.reshape(array, (1, -1))
    def tall(array):
        return np.reshape(array, (-1, 1))
    Uf_each, Vf_each, Wf_each = calculate_induced_velocity_horseshoe(
        x_field=wide(Xf),
        y_field=wide(Yf),
        z_field=wide(Zf),
        x_left=tall(lefts[:, 0]),
        y_left=tall(lefts[:, 1]),
        z_left=tall(lefts[:, 2]),
        x_right=tall(rights[:, 0]),
        y_right=tall(rights[:, 1]),
        z_right=tall(rights[:, 2]),
        gamma=tall(strengths),
    )
    # Superpose the contributions of the individual horseshoes.
    Uf = np.sum(Uf_each, axis=0)
    Vf = np.sum(Vf_each, axis=0)
    Wf = np.sum(Wf_each, axis=0)
    pos = np.stack((Xf, Yf, Zf)).T
    dir = np.stack((Uf, Vf, Wf)).T
    # Same sublinear arrow-length rescaling as above.
    dir_norm = np.reshape(np.linalg.norm(dir, axis=1), (-1, 1))
    dir = dir / dir_norm * dir_norm ** 0.2
    import pyvista as pv
    pv.set_plot_theme('dark')
    plotter = pv.Plotter()
    plotter.add_arrows(
        cent=pos,
        direction=dir,
        mag=0.15
    )
    plotter.add_lines(
        lines=np.array([
            [Xf.max(), left[1], left[2]],
            left,
            center,
            [Xf.max(), center[1], center[2]],
            center,
            right,
            [Xf.max(), right[1], right[2]]
        ])
    )
    plotter.show_grid()
    plotter.show()
import aerosandbox.numpy as np
from typing import Union
def calculate_induced_velocity_point_source(
        x_field: Union[float, np.ndarray],
        y_field: Union[float, np.ndarray],
        z_field: Union[float, np.ndarray],
        x_source: Union[float, np.ndarray],
        y_source: Union[float, np.ndarray],
        z_source: Union[float, np.ndarray],
        sigma: Union[float, np.ndarray] = 1,
        viscous_radius=0,
) -> [Union[float, np.ndarray], Union[float, np.ndarray], Union[float, np.ndarray]]:
    """
    Calculates the induced velocity at a point:
        [x_field, y_field, z_field]
    in a 3D potential-flow flowfield.

    In this flowfield, the following singularity elements are assumed:
        * A single point source

    This function consists entirely of scalar, elementwise NumPy ufunc operations - so it can be vectorized as
    desired assuming input dimensions/broadcasting are compatible.

    Args:
        x_field: x-coordinate of the field point
        y_field: y-coordinate of the field point
        z_field: z-coordinate of the field point
        x_source: x-coordinate of the point source
        y_source: y-coordinate of the point source
        z_source: z-coordinate of the point source
        sigma: The strength of the point source.
        viscous_radius: To prevent a singularity at the source location, the velocity field is regularized
            such that it smoothly goes to zero in the r -> 0 limit. This parameter governs the radius of that
            regularization. It should be significantly smaller (e.g., at least an order of magnitude smaller)
            than the smallest relevant length scale in the analysis in question.

    Returns: u, v, and w:
        The x-, y-, and z-direction induced velocities.
    """
    # Displacement vector from the source to the field point.
    dx = x_field - x_source
    dy = y_field - y_source
    dz = z_field - z_source
    r_squared = (
            dx ** 2 +
            dy ** 2 +
            dz ** 2
    )

    def smoothed_x_15_inv(x):
        """
        Approximates x^(-1.5) with a function that sharply goes to 0 in the x -> 0 limit.
        """
        # Only pay for the regularized form if a nonzero viscous radius was requested;
        # otherwise use the exact (singular) power law.
        if not np.all(viscous_radius == 0):
            return x / (x ** 2.5 + viscous_radius ** 2.5)
        else:
            return x ** -1.5

    # grad(phi) of a point source is (sigma / (4 pi r^2)) * r_hat; the r_squared^(-1.5)
    # factor is 1/r^2 combined with the 1/r normalization of (dx, dy, dz).
    grad_phi_multiplier = sigma * smoothed_x_15_inv(r_squared) / (4 * np.pi)
    u = grad_phi_multiplier * dx
    v = grad_phi_multiplier * dy
    w = grad_phi_multiplier * dz
    return u, v, w
if __name__ == '__main__':
    # Build a uniform 30x30x30 grid of field points spanning [-2, 2] in each axis.
    args = (-2, 2, 30)
    x = np.linspace(*args)
    y = np.linspace(*args)
    z = np.linspace(*args)
    X, Y, Z = np.meshgrid(x, y, z)
    Xf = X.flatten()
    Yf = Y.flatten()
    Zf = Z.flatten()
    # NOTE(review): these reshape helpers are unused in this demo; they appear to be
    # copied from the multi-singularity broadcasting example elsewhere in this package.
    def wide(array):
        return np.reshape(array, (1, -1))
    def tall(array):
        return np.reshape(array, (-1, 1))
    # Induced velocity at every grid point from a unit-strength source at (1, 0, 0).
    Uf, Vf, Wf = calculate_induced_velocity_point_source(
        x_field=Xf,
        y_field=Yf,
        z_field=Zf,
        x_source=1,
        y_source=0,
        z_source=0,
    )
    pos = np.stack((Xf, Yf, Zf)).T
    dir = np.stack((Uf, Vf, Wf)).T
    # Rescale arrow lengths sublinearly (|V| -> |V|^0.2) so the plot remains
    # readable near the (nearly singular) source point.
    dir_norm = np.reshape(np.linalg.norm(dir, axis=1), (-1, 1))
    dir = dir / dir_norm * dir_norm ** 0.2
    import pyvista as pv
    pv.set_plot_theme('dark')
    plotter = pv.Plotter()
    plotter.add_arrows(
        cent=pos,
        direction=dir,
        mag=0.15
    )
    plotter.show_grid()
    plotter.show()
from typing import List
import aerosandbox as asb
import aerosandbox.numpy as np
from aerosandbox.atmosphere import Atmosphere as atmo
import sympy as sym
from aerosandbox import cas
from numpy import pi
# set input parameters
airspeed = 5 # meters per second
rpm = 10000
altitude = 0 # meters
# air_density = atmo.get_density_at_altitude(altitude)
# mu = atmo.get_viscosity_from_temperature(atmo.get_temperature_at_altitude(altitude))
# speed_of_sound = 343
air_density = 1.225
mu = 0.178E-04
speed_of_sound = 340
## Prop Specs from CAM 6X3 for QPROP Validation
n_blades = 2 # number of blades
# give value in inches for some number of radial locations from root to tip
# tip radial location is propeller radius
radial_locations_in = np.array([0.75, 1, 1.5, 2, 2.5, 2.875, 3])
radial_locations_m = np.array([0.01905, 0.0254, 0.0381, 0.0508, 0.0635, 0.073025, 0.0762])
# # give value of blade chord in inches for each station
blade_chord_in = np.array([0.66, 0.69, 0.63, 0.55, 0.44, 0.30, 0.19])
blade_chord_m = np.array([0.016764, 0.017526, 0.016002, 0.01397, 0.011176, 0.00762, 0.004826])
# # give value of blade beta in degrees for each station
blade_beta_deg = np.array([27.5, 22, 15.2, 10.2, 6.5, 4.6, 4.2])
# # variable pitch angle
dBeta_deg = 0
# create subdivisions within the perscribed radial locations
divisions = 3
# def annick_propulsion_model(
# rpm: float,
# airspeed: float,
# air_density: float,
# mu: float,
# n_blades: int,
# radial_locations_m: np.ndarray,
# blade_chord_m: np.ndarray,
# blade_beta_deg: np.ndarray,
# dBeta_deg: float,
# divisions: float,
# ) -> [float, float]:
"""
Ideally:
* Physics-based where possible
* Where fitted correction factors need to be added, add them nondimensionally
* Theory from Drela's QPROP Formulation document found here:
http://web.mit.edu/drela/Public/web/qprop/qprop_theory.pdf
:param rpm: prop speed in revolutions per minute
:param airspeed: m/s
:param air_density:
:param mu:
:param n_blades:
:param blade_chord:
:param blade_twist:
:param dBeta:
:param divisions:
:return:
"""
# ## original CL function
# def airfoil_CL(alpha, Re, Ma):
# alpha_rad = alpha * pi / 180
# Cl = 2 * pi * alpha_rad
# return Cl
# Interpolation function
def interpolate(radial_locations, blade_chords, blade_betas, div):
    """
    Linearly interpolates the blade geometry onto a finer radial grid.

    Each segment between adjacent input stations is subdivided into `div` equal pieces,
    with chord and beta interpolated linearly onto the new stations. The tip station is
    appended once at the end.

    Args:
        radial_locations: Radial locations of the input stations [m]
        blade_chords: Blade chord at each input station [m]
        blade_betas: Blade twist (beta) at each input station [deg]
        div: Number of subdivisions per input segment

    Returns: (radial_locations_new, blade_chords_new, blade_betas_new), each an
        np.ndarray of length div * (len(radial_locations) - 1) + 1.
    """
    radial_locations_new = np.array([])
    blade_chords_new = np.array([])
    blade_betas_new = np.array([])
    for n in range(len(radial_locations) - 1):
        # Segment endpoints. (Bugfix: these previously read the global
        # `radial_locations_m` instead of the `radial_locations` argument.)
        r1 = radial_locations[n]
        r2 = radial_locations[n + 1]
        c1 = blade_chords[n]
        c2 = blade_chords[n + 1]
        b1 = blade_betas[n]
        b2 = blade_betas[n + 1]
        for i in range(0, div):
            radial_loc = r1 + (r2 - r1) * i / div
            radial_locations_new = np.append(radial_locations_new, radial_loc)
            chord = c1 + (radial_loc - r1) * (c2 - c1) / (r2 - r1)
            blade_chords_new = np.append(blade_chords_new, chord)
            beta = b1 + (radial_loc - r1) * (b2 - b1) / (r2 - r1)
            blade_betas_new = np.append(blade_betas_new, beta)
    # Append the tip station exactly once, after all segments. (Bugfix: doing this
    # inside the segment loop duplicates every interior station, which creates
    # zero-width blade sections downstream.)
    radial_locations_new = np.append(radial_locations_new, r2)
    blade_chords_new = np.append(blade_chords_new, c2)
    blade_betas_new = np.append(blade_betas_new, b2)
    return radial_locations_new, blade_chords_new, blade_betas_new
# QPROP CL function
def airfoil_CL(alpha, Re, Ma):
    """
    QPROP-style sectional lift model: a linear lift curve with a Prandtl-Glauert
    compressibility correction, clipped to the [CL_MIN, CL_MAX] range.

    Args:
        alpha: Angle of attack [deg]
        Re: Reynolds number [-] (unused by this model)
        Ma: Mach number [-]

    Returns: Sectional lift coefficient [-]
    """
    CL_0 = 0.5      # lift coefficient at zero angle of attack
    CL_ALPHA = 5.8  # lift-curve slope [1/rad]
    CL_MIN = -0.3   # negative-stall clipping limit
    CL_MAX = 1.2    # positive-stall clipping limit
    # Prandtl-Glauert compressibility factor
    pg_factor = (1 - Ma ** 2) ** 0.5
    cl_linear = (alpha * pi / 180 * CL_ALPHA + CL_0) / pg_factor
    # Clip to the stall limits
    return np.fmin(np.fmax(cl_linear, CL_MIN), CL_MAX)
# ## Peter Sharpe's CDp model
# def airfoil_CDp(alpha, Re, Ma, Cl):
# Re_exp = -0.5
# Re_ref = 1e6
# alpha_ref = 5
# cd_0 = 0.00540
# cd_a2 = 0.00848 - cd_0
# Cd = (
# cd_a2 * (alpha / alpha_ref) ** 2 + cd_0
# ) * (Re / Re_ref) ** Re_exp
# return Cd
## QPROP CDp model
def airfoil_CDp(alpha, Re, Ma, Cl):
    """
    QPROP-style sectional profile-drag model: a quadratic drag polar about CL_CD_0,
    scaled by a Reynolds-number power law, plus a separation-drag increment that is
    applied only when the lift coefficient is pinned at either stall limit of the
    companion airfoil_CL model.

    Args:
        alpha: Angle of attack [deg]
        Re: Reynolds number [-]
        Ma: Mach number [-] (unused by this model)
        Cl: Sectional lift coefficient, as returned by airfoil_CL [-]

    Returns: Sectional profile drag coefficient [-]
    """
    ### Model constants (matched to the airfoil_CL model above)
    RE_REF = 70000
    RE_EXP = -0.7
    CD_0 = 0.028
    CD_2 = 0.05
    CL_CD_0 = 0.5
    CL_0 = 0.5
    CL_ALPHA = 5.8
    CL_MIN = -0.3
    CL_MAX = 1.2
    # NOTE(review): currently unused; the stall term below uses alpha in degrees
    # rather than radians - confirm intended.
    alpha_rad = alpha * pi / 180
    ### Quadratic drag polar with Reynolds-number scaling
    cd = (CD_0 + CD_2 * (Cl - CL_CD_0) ** 2) * (Re / RE_REF) ** RE_EXP
    ### Post-stall separation drag, active only when Cl sits exactly on a clipping limit
    aCD0 = (CL_CD_0 - CL_0) / CL_ALPHA
    dcd_stall = 2 * (np.sin(alpha - aCD0)) ** 2
    if cas.is_equal(Cl, CL_MAX):
        cd = dcd_stall + cd
    if cas.is_equal(Cl, CL_MIN):
        cd = dcd_stall + cd
    return cd
# Refine the blade geometry onto the subdivided radial grid.
radial_locations_m, blade_chord_m, blade_beta_deg = interpolate(radial_locations_m, blade_chord_m, blade_beta_deg,
                                                               divisions)
n_stations = len(radial_locations_m) - 1
tip_radius = radial_locations_m[n_stations]  # use tip radial location as prop radius
omega = rpm * 2 * pi / 60  # radians per second
blade_twist_deg = blade_beta_deg + dBeta_deg
blade_twist_rad = blade_twist_deg * pi / 180
# terms to print (per-station results, accumulated for the QPROP-style table below)
radius = []
chord = []
beta = []
Cl = []
Cd = []
RE = []
Mach = []
effi = []
effp = []
Wa = []
a_swirl = []
adv_wake = []
alpha = []
Wt = []
torque = []
thrust = []
# Solve one small implicit problem per blade element (blade-element / vortex theory,
# following Drela's QPROP formulation).
for station in range(n_stations):  # TODO undo this
    # for station in [22]:
    # Element midpoint radius, width, chord, and twist.
    radial_loc = (radial_locations_m[station] + radial_locations_m[station + 1]) / 2
    blade_section = (radial_locations_m[station + 1] - radial_locations_m[station])
    chord_local = (blade_chord_m[station] + blade_chord_m[station + 1]) / 2
    twist_local_rad = (blade_twist_rad[station] + blade_twist_rad[station + 1]) / 2
    opti = asb.Opti()
    # v_a = opti.variable(init_guess=15)
    # v_t = opti.variable(init_guess=15)
    # u_a = opti.variable(init_guess=5)
    # Psi parameterizes the induced-velocity state (Drela's QPROP angle variable).
    Psi = opti.variable(init_guess=pi / 2)
    # h_ati = opti.variable(init_guess=0.01)
    # f = opti.variable(init_guess=300)
    # F = opti.variable(init_guess=1)
    # gamma = opti.variable(init_guess=1)
    ### Define velocity triangle components
    U_a = airspeed  # + u_a # Axial velocity w/o induced eff. assuming u_a = 0
    U_t = omega * radial_loc  # Tangential velocity w/o induced eff.
    U = (U_a ** 2 + U_t ** 2) ** 0.5  # Velocity magnitude
    W_a = 0.5 * U_a + 0.5 * U * np.sin(Psi)  # Axial velocity w/ induced eff.
    W_t = 0.5 * U_t + 0.5 * U * np.cos(Psi)  # Tangential velocity w/ induced eff.
    v_a = W_a - U_a  # Axial induced velocity
    v_t = U_t - W_t  # Tangential induced velocity
    W = (W_a ** 2 + W_t ** 2) ** 0.5
    Re = air_density * W * chord_local / mu
    Ma = W / speed_of_sound
    v = (v_a ** 2 + v_t ** 2) ** 0.5
    # Local wake advance ratio and Prandtl tip-loss factor F.
    loc_wake_adv_ratio = (radial_loc / tip_radius) * (W_a / W_t)
    f = (n_blades / 2) * (1 - radial_loc / tip_radius) * 1 / loc_wake_adv_ratio
    F = 2 / pi * np.arccos(np.exp(-f))
    ## Compute local blade quantities
    phi_rad = np.arctan2(W_a, W_t)  # local flow angle
    phi_deg = phi_rad * 180 / pi
    alpha_rad = twist_local_rad - phi_rad
    alpha_deg = alpha_rad * 180 / pi
    ### Compute sectional lift and drag
    cl = airfoil_CL(alpha_deg, Re, Ma)
    cd = airfoil_CDp(alpha_deg, Re, Ma, cl)
    gamma = 0.5 * W * chord_local * cl
    ### Add governing equations
    # Single active residual: blade circulation must be consistent with the
    # tip-loss-corrected helical-wake induced velocity (QPROP Eq. for Gamma).
    opti.subject_to([
        # 0.5 * v == 0.5 * U * cas.sin(Psi / 4),
        # v_a == v_t * W_t / W_a,
        # U ** 2 == v ** 2 + W ** 2,
        # gamma == -0.0145,
        # gamma == (4 * pi * radial_loc / n_blades) * F * (
        #         1 + ((4 * loc_wake_adv_ratio * tip_radius) / (pi * n_blades * radial_loc)) ** 2) ** 0.5,
        gamma == v_t * (4 * pi * radial_loc / n_blades) * F * (
                1 + ((4 * loc_wake_adv_ratio * tip_radius) / (pi * n_blades * radial_loc)) ** 2) ** 0.5,
        # vt**2*F**2*(1.+(4.*lam_w*R/(pi*B*r))**2) >= (B*G/(4.*pi*r))**2,
        # f + (radial_loc / tip_radius) * n_blades / (2 * loc_wake_adv_ratio) <= (n_blades / 2) * (1 / loc_wake_adv_ratio),
        # blade_twist_deg * pi / 180 == alpha_rad + 1 / h_ati,
        # h_ati ** 1.83442 == 0.966692 * (W_a / W_t) ** -1.84391 + 0.596688 * (W_a / W_t) ** -0.0973781,
        # v_t ** 2 * F ** 2 * (1 + (4 * loc_wake_adv_ratio * tip_radius/(pi * n_blades * radial_loc)) ** 2) >= (n_blades * gamma /(4 * pi * radial_loc)) ** 2,
        # alpha_deg >= -45
        # v_a >= 0,
        # v_t >= 0
    ])
    ### Solve
    sol = opti.solve()
    ### Compute sectional quantities
    # dLift = sol.value(
    #     n_blades * 0.5 * air_density * (W ** 2) *
    #     cl * chord_local * blade_section
    # )
    # dDrag = sol.value(
    #     n_blades * 0.5 * air_density * (W ** 2) *
    #     cd * chord_local * blade_section
    # )
    # Sectional thrust/torque via Kutta-Joukowski on the circulation, tilted by
    # the profile-drag ratio cd/cl.
    dThrust = sol.value(
        air_density * n_blades * gamma * (
                W_t - W_a * cd / cl
        ) * blade_section
    )
    dTorque = sol.value(
        air_density * n_blades * gamma * (
                W_a + W_t * cd / cl
        ) * radial_loc * blade_section
    )
    # if sol.value(alpha_deg) <= 0:
    #     break
    thrust.append(dThrust)
    torque.append(dTorque)
    radius.append(opti.value(radial_loc))
    chord.append(opti.value(chord_local))
    beta.append(opti.value(phi_deg + alpha_deg))
    Cl.append(opti.value(cl))
    Cd.append(opti.value(cd))
    RE.append(opti.value(Re))
    Mach.append(opti.value(Ma))
    effi.append(opti.value((1 - v_t / U_t) / (1 + v_a / U_a)))
    effp.append(opti.value((1 - cd / cl * W_a / W_t) / (1 + cd / cl * W_t / W_a)))
    Wa.append(opti.value(W_a))
    a_swirl.append(opti.value(phi_deg))
    adv_wake.append(opti.value(loc_wake_adv_ratio))
    alpha.append(opti.value(alpha_deg))
    Wt.append(opti.value(W_t))
# Integrate sectional loads to totals.
Thrust = sum(thrust)
Torque = sum(torque)
# debugging section: outputs printed in qprop
print(
    "radius chord beta Cl Cd Re Mach effi effp Wa Aswirl adv_wake alpha Wt")
for i in range(0, len(radius)):
    # print(f'{radius[i]} {chord[i]} {beta[i]} {Cl[i]} {Cd[i]} {Re[i]} {Mach[i]} {effi[i]} {effp[i]} {Wa[i]} {a_swirl[i]} {adv_wake[i]}')
    print('%.4f %.4f  %.3f  %.4f  %.5f  %d  %.3f %.4f %.4f  %.2f  %.3f  %.4f %.4f %.2f'
          % (
              radius[i], chord[i], beta[i], Cl[i], Cd[i], RE[i], Mach[i], effi[i], effp[i], Wa[i], a_swirl[i],
              adv_wake[i],
              alpha[i], Wt[i]))
print(f"Thrust Total: {Thrust}")
print(f"Torque Total: {Torque}")
# return Torque, Thrust
# Thrust, Torque = annick_propulsion_model(
#     rpm,
#     airspeed,
#     air_density,
#     mu,
#     n_blades,
#     radial_locations_m,
#     blade_chord_m,
#     blade_beta_deg,
#     dBeta_deg,
#     divisions,
# )
import aerosandbox as asb
import aerosandbox.numpy as np
from typing import Callable, Union, Dict
class TubeSparBendingStructure(asb.ImplicitAnalysis):
@asb.ImplicitAnalysis.initialize
def __init__(self,
length: float,
diameter_function: Union[float, Callable[[np.ndarray], np.ndarray]] = None,
wall_thickness_function: Union[float, Callable[[np.ndarray], np.ndarray]] = None,
bending_point_forces: Dict[float, float] = None,
bending_distributed_force_function: Union[float, Callable[[np.ndarray], np.ndarray]] = 0.,
points_per_point_load: int = 20,
elastic_modulus_function: Union[float, Callable[[np.ndarray], np.ndarray]] = 175e9, # Pa
EI_guess: float = None,
assume_thin_tube=True,
):
"""
A structural spar model that simulates bending of a cantilever tube spar based on beam theory (static,
linear elasticity). This tube spar is assumed to have uniform wall thickness in the azimuthal direction,
but not necessarily along its length. The diameter of the tube spar and elastic modulus may vary along its
length.
Governing equation is Euler-Bernoulli beam theory:
(E * I * u(y)'')'' = q(y)
where:
* y is the distance along the spar, with a cantilever support at y=0 and a free tip at y=length.
* E is the elastic modulus
* I is the bending moment of inertia
* u(y) is the local displacement at y.
* q(y) is the force-per-unit-length at y. (In other words, a dirac delta is a point load.)
* ()' is a derivative w.r.t. y.
Any applicable constraints relating to stress, buckling, ovalization, gauge limits, displacement, etc. should
be applied after initialization of this class.
Example:
>>> opti = asb.Opti()
>>>
>>> span = 34
>>> half_span = span / 2
>>> lift = 200 * 9.81
>>>
>>> beam = TubeSparBendingStructure(
>>> opti=opti,
>>> length=half_span,
>>> diameter_function=0.12,
>>> points_per_point_load=100,
>>> bending_distributed_force_function=lambda y: (lift / span) * (
>>> 4 / np.pi * (1 - (y / half_span) ** 2) ** 0.5
>>> ), # Elliptical
>>> # bending_distributed_force_function=lambda y: lift / span * np.ones_like(y) # Uniform
>>> )
>>> opti.subject_to([
>>> beam.stress_axial <= 500e6, # Stress constraint
>>> beam.u[-1] <= 3, # Tip displacement constraint
>>> beam.wall_thickness > 1e-3 # Gauge constraint
>>> ])
>>> mass = beam.volume() * 1600 # Density of carbon fiber [kg/m^3]
>>>
>>> opti.minimize(mass / 100)
>>> sol = opti.solve()
>>>
>>> beam = sol(beam)
>>>
>>> print(f"{sol.value(mass)} kg")
>>>
>>> beam.draw()
Args:
length: Length of the spar [m]. Spar is assumed to go from y=0 (cantilever support) to y=length (free tip).
diameter_function: The diameter of the tube as a function of the distance along the spar y. Refers to the
nominal diameter (e.g., the arithmetic mean of the inner diameter and outer diameter of the tube; the
"centerline" diameter). In terms of data types, this can be one of:
* None, in which case it's interpreted as a design variable to optimize over. Assumes that the value
can freely vary along the length of the spar.
* a scalar optimization variable (see asb.ImplicitAnalysis documentation to see how to link an Opti
instance to this analysis), in which case it's interpreted as a design variable to optimize over
that's uniform along the length of the spar.
* a float, in which case it's interpreted as a uniform value along the spar
* a function (or other callable) in the form f(y), where y is the coordinate along the length of the
spar. This function should be vectorized (e.g., a vector input of y values produces a vector output).
wall_thickness_function: The wall thickness of the tube as a function of the distance along the spar y. In
terms of data types, this can be one of:
* None, in which case it's interpreted as a design variable to optimize over. Assumes that the value
can freely vary along the length of the spar.
* a scalar optimization variable (see asb.ImplicitAnalysis documentation to see how to link an Opti
instance to this analysis), in which case it's interpreted as a design variable to optimize over
that's uniform along the length of the spar.
* a float, in which case it's interpreted as a uniform value along the spar
* a function (or other callable) in the form f(y), where y is the coordinate along the length of the
spar. This function should be vectorized (e.g., a vector input of y values produces a vector output).
bending_point_forces: Not yet implemented; will allow for inclusion of point loads in the future.
bending_distributed_force_function: The (distributed) load per unit span applied to the spar,
as a function of the distance along the spar y. Should be in units of force per unit length. In terms of
data types, this can be one of:
* None, in which case it's interpreted as a design variable to optimize over. Assumes that the value
can freely vary along the length of the spar.
* a scalar optimization variable (see asb.ImplicitAnalysis documentation to see how to link an Opti
instance to this analysis), in which case it's interpreted as a design variable to optimize over
that's uniform along the length of the spar.
* a float, in which case it's interpreted as a uniform value along the spar
* a function (or other callable) in the form f(y), where y is the coordinate along the length of the
spar. This function should be vectorized (e.g., a vector input of y values produces a vector output).
points_per_point_load: Controls the discretization resolution of the beam. [int] When point load support
is added, this will be the number of nodes between each individual point load.
elastic_modulus_function: The elastic modulus [Pa] of the spar as a function of the distance along the
spar y. In terms of data types, can be one of:
* None, in which case it's interpreted as a design variable to optimize over. Assumes that the value
can freely vary along the length of the spar.
* a scalar optimization variable (see asb.ImplicitAnalysis documentation to see how to link an Opti
instance to this analysis), in which case it's interpreted as a design variable to optimize over
that's uniform along the length of the spar.
* a float, in which case it's interpreted as a uniform value along the spar
* a function (or other callable) in the form f(y), where y is the coordinate along the length of the
spar. This function should be vectorized (e.g., a vector input of y values produces a vector output).
EI_guess: Provides an initial guess for the bending stiffness EI, which is used in problems where spar
diameter and thickness is not known at the outset. If not provided, a heuristic will be used to calculate this.
assume_thin_tube: Makes assumptions that are applicable in the limit of a thin-walled (wall_thickness <<
diameter) tube. This greatly increases numerical stability.
Relative error of this assumption in the thin-walled limit is:
(wall_thickness / diameter) ^ 2
So, for t/d = 0.1, the relative error is roughly 1%.
"""
### Parse the inputs
self.length = length
self.diameter_function = diameter_function
self.wall_thickness_function = wall_thickness_function
if bending_point_forces is not None:
self.bending_point_forces = bending_point_forces
raise NotImplementedError
else:
self.bending_point_forces = dict()
self.bending_distributed_force_function = bending_distributed_force_function
self.points_per_point_load = points_per_point_load
self.elastic_modulus_function = elastic_modulus_function
if EI_guess is None:
try:
diameter_guess = float(diameter_function)
except (TypeError, RuntimeError):
diameter_guess = 1
try:
wall_thickness_guess = float(wall_thickness_function)
except (TypeError, RuntimeError):
wall_thickness_guess = 0.01
try:
E_guess = float(elastic_modulus_function)
except (TypeError, RuntimeError):
E_guess = 175e9
if assume_thin_tube:
I_guess = np.pi / 8 * diameter_guess ** 3 * wall_thickness_guess
else:
I_guess = np.pi / 64 * (
(diameter_guess + wall_thickness_guess) ** 4 -
(diameter_guess - wall_thickness_guess) ** 4
)
EI_guess = E_guess * I_guess
# EI_guess *= 1e0 # A very high EI guess is numerically stabilizing
self.EI_guess = EI_guess
self.assume_thin_tube = assume_thin_tube
### Discretize
y = np.linspace(
0,
length,
points_per_point_load
)
N = np.length(y)
dy = np.diff(y)
### Evaluate the beam properties
if isinstance(diameter_function, Callable):
diameter = diameter_function(y)
elif diameter_function is None:
diameter = self.opti.variable(init_guess=1, n_vars=N, lower_bound=0.)
else:
diameter = diameter_function * np.ones_like(y)
if isinstance(wall_thickness_function, Callable):
wall_thickness = wall_thickness_function(y)
elif wall_thickness_function is None:
wall_thickness = self.opti.variable(init_guess=1e-2, n_vars=N, lower_bound=0, upper_bound=diameter)
else:
wall_thickness = wall_thickness_function * np.ones_like(y)
if isinstance(bending_distributed_force_function, Callable):
distributed_force = bending_distributed_force_function(y)
else:
distributed_force = bending_distributed_force_function * np.ones_like(y)
if isinstance(elastic_modulus_function, Callable):
elastic_modulus = elastic_modulus_function(y)
else:
elastic_modulus = elastic_modulus_function * np.ones_like(y)
### Evaluate the beam properties
if assume_thin_tube:
I = np.pi / 8 * diameter ** 3 * wall_thickness
else:
I = np.pi / 64 * (
(diameter + wall_thickness) ** 4 -
(diameter - wall_thickness) ** 4
)
EI = elastic_modulus * I
### Compute the initial guess
u = self.opti.variable(
init_guess=np.zeros_like(y),
scale=np.sum(np.trapz(distributed_force) * dy) * length ** 4 / EI_guess
)
du = self.opti.derivative_of(
u, with_respect_to=y,
derivative_init_guess=np.zeros_like(y),
derivative_scale=np.sum(np.trapz(distributed_force) * dy) * length ** 3 / EI_guess
)
ddu = self.opti.derivative_of(
du, with_respect_to=y,
derivative_init_guess=np.zeros_like(y),
derivative_scale=np.sum(np.trapz(distributed_force) * dy) * length ** 2 / EI_guess
)
dEIddu = self.opti.derivative_of(
EI * ddu, with_respect_to=y,
derivative_init_guess=np.zeros_like(y),
derivative_scale=np.sum(np.trapz(distributed_force) * dy) * length
)
self.opti.constrain_derivative(
variable=dEIddu, with_respect_to=y,
derivative=distributed_force
)
self.opti.subject_to([
u[0] == 0,
du[0] == 0,
ddu[-1] == 0,
dEIddu[-1] == 0
])
bending_moment = -EI * ddu
shear_force = -dEIddu
stress_axial = elastic_modulus * ddu * (diameter + wall_thickness) / 2
self.y = y
self.diameter = diameter
self.wall_thickness = wall_thickness
self.distributed_force = distributed_force
self.elastic_modulus = elastic_modulus
self.I = I
self.u = u
self.du = du
self.ddu = ddu
self.dEIddu = dEIddu
self.bending_moment = bending_moment
self.shear_force = shear_force
self.stress_axial = stress_axial
def volume(self):
if self.assume_thin_tube:
return np.sum(
np.pi * np.trapz(
self.diameter * self.wall_thickness
) * np.diff(self.y)
)
else:
return np.sum(
np.pi / 4 * np.trapz(
(self.diameter + self.wall_thickness) ** 2 -
(self.diameter - self.wall_thickness) ** 2
) * np.diff(self.y)
)
def total_force(self):
if len(self.bending_point_forces) != 0:
raise NotImplementedError
return np.sum(
np.trapz(
self.distributed_force
) * np.diff(self.y)
)
    def draw(self, show=True):
        """
        Plots a 2x3 grid of spanwise distributions for the solved structure:
        displacement, distributed load, axial stress, bending stiffness (EI),
        tube diameter, and wall thickness, each vs. the spanwise coordinate y.

        Args:
            show: If True, finalizes and displays the figure via
                `pretty_plots.show_plot()`; if False, leaves the figure open for
                further modification by the caller.
        """
        import matplotlib.pyplot as plt
        import aerosandbox.tools.pretty_plots as p

        # Maps subplot y-axis labels to the quantities to plot.
        plot_quantities = {
            "Displacement [m]"              : self.u,
            # "Local Slope [deg]": np.arctan2d(self.du, 1),
            "Local Load [N/m]"              : self.distributed_force,
            "Axial Stress [MPa]"            : self.stress_axial / 1e6,
            "Bending $EI$ [N $\cdot$ m$^2$]": self.elastic_modulus * self.I,
            "Tube Diameter [m]"             : self.diameter,
            "Wall Thickness [m]"            : self.wall_thickness,
        }

        fig, ax = plt.subplots(2, 3, figsize=(8, 6), sharex='all')
        for i, (k, v) in enumerate(plot_quantities.items()):
            plt.sca(ax.flatten()[i])  # Select the i-th subplot as current axes
            plt.plot(
                self.y,
                v,
                # ".-"
            )
            plt.ylabel(k)
            plt.xlim(
                np.min(self.y),
                np.max(self.y),
            )
        # Only the bottom row gets x-labels (x-axes are shared).
        for a in ax[-1, :]:
            a.set_xlabel(r"$y$ [m]")

        if show:
            p.show_plot("Tube Spar Bending Structure")
if __name__ == '__main__':
    # Usage demo / sanity check: size the tube spar of a half-wing (dimensions
    # resembling a human-powered aircraft) for minimum mass, then compare
    # against an empirical spar-mass regression.
    import aerosandbox.tools.units as u

    opti = asb.Opti()  # `asb` is assumed imported at the top of this file.

    span = 112 * u.foot  # Full wingspan [m]
    lift = 229 * u.lbm * 9.81  # Weight of a 229-lbm vehicle [N]

    half_span = span / 2

    beam = TubeSparBendingStructure(
        opti=opti,
        length=half_span,
        diameter_function=3.5 * u.inch,  # lambda y: (3.5 * u.inch) - (3.5 - 1.25) * u.inch * (y / half_span),
        points_per_point_load=100,
        bending_distributed_force_function=lambda y: (lift / span) * (
                4 / np.pi * (1 - (y / half_span) ** 2) ** 0.5
        ),  # Elliptical
        # bending_distributed_force_function=lambda y: lift / span * np.ones_like(y) # Uniform,
        elastic_modulus_function=228e9,  # [Pa]; carbon fiber (see 228e9 usage elsewhere in this package)
    )
    opti.subject_to([
        beam.stress_axial <= 500e6,  # Stress constraint
        beam.u[-1] <= 2,  # Tip displacement constraint
        beam.wall_thickness > 0.1e-3  # Gauge constraint
    ])
    mass = beam.volume() * 1600  # Density of carbon fiber [kg/m^3]
    # Normalizing the objective by vehicle mass improves scaling; the argmin is unchanged.
    opti.minimize(mass / (lift / 9.81))
    sol = opti.solve()

    beam.substitute_solution(sol)

    print(f"{sol.value(mass)} kg per half-wing")
    beam.draw()

    # Compare against an empirical regression (source implied by the variable
    # name `cruz_estimated_spar_mass`; reference not shown in this file).
    computed_spar_mass = 2 * sol.value(mass)
    vehicle_mass = lift / 9.81
    ultimate_load_factor = 2
    cruz_estimated_spar_mass = (
            (span * 1.17e-1 + span ** 2 * 1.10e-2) *
            (1 + (ultimate_load_factor * vehicle_mass / 100 - 2) / 4)
    )
import aerosandbox.numpy as np
def column_buckling_critical_load(
        elastic_modulus: float,
        moment_of_inertia: float,
        length: float,
        boundary_condition_type: str = "pin-pin",
        use_recommended_design_values: bool = True,
):
    """
    Computes the critical load (in N) for a column or tube in compression to buckle via primary buckling. Uses Euler's classical critical
    load formula:

        P_crit = pi^2 * E * I / (K * L)^2

    Args:
        elastic_modulus: The elastic modulus of the material, in Pa.

        moment_of_inertia: The moment of inertia of the cross-section, in m^4.

        length: The length of the column, in m.

        boundary_condition_type: The boundary condition type. Options are:
            - "pin-pin"
            - "pin-clamp"
            - "clamp-clamp"
            - "clamp-pin"
            - "clamp-free"
            - "free-clamp"

        use_recommended_design_values: Whether to use the recommended design value of K for a given boundary condition (True)
            or to use the less-conservative theoretical value (False).

            * Recommended values are from Table C.1.8.1 in Steel Construction Manual, 8th edition, 2nd revised
            printing, American Institute of Steel Construction, 1987 via WikiMedia:
            https://commons.wikimedia.org/wiki/File:ColumnEffectiveLength.png

    Returns:
        The critical compressive load (in N) for the column or tube to buckle via primary buckling.

    Raises:
        ValueError: if `boundary_condition_type` is not one of the options above.
    """
    # Effective-length factors, keyed by end condition: (design K, theoretical K).
    # The end conditions are symmetric, so "a-b" and "b-a" share an entry.
    effective_length_factors = {
        "pin-pin"    : (1.00, 1.00),
        "pin-clamp"  : (0.80, 0.70),
        "clamp-pin"  : (0.80, 0.70),
        "clamp-clamp": (0.65, 0.50),
        "clamp-free" : (2.10, 2.00),
        "free-clamp" : (2.10, 2.00),
    }

    if boundary_condition_type not in effective_length_factors:
        raise ValueError("Invalid `boundary_condition_type`.")

    K_design, K_theoretical = effective_length_factors[boundary_condition_type]
    K = K_design if use_recommended_design_values else K_theoretical

    effective_length = K * length
    return np.pi ** 2 * elastic_modulus * moment_of_inertia / effective_length ** 2
def thin_walled_tube_crippling_buckling_critical_load(
        elastic_modulus: float,
        wall_thickness: float,
        radius: float,
        use_recommended_design_values: bool = True,
):
    """
    Computes the critical load for a thin-walled tube in compression to fail in the crippling mode. (Note: you should also check for
    failure by primary buckling using the `column_buckling_critical_load()` function.)

    The crippling mode is a specific instability mode for tubes with thin walls when loaded in compression. It can be
    seen when you step on a soda can and it buckles inwards. The critical load for this mode is given by the
    following formula:

    stress_crippling = crippling_constant * (E * t / r)

    where:

        A recommended value of crippling_constant = 0.3 is given in Raymer: Aircraft Design: A Conceptual Approach,
        5th Edition, Eq. 14.33, pg. 554.

        A theoretically more accurate value of crippling_constant = 0.605 is given in the Air Force Stress Manual,
        Section 2.3.2.1, Eq. 2-20. This value assumes mu = 0.3, which is a good assumption for most metals.

        and E is the elastic modulus, t is the wall thickness, and r is the radius.

    For more info, see the Air Force Stress Manual, Section 2.3.2.1:
    https://engineeringlibrary.org/reference/column-crippling-air-force-stress-manual

    And see Raymer: Aircraft Design: A Conceptual Approach, 5th Edition, pg. 554.

    Args:
        elastic_modulus: The elastic modulus of the material, in Pa.

        wall_thickness: The wall thickness of the tube, in m.

        radius: The radius of the tube, in m.

        use_recommended_design_values: Whether to use the recommended design value of crippling_constant (True)
            or to use the less-conservative theoretical value (False).

    Returns:
        The critical compressive load (in N) for the tube to buckle in the crippling mode.
    """
    if use_recommended_design_values:
        crippling_stress_constant = 0.3
        # Taken from Raymer: Aircraft Design: A Conceptual Approach, 5th Edition, Eq. 14.33, pg. 554.
        #
        # According to the Air Force Stress Manual, Figure 2-67, this value should drop as radius/wall_thickness
        # increases.
    else:
        crippling_stress_constant = 0.605
        # Theoretically, this should be (3 * (1 - mu^2))^(-0.5), where mu is the Poisson's ratio.
        # Following the Air Force Stress Manual, Section 2.3.2.1, Eq. 2-20.
        # The above value assumes mu = 0.3, which is a good assumption for most metals.

    # Bug fix: this formerly hard-coded `0.3` here instead of using
    # `crippling_stress_constant`, so `use_recommended_design_values=False`
    # silently had no effect on the result.
    crippling_stress = crippling_stress_constant * (elastic_modulus * wall_thickness / radius)

    tube_xsec_area = 2 * np.pi * radius * wall_thickness  # thin-wall approximation

    crippling_load = crippling_stress * tube_xsec_area

    return crippling_load
def plate_buckling_critical_load(
        length: float,
        width: float,
        wall_thickness: float,
        elastic_modulus: float,
        poissons_ratio: float = 0.33,
        side_boundary_condition_type: str = "clamp-clamp",
):
    """
    Computes the critical compressive load (in N) for a flat rectangular plate to buckle via plate buckling.

    The plate has dimensions (length x width x wall_thickness), and the
    compressive force is applied along the `length` dimension.

    Uses constants from NACA TN3781. Methodology taken from "Stress Analysis
    Manual," Air Force Flight Dynamic Laboratory, Oct. 1986, Section 6.3:
    Axial Compression of Flat Plates. Reproduced at "Engineering Library":
    https://engineeringlibrary.org/reference/analysis-of-plates-axial-compression-air-force-stress-manual

    Args:
        length: The length of the plate, in m. (Not used in the long-plate
            formula applied here, but kept for interface completeness.)

        width: The width of the plate, in m.

        wall_thickness: The wall thickness of the plate, in m.

        elastic_modulus: The elastic modulus of the material, in Pa.

        poissons_ratio: The Poisson's ratio of the material [nondimensional].

        side_boundary_condition_type: The boundary condition type at the sides of the plate. Options are:
            - "clamp-clamp"
            - "pin-pin"
            - "free-free"

    Returns:
        The critical compressive load (in N) for the plate to buckle via plate buckling.

    Raises:
        ValueError: if `side_boundary_condition_type` is not one of the options above.
    """
    # Buckling coefficients K, from NACA TN3781.
    buckling_coefficients = {
        "clamp-clamp": 6.35,
        "pin-pin"    : 3.62,
        "free-free"  : 0.385,
    }

    if side_boundary_condition_type not in buckling_coefficients:
        raise ValueError("Invalid `side_boundary_condition_type`.")

    K = buckling_coefficients[side_boundary_condition_type]

    plate_stiffness = elastic_modulus / (12 * (1 - poissons_ratio ** 2))
    return K * np.pi ** 2 * plate_stiffness * wall_thickness ** 3 / width
import aerosandbox.numpy as np
import casadi as cas
if __name__ == '__main__':
    # Standalone demo: simultaneously optimize the diameter distribution of a
    # cantilevered tube spar while solving the Euler-Bernoulli bending ODE and
    # a torsion ODE as equality constraints ("SAND": simultaneous analysis and
    # design). Objective is spar mass; constraints are stress, tip slope, and twist.
    opti = cas.Opti()  # Initialize a SAND environment

    # Define Assumptions
    L = 34.1376 / 2  # Half-span [m]
    n = 50  # Number of spanwise discretization nodes
    mass_total = 292  # Total supported mass [kg]
    x = cas.linspace(0, L, n)  # Spanwise stations [m], root to tip
    dx = cas.diff(x)
    E = 228e9  # Pa, modulus of CF
    G = E / 2 / (1 + 0.5)  # TODO fix this!!! CFRP is not isotropic!
    max_allowable_stress = 570e6 / 1.75  # Strength with a 1.75 knockdown/safety factor [Pa]

    # Diameter is optimized in log-space, which keeps it strictly positive.
    log_nominal_diameter = opti.variable(n)
    opti.set_initial(log_nominal_diameter, cas.log(200e-3))
    nominal_diameter = cas.exp(log_nominal_diameter)

    thickness = 0.14e-3 * 5  # Wall thickness [m]
    opti.subject_to([
        nominal_diameter > thickness,
    ])

    def trapz(x):
        # Converts nodal values to per-interval trapezoidal averages. The
        # endpoint terms are added so that sum(out) == sum(x), i.e. totals are
        # conserved when integrating against dx.
        out = (x[:-1] + x[1:]) / 2
        out[0] += x[0] / 2
        out[-1] += x[-1] / 2
        return out

    # Mass
    volume = cas.sum1(
        cas.pi / 4 * trapz((nominal_diameter + thickness) ** 2 - (nominal_diameter - thickness) ** 2) * dx
    )
    mass = volume * 1600  # Material density [kg/m^3]

    # Bending loads
    I = cas.pi / 64 * ((nominal_diameter + thickness) ** 4 - (nominal_diameter - thickness) ** 4)
    EI = E * I
    # Lift carried per half-wing, net of the (optimized) spar's own weight.
    total_lift_force = 9.81 * (mass_total - mass) / 2  # 9.81 * 292 / 2

    lift_distribution = "elliptical"
    # NOTE(review): no `else` branch — any other value of `lift_distribution`
    # would leave `force_per_unit_length` undefined (NameError below).
    if lift_distribution == "rectangular":
        force_per_unit_length = total_lift_force * cas.GenDM_ones(n) / L
    elif lift_distribution == "elliptical":
        force_per_unit_length = total_lift_force * cas.sqrt(1 - (x / L) ** 2) * (4 / cas.pi) / L

    # Torsion loads
    J = cas.pi / 32 * ((nominal_diameter + thickness) ** 4 - (nominal_diameter - thickness) ** 4)
    airfoil_lift_coefficient = 1
    airfoil_moment_coefficient = -0.14
    airfoil_chord = 1  # meter
    moment_per_unit_length = force_per_unit_length * airfoil_moment_coefficient * airfoil_chord / airfoil_lift_coefficient
    # Derivation of above:
    # CL = L / q c
    # CM = M / q c**2
    # M / L = (CM * c) / (CL)

    # Set up derivatives
    # Each state is an opti variable; the leading scalar sets its scaling.
    u = 1 * opti.variable(n)
    du = 0.1 * opti.variable(n)
    ddu = 0.01 * opti.variable(n)
    dEIddu = 100 * opti.variable(n)
    phi = 0.1 * opti.variable(n)
    dphi = 0.01 * opti.variable(n)
    # opti.set_initial(u, 2 * (x/L)**4)
    # opti.set_initial(du, 2 * 4/L * (x/L)**3)
    # opti.set_initial(ddu, 2 * 3/L * 2/L * (x/L))
    # opti.set_initial(dEIddu, 2 * 3/L * 2/L * 1/L * 1e3)

    # Add forcing term
    ddEIddu = force_per_unit_length  # (E I u'')'' = q(x)
    ddphi = -moment_per_unit_length / (G * J)  # phi'' = -T / (G J)

    # Define derivatives (trapezoidal collocation between adjacent nodes)
    opti.subject_to([
        cas.diff(u) == trapz(du) * dx,
        cas.diff(du) == trapz(ddu) * dx,
        cas.diff(EI * ddu) == trapz(dEIddu) * dx,
        cas.diff(dEIddu) == trapz(ddEIddu) * dx,
        cas.diff(phi) == trapz(dphi) * dx,
        cas.diff(dphi) == trapz(ddphi) * dx,
    ])

    # Add BCs (cantilever: clamped root, free tip)
    opti.subject_to([
        u[0] == 0,
        du[0] == 0,
        ddu[-1] == 0,  # No tip moment
        dEIddu[-1] == 0,  # No tip higher order stuff
        phi[0] == 0,
        dphi[-1] == 0,
    ])

    # Failure criterion
    stress_axial = (nominal_diameter + thickness) / 2 * E * ddu
    stress_shear = dphi * G * (nominal_diameter + thickness) / 2
    # stress_axial = cas.fmax(0, stress_axial)
    # stress_shear = cas.fmax(0, stress_shear)
    # NOTE(review): despite the name, cas.sqrt is applied and the shear term is
    # zeroed out, so this is neither squared nor a true von Mises stress; it is
    # also unused below. Confirm intent before reusing.
    stress_von_mises_squared = cas.sqrt(
        stress_axial ** 2 + 0 * stress_shear ** 2)  # Source: https://en.wikipedia.org/wiki/Von_Mises_yield_criterion
    stress = stress_axial
    opti.subject_to([
        stress / max_allowable_stress < 1
    ])

    opti.minimize(mass)

    # Tip deflection constraint
    opti.subject_to([
        # u[-1] < 2 # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
        du[-1] * 180 / cas.pi < 10  # Tip slope limit [deg]
    ])

    # Twist
    opti.subject_to([
        phi[-1] * 180 / cas.pi > -5  # Tip twist limit [deg] (nose-down)
    ])

    p_opts = {}
    s_opts = {}
    s_opts["max_iter"] = 500  # If you need to interrupt, just use ctrl+c
    # s_opts["mu_strategy"] = "adaptive"
    opti.solver('ipopt', p_opts, s_opts)

    try:
        sol = opti.solve()
    except Exception:
        # Keep the last iterate for plotting even if IPOPT did not converge.
        print("Failed!")
        sol = opti.debug

    import matplotlib.pyplot as plt
    import seaborn as sns

    sns.set(font_scale=1)

    fig, ax = plt.subplots(2, 3, figsize=(10, 6), dpi=200)

    plt.subplot(231)
    plt.plot(sol.value(x), sol.value(u), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("u [m]")
    plt.title("Displacement (Bending)")
    plt.axis("equal")

    # plt.subplot(232)
    # plt.plot(sol.value(x), np.arctan(sol.value(du))*180/np.pi, '.-')
    # plt.xlabel("x [m]")
    # plt.ylabel(r"Local Slope [deg]")
    # plt.title("Slope")

    plt.subplot(232)
    plt.plot(sol.value(x), sol.value(phi) * 180 / np.pi, '.-')
    plt.xlabel("x [m]")
    plt.ylabel("Twist angle [deg]")
    plt.title("Twist Angle (Torsion)")

    plt.subplot(233)
    plt.plot(sol.value(x), sol.value(force_per_unit_length), '.-')
    plt.xlabel("x [m]")
    plt.ylabel(r"$F$ [N/m]")
    plt.title("Local Load per Unit Span")

    plt.subplot(234)
    plt.plot(sol.value(x), sol.value(stress / 1e6), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("Stress [MPa]")
    plt.title("Peak Stress at Section")

    plt.subplot(235)
    plt.plot(sol.value(x), sol.value(dEIddu), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("F [N]")
    plt.title("Shear Force")

    plt.subplot(236)
    plt.plot(sol.value(x), sol.value(nominal_diameter), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("t [m]")
    plt.title("Optimal Spar Diameter")

    plt.suptitle(f"Beam Modeling (Total Spar Mass: {2 * sol.value(mass):.2f} kg)")
    plt.subplots_adjust(hspace=0.4)
    # plt.tight_layout()
    # plt.legend()
    plt.show()

    print("Mass (half-wing) [kg]:", sol.value(mass))
    print("Mass (full-wing) [kg]:", 2 * sol.value(mass))
import casadi as cas
import aerosandbox.numpy as np
from aerosandbox.geometry import *
from aerosandbox.common import AeroSandboxObject
class TubeBeam1(AeroSandboxObject):
    """
    A circular-tube cantilever beam model (static, linear elasticity) built on a
    casadi `Opti` environment. Loads are registered via the `add_*` methods,
    then `setup()` discretizes the span and adds the governing ODEs as
    optimization constraints. See `setup()` and the constructor docstring for
    the governing equations.
    """

    def __init__(self,
                 opti,  # type: cas.Opti
                 length,
                 points_per_point_load=100,
                 E=228e9,  # Pa
                 isotropic=True,
                 poisson_ratio=0.5,
                 diameter_guess=100,  # Make this larger for more computational stability, lower for a bit faster speed
                 thickness=0.14e-3 * 5,
                 max_allowable_stress=570e6 / 1.75,
                 density=1600,
                 G=None,
                 bending=True,  # Should we consider beam bending?
                 torsion=True,  # Should we consider beam torsion?
                 ):
        """
        A beam model (static, linear elasticity) that simulates both bending and torsion.

        Governing equation for bending:
        Euler-Bernoulli beam theory.

        (E * I * u(x)'')'' = q(x)

        where:
            * E is the elastic modulus
            * I is the bending moment of inertia
            * u(x) is the local displacement at x.
            * q(x) is the force-per-unit-length at x. (In other words, a dirac delta is a point load.)
            * ()' is a derivative w.r.t. x.

        Governing equation for torsion:
        phi(x)'' = -T / (G * J)

        where:
            * phi is the local twist angle
            * T is the local torque per unit length
            * G is the local shear modulus
            * J is the polar moment of inertia
            * ()' is a derivative w.r.t. x.

        :param opti: An optimization environment. # type: cas.Opti
        :param length: Length of the beam [m]
        :param points_per_point_load: Number of discretization points to use per point load
        :param E: Elastic modulus [Pa]
        :param isotropic: Is the material isotropic? If so, attempts to find shear modulus from poisson's ratio, or vice versa. [boolean]
        :param poisson_ratio: Poisson's ratio (if isotropic, can't set both poisson_ratio and shear modulus - one must be None)
        :param diameter_guess: Initial guess for the tube diameter [m]. Make this larger for more computational stability, lower for a bit faster speed.
        :param thickness: Tube wall thickness. This will often be set by shell buckling considerations. [m]
        :param max_allowable_stress: Maximum allowable stress in the material. [Pa]
        :param density: Density of the material [kg/m^3]
        :param G: Shear modulus (if isotropic, can't set both poisson_ratio and shear modulus - one must be None)
        :param bending: Should we consider bending? [boolean]
        :param torsion: Should we consider torsion? [boolean]
        """
        # Transfer inputs
        self.opti = opti
        self.length = length
        self.points_per_point_load = points_per_point_load
        self.E = E
        self.isotropic = isotropic
        self.poisson_ratio = poisson_ratio
        self.diameter_guess = diameter_guess
        self.thickness = thickness
        self.max_allowable_stress = max_allowable_stress
        self.density = density
        self.G = G
        self.bending = bending
        self.torsion = torsion

        # Calculate G from E and Poisson's ratio (isotropic relation G = E / (2 (1 + nu))).
        if isotropic:
            if G is None:
                self.G = E / 2 / (1 + poisson_ratio)
            elif poisson_ratio is None:
                pass  # TODO find poisson?
            else:
                raise ValueError(
                    "You can't uniquely specify shear modulus and Poisson's ratio on an isotropic material!")

        # Create data structures to track loads
        self.point_loads = []
        self.distributed_loads = []

    def add_point_load(self,
                       location,
                       force=0,
                       bending_moment=0,
                       torsional_moment=0,
                       ):
        """
        Adds a point force and/or moment.

        :param location: Location of the point force along the beam [m]
        :param force: Force to add [N]
        :param bending_moment: Bending moment to add [N-m] # TODO make this work
        :param torsional_moment: Torsional moment to add [N-m] # TODO make this work
        :return: None (in-place)
        """
        self.point_loads.append(
            {
                "location"        : location,
                "force"           : force,
                "bending_moment"  : bending_moment,
                "torsional_moment": torsional_moment
            }
        )

    def add_uniform_load(self,
                         force=0,
                         bending_moment=0,
                         torsional_moment=0,
                         ):
        """
        Adds a uniformly distributed force and/or moment across the entire length of the beam.

        :param force: Total force applied to beam [N]
        :param bending_moment: Bending moment to add [N-m] # TODO make this work
        :param torsional_moment: Torsional moment to add [N-m] # TODO make this work
        :return: None (in-place)
        """
        self.distributed_loads.append(
            {
                "type"            : "uniform",
                "force"           : force,
                "bending_moment"  : bending_moment,
                "torsional_moment": torsional_moment
            }
        )

    def add_elliptical_load(self,
                            force=0,
                            bending_moment=0,
                            torsional_moment=0,
                            ):
        """
        Adds an elliptically distributed force and/or moment across the entire length of the beam.

        :param force: Total force applied to beam [N]
        :param bending_moment: Bending moment to add [N-m] # TODO make this work
        :param torsional_moment: Torsional moment to add [N-m] # TODO make this work
        :return: None (in-place)
        """
        self.distributed_loads.append(
            {
                "type"            : "elliptical",
                "force"           : force,
                "bending_moment"  : bending_moment,
                "torsional_moment": torsional_moment
            }
        )

    def setup(self,
              bending_BC_type="cantilevered"
              ):
        """
        Sets up the problem. Run this last.

        Discretizes the span (clustering nodes so that each point load falls on
        a node), builds the mass/inertia expressions, and adds the governing
        ODEs and boundary conditions as constraints on `self.opti`.

        :return: None (in-place)
        """
        ### Discretize and assign loads

        # Discretize: one linspace segment between each pair of adjacent point-load
        # locations (including the root at 0 and the tip at self.length).
        point_load_locations = [load["location"] for load in self.point_loads]
        point_load_locations.insert(0, 0)
        point_load_locations.append(self.length)
        self.x = cas.vertcat(*[
            cas.linspace(
                point_load_locations[i],
                point_load_locations[i + 1],
                self.points_per_point_load)
            for i in range(len(point_load_locations) - 1)
        ])

        # Post-process the discretization
        self.n = self.x.shape[0]
        dx = cas.diff(self.x)

        # Add point forces (applied at the last node of each segment, i.e. at
        # the corresponding point-load location).
        self.point_forces = cas.GenMX_zeros(self.n - 1)
        for i in range(len(self.point_loads)):
            load = self.point_loads[i]
            self.point_forces[self.points_per_point_load * (i + 1) - 1] = load["force"]

        # Add distributed loads
        self.force_per_unit_length = cas.GenMX_zeros(self.n)
        self.moment_per_unit_length = cas.GenMX_zeros(self.n)
        for load in self.distributed_loads:
            if load["type"] == "uniform":
                self.force_per_unit_length += load["force"] / self.length
            elif load["type"] == "elliptical":
                load_to_add = load["force"] / self.length * (
                        4 / cas.pi * cas.sqrt(1 - (self.x / self.length) ** 2)
                )
                self.force_per_unit_length += load_to_add
            else:
                raise ValueError("Bad value of \"type\" for a load within beam.distributed_loads!")

        # Initialize optimization variables.
        # Diameter is optimized in log-space, which keeps it strictly positive.
        log_nominal_diameter = self.opti.variable(self.n)
        self.opti.set_initial(log_nominal_diameter, cas.log(self.diameter_guess))
        self.nominal_diameter = cas.exp(log_nominal_diameter)

        self.opti.subject_to([
            log_nominal_diameter > cas.log(self.thickness)
        ])

        def trapz(x):
            # Per-interval trapezoidal averages of nodal values (endpoint
            # corrections deliberately disabled here).
            out = (x[:-1] + x[1:]) / 2
            # out[0] += x[0] / 2
            # out[-1] += x[-1] / 2
            return out

        # Mass
        self.volume = cas.sum1(
            cas.pi / 4 * trapz(
                (self.nominal_diameter + self.thickness) ** 2 -
                (self.nominal_diameter - self.thickness) ** 2
            ) * dx
        )
        self.mass = self.volume * self.density

        # Mass proxy (thin-wall approximation; smoother for optimization)
        self.volume_proxy = cas.sum1(
            cas.pi * trapz(
                self.nominal_diameter
            ) * dx * self.thickness
        )
        self.mass_proxy = self.volume_proxy * self.density

        # Find moments of inertia
        self.I = cas.pi / 64 * (  # bending
                (self.nominal_diameter + self.thickness) ** 4 -
                (self.nominal_diameter - self.thickness) ** 4
        )
        self.J = cas.pi / 32 * (  # torsion
                (self.nominal_diameter + self.thickness) ** 4 -
                (self.nominal_diameter - self.thickness) ** 4
        )

        if self.bending:
            # Set up derivatives. Each state is an opti variable; the leading
            # scalar sets its scaling.
            self.u = 1 * self.opti.variable(self.n)
            self.du = 0.1 * self.opti.variable(self.n)
            self.ddu = 0.01 * self.opti.variable(self.n)
            self.dEIddu = 1 * self.opti.variable(self.n)
            self.opti.set_initial(self.u, 0)
            self.opti.set_initial(self.du, 0)
            self.opti.set_initial(self.ddu, 0)
            self.opti.set_initial(self.dEIddu, 0)

            # Define derivatives (trapezoidal collocation between adjacent nodes)
            self.opti.subject_to([
                cas.diff(self.u) == trapz(self.du) * dx,
                cas.diff(self.du) == trapz(self.ddu) * dx,
                cas.diff(self.E * self.I * self.ddu) == trapz(self.dEIddu) * dx,
                cas.diff(self.dEIddu) == trapz(self.force_per_unit_length) * dx + self.point_forces,
            ])

            # Add BCs
            if bending_BC_type == "cantilevered":
                self.opti.subject_to([
                    self.u[0] == 0,
                    self.du[0] == 0,
                    self.ddu[-1] == 0,  # No tip moment
                    self.dEIddu[-1] == 0,  # No tip higher order stuff
                ])
            else:
                raise ValueError("Bad value of bending_BC_type!")

            # Stress
            self.stress_axial = (self.nominal_diameter + self.thickness) / 2 * self.E * self.ddu

        if self.torsion:
            # Set up derivatives
            phi = 0.1 * self.opti.variable(self.n)
            dphi = 0.01 * self.opti.variable(self.n)

            # Add forcing term
            ddphi = -self.moment_per_unit_length / (self.G * self.J)
            # NOTE(review): unlike the bending branch, no derivative-definition
            # constraints or boundary conditions are imposed on phi/dphi, and no
            # torsional shear stress is folded into `self.stress` below. The
            # torsion model appears incomplete -- confirm before relying on it.

        # NOTE(review): `self.stress_axial` only exists if self.bending is True;
        # calling setup() with bending=False raises AttributeError here.
        self.stress = self.stress_axial
        self.opti.subject_to([
            self.stress / self.max_allowable_stress < 1,
            self.stress / self.max_allowable_stress > -1,
        ])

    def draw_bending(self,
                     show=True,
                     for_print=False,
                     equal_scale=True,
                     ):
        """
        Draws a figure that illustrates some bending properties. Must be called on a solved object (i.e. using the substitute_sol method).

        :param show: Whether or not to show the figure [boolean]
        :param for_print: Whether or not the figure should be shaped for printing in a paper [boolean]
        :param equal_scale: Whether or not to make the displacement plot have equal scale (i.e. true deformation only)
        :return:
        """
        import matplotlib.pyplot as plt
        import seaborn as sns
        sns.set(font_scale=1)

        # 2x3 layout for screen; 3x2 layout for print.
        fig, ax = plt.subplots(
            2 if not for_print else 3,
            3 if not for_print else 2,
            figsize=(
                10 if not for_print else 6,
                6 if not for_print else 6
            ),
            dpi=200
        )

        plt.subplot(231) if not for_print else plt.subplot(321)
        plt.plot(self.x, self.u, '.-')
        plt.xlabel(r"$x$ [m]")
        plt.ylabel(r"$u$ [m]")
        plt.title("Displacement (Bending)")
        if equal_scale:
            plt.axis("equal")

        plt.subplot(232) if not for_print else plt.subplot(322)
        plt.plot(self.x, np.arctan(self.du) * 180 / np.pi, '.-')
        plt.xlabel(r"$x$ [m]")
        plt.ylabel(r"Local Slope [deg]")
        plt.title("Slope")

        plt.subplot(233) if not for_print else plt.subplot(323)
        plt.plot(self.x, self.force_per_unit_length, '.-')
        plt.xlabel(r"$x$ [m]")
        plt.ylabel(r"$q$ [N/m]")
        plt.title("Local Load per Unit Span")

        plt.subplot(234) if not for_print else plt.subplot(324)
        plt.plot(self.x, self.stress_axial / 1e6, '.-')
        plt.xlabel(r"$x$ [m]")
        plt.ylabel("Axial Stress [MPa]")
        plt.title("Axial Stress")

        plt.subplot(235) if not for_print else plt.subplot(325)
        plt.plot(self.x, self.dEIddu, '.-')
        plt.xlabel(r"$x$ [m]")
        plt.ylabel(r"$F$ [N]")
        plt.title("Shear Force")

        plt.subplot(236) if not for_print else plt.subplot(326)
        plt.plot(self.x, self.nominal_diameter, '.-')
        plt.xlabel(r"$x$ [m]")
        plt.ylabel("Diameter [m]")
        plt.title("Optimal Spar Diameter")

        plt.tight_layout()
        plt.show() if show else None
if __name__ == '__main__':
    # Demo: size the spar of a 60-m-span configuration for minimum mass, with
    # a strut/point load whose location is formally a variable (but pinned to
    # 18 m by an equality constraint).
    opti = cas.Opti()

    beam = TubeBeam1(
        opti=opti,
        length=60 / 2,  # Half-span [m]
        points_per_point_load=50,
        diameter_guess=100,
        bending=True,
        torsion=False
    )
    lift_force = 9.81 * 103.873  # Weight of a 103.873-kg vehicle [N]
    load_location = opti.variable()
    opti.set_initial(load_location, 15)
    opti.subject_to([
        load_location > 2,
        load_location < 60 / 2 - 2,
        load_location == 18,  # Pins the location; the bounds above are then redundant.
    ])
    beam.add_point_load(load_location, -lift_force / 3)
    beam.add_uniform_load(force=lift_force / 2)
    beam.setup()

    # Tip deflection constraint
    opti.subject_to([
        # beam.u[-1] < 2, # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
        # beam.u[-1] > -2 # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
        beam.du * 180 / cas.pi < 10,
        beam.du * 180 / cas.pi > -10
    ])
    # Second-difference (curvature) limit keeps the diameter distribution smooth.
    opti.subject_to([
        cas.diff(cas.diff(beam.nominal_diameter)) < 0.001,
        cas.diff(cas.diff(beam.nominal_diameter)) > -0.001,
    ])
    # opti.minimize(cas.sqrt(beam.mass))
    opti.minimize(beam.mass)
    # opti.minimize(beam.mass ** 2)
    # opti.minimize(beam.mass_proxy)

    p_opts = {}
    s_opts = {}
    s_opts["max_iter"] = 1e6  # If you need to interrupt, just use ctrl+c
    # s_opts["bound_frac"] = 0.5
    # s_opts["bound_push"] = 0.5
    # s_opts["slack_bound_frac"] = 0.5
    # s_opts["slack_bound_push"] = 0.5
    # s_opts["mu_strategy"] = "adaptive"
    # s_opts["mu_oracle"] = "quality-function"
    # s_opts["quality_function_max_section_steps"] = 20
    # s_opts["fixed_mu_oracle"] = "quality-function"
    # s_opts["alpha_for_y"] = "min"
    # s_opts["alpha_for_y"] = "primal-and-full"
    # s_opts["watchdog_shortened_iter_trigger"] = 1
    # s_opts["expect_infeasible_problem"]="yes" # TODO remove all this
    # s_opts["start_with_resto"] = "yes"
    # s_opts["required_infeasibility_reduction"] = 0.001
    # s_opts["evaluate_orig_obj_at_resto_trial"] = "yes"
    opti.solver('ipopt', p_opts, s_opts)

    try:
        sol = opti.solve()
    except Exception:
        # Keep the last iterate for inspection even if IPOPT did not converge.
        print("Failed!")
        sol = opti.debug

    import copy  # NOTE(review): appears unused here.

    # NOTE(review): substitutes the solution into the beam object; relies on
    # sol-call semantics defined elsewhere in this package -- confirm.
    beam_sol = sol(beam)

    print(f"Beam mass: {beam_sol.mass} kg")
    beam_sol.draw_bending()

    bs = beam_sol
import aerosandbox.numpy as np
import casadi as cas
if __name__ == '__main__':
    # Calibration script: same SAND bending+torsion spar sizing as
    # simple_beam_opt, but with a fixed total lift (Daedalus-like: 103.873 kg)
    # rather than a lift that depends on the optimized spar mass, and with
    # n = 200 nodes.
    opti = cas.Opti()  # Initialize a SAND environment

    # Define Assumptions
    L = 34.1376 / 2  # Half-span [m]
    n = 200  # Number of spanwise discretization nodes
    x = cas.linspace(0, L, n)  # Spanwise stations [m], root to tip
    dx = cas.diff(x)
    E = 228e9  # Pa, modulus of CF
    G = E / 2 / (1 + 0.5)  # TODO fix this!!! CFRP is not isotropic!
    max_allowable_stress = 570e6 / 1.75  # Strength with a 1.75 knockdown/safety factor [Pa]

    # Diameter is optimized in log-space, which keeps it strictly positive.
    log_nominal_diameter = opti.variable(n)
    opti.set_initial(log_nominal_diameter, cas.log(200e-3))
    nominal_diameter = cas.exp(log_nominal_diameter)

    thickness = 0.14e-3 * 5  # Wall thickness [m]
    opti.subject_to([
        nominal_diameter > thickness,
    ])

    # Bending loads
    I = cas.pi / 64 * ((nominal_diameter + thickness) ** 4 - (nominal_diameter - thickness) ** 4)
    EI = E * I
    total_lift_force = 9.81 * 103.873 / 2  # Half-vehicle weight [N]

    lift_distribution = "elliptical"
    # NOTE(review): no `else` branch -- any other value of `lift_distribution`
    # would leave `force_per_unit_length` undefined (NameError below).
    if lift_distribution == "rectangular":
        force_per_unit_length = total_lift_force * cas.GenDM_ones(n) / L
    elif lift_distribution == "elliptical":
        force_per_unit_length = total_lift_force * cas.sqrt(1 - (x / L) ** 2) * (4 / cas.pi) / L

    # Torsion loads
    J = cas.pi / 32 * ((nominal_diameter + thickness) ** 4 - (nominal_diameter - thickness) ** 4)
    airfoil_lift_coefficient = 1
    airfoil_moment_coefficient = -0.14
    airfoil_chord = 1  # meter
    moment_per_unit_length = force_per_unit_length * airfoil_moment_coefficient * airfoil_chord / airfoil_lift_coefficient
    # Derivation of above:
    # CL = L / q c
    # CM = M / q c**2
    # M / L = (CM * c) / (CL)

    # Set up derivatives
    # Each state is an opti variable; the leading scalar sets its scaling.
    u = 1 * opti.variable(n)
    du = 0.1 * opti.variable(n)
    ddu = 0.01 * opti.variable(n)
    dEIddu = 100 * opti.variable(n)
    phi = 0.1 * opti.variable(n)
    dphi = 0.01 * opti.variable(n)
    # opti.set_initial(u, 2 * (x/L)**4)
    # opti.set_initial(du, 2 * 4/L * (x/L)**3)
    # opti.set_initial(ddu, 2 * 3/L * 2/L * (x/L))
    # opti.set_initial(dEIddu, 2 * 3/L * 2/L * 1/L * 1e3)

    # Add forcing term
    ddEIddu = force_per_unit_length  # (E I u'')'' = q(x)
    ddphi = -moment_per_unit_length / (G * J)  # phi'' = -T / (G J)

    # Define derivatives (trapezoidal collocation between adjacent nodes)
    def trapz(x):
        # Converts nodal values to per-interval trapezoidal averages. The
        # endpoint terms are added so that sum(out) == sum(x), i.e. totals are
        # conserved when integrating against dx.
        out = (x[:-1] + x[1:]) / 2
        out[0] += x[0] / 2
        out[-1] += x[-1] / 2
        return out

    opti.subject_to([
        cas.diff(u) == trapz(du) * dx,
        cas.diff(du) == trapz(ddu) * dx,
        cas.diff(EI * ddu) == trapz(dEIddu) * dx,
        cas.diff(dEIddu) == trapz(ddEIddu) * dx,
        cas.diff(phi) == trapz(dphi) * dx,
        cas.diff(dphi) == trapz(ddphi) * dx,
    ])

    # Add BCs (cantilever: clamped root, free tip)
    opti.subject_to([
        u[0] == 0,
        du[0] == 0,
        ddu[-1] == 0,  # No tip moment
        dEIddu[-1] == 0,  # No tip higher order stuff
        phi[0] == 0,
        dphi[-1] == 0,
    ])

    # Failure criterion
    stress_axial = (nominal_diameter + thickness) / 2 * E * ddu
    stress_shear = dphi * G * (nominal_diameter + thickness) / 2
    # stress_axial = cas.fmax(0, stress_axial)
    # stress_shear = cas.fmax(0, stress_shear)
    # NOTE(review): despite the name, cas.sqrt is applied and the shear term is
    # zeroed out, so this is neither squared nor a true von Mises stress; it is
    # also unused below. Confirm intent before reusing.
    stress_von_mises_squared = cas.sqrt(
        stress_axial ** 2 + 0 * stress_shear ** 2)  # Source: https://en.wikipedia.org/wiki/Von_Mises_yield_criterion
    stress = stress_axial
    opti.subject_to([
        stress / max_allowable_stress < 1
    ])

    # Mass
    volume = cas.sum1(
        cas.pi / 4 * trapz((nominal_diameter + thickness) ** 2 - (nominal_diameter - thickness) ** 2) * dx
    )
    mass = volume * 1600  # Material density [kg/m^3]
    opti.minimize(mass)

    # Tip deflection constraint
    opti.subject_to([
        u[-1] < 2  # Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
    ])

    # Twist
    opti.subject_to([
        phi[-1] * 180 / cas.pi > -3  # Tip twist limit [deg] (nose-down)
    ])

    p_opts = {}
    s_opts = {}
    s_opts["max_iter"] = 500  # If you need to interrupt, just use ctrl+c
    # s_opts["mu_strategy"] = "adaptive"
    opti.solver('ipopt', p_opts, s_opts)

    try:
        sol = opti.solve()
    except Exception:
        # Keep the last iterate for plotting even if IPOPT did not converge.
        print("Failed!")
        sol = opti.debug

    import matplotlib.pyplot as plt
    import seaborn as sns

    sns.set(font_scale=1)

    fig, ax = plt.subplots(2, 3, figsize=(10, 6), dpi=200)

    plt.subplot(231)
    plt.plot(sol.value(x), sol.value(u), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("u [m]")
    plt.title("Displacement (Bending)")
    plt.axis("equal")

    # plt.subplot(232)
    # plt.plot(sol.value(x), np.arctan(sol.value(du))*180/np.pi, '.-')
    # plt.xlabel("x [m]")
    # plt.ylabel(r"Local Slope [deg]")
    # plt.title("Slope")

    plt.subplot(232)
    plt.plot(sol.value(x), sol.value(phi) * 180 / np.pi, '.-')
    plt.xlabel("x [m]")
    plt.ylabel("Twist angle [deg]")
    plt.title("Twist Angle (Torsion)")

    plt.subplot(233)
    plt.plot(sol.value(x), sol.value(force_per_unit_length), '.-')
    plt.xlabel("x [m]")
    plt.ylabel(r"$F$ [N/m]")
    plt.title("Local Load per Unit Span")

    plt.subplot(234)
    plt.plot(sol.value(x), sol.value(stress / 1e6), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("Stress [MPa]")
    plt.title("Peak Stress at Section")

    plt.subplot(235)
    plt.plot(sol.value(x), sol.value(dEIddu), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("F [N]")
    plt.title("Shear Force")

    plt.subplot(236)
    plt.plot(sol.value(x), sol.value(nominal_diameter), '.-')
    plt.xlabel("x [m]")
    plt.ylabel("t [m]")
    plt.title("Optimal Spar Diameter")

    plt.suptitle(f"Beam Modeling (Total Spar Mass: {2 * sol.value(mass):.2f} kg)")
    plt.subplots_adjust(hspace=0.4)
    # NOTE(review): hard-coded, machine-specific output path.
    plt.savefig("C:/Users/User/Downloads/beam.png")
    # plt.tight_layout()
    # plt.legend()
    plt.show()

    print("Mass (half-wing) [kg]:", sol.value(mass))
    print("Mass (full-wing) [kg]:", 2 * sol.value(mass))
import aerosandbox as asb
import aerosandbox.numpy as np
import cadquery as cq
import copy
from typing import List, Union, Dict
from sortedcontainers import SortedDict
class WingStructureGenerator():
    def __init__(self,
                 wing: asb.Wing,
                 default_rib_thickness=3e-3,
                 minimum_airfoil_TE_thickness_rel: float = 0.001
                 ):
        """
        Generates internal structure (ribs, spars) for a wing, as CadQuery solids.

        Args:
            wing: The AeroSandbox Wing to generate structure for.
            default_rib_thickness: Rib thickness [m] used by the `add_ribs_*`
                methods when no explicit thickness is given.
            minimum_airfoil_TE_thickness_rel: Minimum trailing-edge thickness,
                as a fraction of chord, enforced on airfoils so that the
                resulting solids are watertight.
        """
        self.wing = wing
        self.default_rib_thickness = default_rib_thickness
        self.minimum_airfoil_TE_thickness_rel = minimum_airfoil_TE_thickness_rel

        ### Compute some span properties which are used for locating ribs
        self._sectional_spans: List[float] = wing.span(_sectional=True)
        # _cumulative_spans_up_to_section[i] is the span measured from the root
        # to the start of section i (length: number of xsecs).
        self._cumulative_spans_up_to_section = np.concatenate((
            [0],
            np.cumsum(self._sectional_spans)
        ))
        self._total_span = sum(self._sectional_spans)

        ### Generate the OML geometry
        self.oml = asb.Airplane(
            wings=[wing]
        ).generate_cadquery_geometry(
            minimum_airfoil_TE_thickness=minimum_airfoil_TE_thickness_rel
        )

        ### Set up data structures for geometry
        # Ribs are keyed by their span location, kept sorted root-to-tip.
        self.ribs: SortedDict[cq.Workplane] = SortedDict()
        self.spars: List[cq.Workplane] = []
def __repr__(self):
return f"{self.__class__.__name__}({self.wing})"
# def open_interactive(self):
def add_ribs_from_section_span_fractions(
self,
section_index: int,
section_span_fractions: Union[float, int, List[float], np.ndarray],
rib_thickness: float = None,
):
if rib_thickness is None:
rib_thickness = self.default_rib_thickness
try:
iter(section_span_fractions)
except TypeError:
section_span_fractions = [section_span_fractions]
xsec_a = wing.xsecs[section_index]
xsec_b = wing.xsecs[section_index + 1]
for s in section_span_fractions:
af = xsec_a.airfoil.blend_with_another_airfoil(
airfoil=xsec_b.airfoil,
blend_fraction=s
)
chord = (
(1 - s) * xsec_a.chord + s * xsec_b.chord
)
csys = wing._compute_frame_of_section(section_index)
span = (
self._cumulative_spans_up_to_section[section_index]
+ s * self._sectional_spans[section_index]
)
self.ribs[span] = (
cq.Workplane(
inPlane=cq.Plane(
origin=tuple(
(1 - s) * xsec_a.xyz_le + s * xsec_b.xyz_le
),
xDir=tuple(csys[0]),
normal=tuple(-csys[1])
)
).spline(
listOfXYTuple=[
tuple(xy * chord)
for xy in af.coordinates
]
).close().extrude(
rib_thickness / 2,
combine=False,
both=True
)
)
def add_ribs_from_xsecs(
self,
indexes: List[int] = None,
rib_thickness: float = None
):
if rib_thickness is None:
rib_thickness = self.default_rib_thickness
if indexes is None:
indexes = range(len(wing.xsecs))
for i in indexes:
xsec = wing.xsecs[i]
csys = wing._compute_frame_of_WingXSec(i)
af = xsec.airfoil
if af.TE_thickness() < self.minimum_airfoil_TE_thickness_rel:
af = af.set_TE_thickness(
thickness=self.minimum_airfoil_TE_thickness_rel
)
span = self._cumulative_spans_up_to_section[i]
self.ribs[span] = (
cq.Workplane(
inPlane=cq.Plane(
origin=tuple(xsec.xyz_le),
xDir=tuple(csys[0]),
normal=tuple(-csys[1])
)
).spline(
listOfXYTuple=[
tuple(xy * xsec.chord)
for xy in af.coordinates
]
).close().extrude(
rib_thickness / 2,
combine=False,
both=True
)
)
def add_ribs_from_span_fractions(
self,
span_fractions: Union[float, List[float], np.ndarray] = np.linspace(0, 1, 10),
rib_thickness: float = None,
):
### Handle span_fractions if it's not an iterable
try:
iter(span_fractions)
except TypeError:
span_fractions = [span_fractions]
for s in span_fractions:
if s == 0:
section_index = 0
section_span_fraction = 0
elif s == 1:
section_index = len(self.wing.xsecs) - 2
section_span_fraction = 1
elif s < 0 or s > 1:
raise ValueError(
"All values of `span_fractions` must be between 0 and 1!"
)
else:
section_index = np.argwhere(
self._cumulative_spans_up_to_section > self._total_span * s
)[0][0] - 1
section_span_fraction = (
s * self._total_span
- self._cumulative_spans_up_to_section[section_index]
) / self._sectional_spans[section_index]
self.add_ribs_from_section_span_fractions(
section_index=section_index,
section_span_fractions=section_span_fraction,
rib_thickness=rib_thickness
)
def add_tube_spar(self,
span_location_root: float,
span_location_tip: float,
diameter_root,
x_over_c_location_root=0.25,
y_over_c_location_root=None,
x_over_c_location_tip=None,
y_over_c_location_tip=None,
diameter_tip: float = None,
cut_ribs: bool = True,
):
if diameter_tip is None:
diameter_tip = diameter_root
if x_over_c_location_tip is None:
x_over_c_location_tip = x_over_c_location_root
# TODO change behavior so that default is a 90 degree spar
if y_over_c_location_root is None:
y_over_c_location_root = wing.xsecs[0].airfoil.local_camber(
x_over_c=x_over_c_location_root
)
if y_over_c_location_tip is None:
y_over_c_location_tip = y_over_c_location_root
# TODO change behavior so that default is a 90 degree spar
### Figure out where the spar root is
section_index = np.argwhere(
self._cumulative_spans_up_to_section > span_location_root
)[0][0] - 1
section_span_fraction = (
span_location_root
- self._cumulative_spans_up_to_section[section_index]
) / self._sectional_spans[section_index]
root_csys = self.wing._compute_frame_of_section(section_index)
root_le_point = (
(1 - section_span_fraction) * wing.xsecs[section_index].xyz_le
+ section_span_fraction * wing.xsecs[section_index + 1].xyz_le
)
root_chord = (
(1 - section_span_fraction) * wing.xsecs[section_index].chord
+ section_span_fraction * wing.xsecs[section_index + 1].chord
)
root_point = (
root_le_point +
x_over_c_location_root * root_csys[0] * root_chord +
y_over_c_location_root * root_csys[2] * root_chord
)
### Figure out where the spar tip is
section_index = np.argwhere(
self._cumulative_spans_up_to_section > span_location_tip
)[0][0] - 1
section_span_fraction = (
span_location_tip
- self._cumulative_spans_up_to_section[section_index]
) / self._sectional_spans[section_index]
tip_csys = self.wing._compute_frame_of_section(section_index)
tip_le_point = (
(1 - section_span_fraction) * wing.xsecs[section_index].xyz_le
+ section_span_fraction * wing.xsecs[section_index + 1].xyz_le
)
tip_chord = (
(1 - section_span_fraction) * wing.xsecs[section_index].chord
+ section_span_fraction * wing.xsecs[section_index + 1].chord
)
tip_point = (
tip_le_point +
x_over_c_location_tip * tip_csys[0] * tip_chord +
y_over_c_location_tip * tip_csys[2] * tip_chord
)
normal = tip_point - root_point
root_plane = cq.Plane(
origin=tuple(root_point),
xDir=tuple(root_csys[0]),
normal=tuple(normal)
)
tip_plane = cq.Plane(
origin=tuple(tip_point),
xDir=tuple(tip_csys[0]),
normal=tuple(normal)
)
### Make the spar
root_wire = cq.Workplane(
inPlane=root_plane
).circle(radius=2 * diameter_root / 2)
tip_wire = cq.Workplane(
inPlane=tip_plane
).circle(radius=2 * diameter_tip / 2)
wire_collection = root_wire
wire_collection.ctx.pendingWires.extend(tip_wire.ctx.pendingWires)
spar = wire_collection.loft(ruled=True, clean=False)
self.spars.append(spar)
if cut_ribs:
for span_loc, rib in self.ribs.items():
rib: cq.Workplane
self.ribs[span_loc] = rib.cut(spar)
from aerosandbox.tools import units as u
### Example / demo: build a simple three-section wing and generate its internal structure.

af = asb.Airfoil("sd7032")

wing = asb.Wing(
    name="Wing",
    symmetric=True,
    xsecs=[
        asb.WingXSec(
            chord=0.2,
            airfoil=af
        ),
        asb.WingXSec(
            xyz_le=[0.05, 0.5, 0],
            chord=0.15,
            airfoil=af
        ),
        asb.WingXSec(
            xyz_le=[0.1, 0.8, 0.1],
            chord=0.1,
            airfoil=af
        )
    ]
)

s = WingStructureGenerator(
    wing,
    default_rib_thickness=1 / 16 * u.inch  # 1/16-inch ribs, converted to meters
)

# s.add_ribs_from_section_span_fractions(0, np.linspace(0, 1, 10)[1:-1])
# s.add_ribs_from_section_span_fractions(1, np.linspace(0, 1, 8)[1:-1])
s.add_ribs_from_xsecs()  # one rib at each defined wing cross-section
# s.add_ribs_from_span_fractions(
#     span_fractions=np.linspace(0, 1, 6)
# )
s.add_tube_spar(
    diameter_root=8e-3,
    # NOTE(review): root is slightly inboard of y=0 (negative) — presumably so the
    # root rib is fully pierced by the spar cut; confirm.
    span_location_root=-1 / 16 * u.inch,
    span_location_tip=s.ribs.keys()[1],  # keys are spanwise rib locations [m] (SortedDict)
    diameter_tip=4e-3
)

### Show all ribs
rs = s.ribs.values()[0]
for rib in s.ribs.values()[1:]:
    rs = rs.union(rib)  # union all rib solids into a single Workplane for display

ss = s.spars[0]
# oml = s.oml
import numpy as _onp
import casadi as _cas
from typing import Any
def is_casadi_type(
        object: Any,
        recursive: bool = True
) -> bool:
    """
    Returns a boolean of whether an object is a CasADi data type or not. If the recursive flag is True,
    iterates recursively, returning True if any subelement (at any depth) is a CasADi type.

    Args:

        object: The object to evaluate.

        recursive: If the object is a list or tuple, recursively iterate through every subelement. If any of the
        subelements (at any depth) are a CasADi type, return True. Otherwise, returns False.

    Returns: A boolean if the object is (or contains, if recursive=True) a CasADi data type.

    """
    t = type(object)

    # NumPy arrays cannot be or contain CasADi types, unless they are object arrays.
    if t == _onp.ndarray and object.dtype != 'O':
        return False

    # Common Python scalar/sequence types that can never be (or hold) CasADi objects.
    if t in (
            float, int, complex,
            bool, str,
            range,
            type(None),
            bytes, bytearray, memoryview
    ):
        return False

    # Direct hit: the object itself is a CasADi type.
    if t in (_cas.MX, _cas.DM, _cas.SX):
        return True

    # At this point we know the object itself is not a CasADi type; it may still
    # *contain* one, which we only check for if `recursive` is set.
    if not recursive:
        return False

    if (
            issubclass(t, (list, tuple, set))
            or (t == _onp.ndarray and object.dtype == 'O')
    ):
        for element in object:
            if is_casadi_type(element, recursive=True):
                return True
        return False

    if issubclass(t, dict):
        # Check each (key, value) pair; the tuple is handled by the sequence branch above.
        for kv in object.items():
            if is_casadi_type(kv, recursive=True):
                return True
        return False

    return False
def is_iterable(x):
    """
    Returns True if `x` is iterable (i.e., `iter(x)` succeeds), and False otherwise.

    Args:
        x: Any object.

    Returns: A boolean.

    """
    try:
        iter(x)
    except TypeError:
        return False
    else:
        return True
from aerosandbox.numpy.array import array, length
import numpy as _onp
def finite_difference_coefficients(
        x: _onp.ndarray,
        x0: float = 0,
        derivative_degree: int = 1,
) -> _onp.ndarray:
    """
    Computes the weights (coefficients) in compact finite differece formulas for any order of derivative
    and to any order of accuracy on one-dimensional grids with arbitrary spacing.

    (Wording above is taken from the paper below, as are docstrings for parameters.)

    Modified from an implementation of:

        Fornberg, Bengt, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids". Oct. 1988.
        Mathematics of Computation, Volume 51, Number 184, pages 699-706.

        PDF: https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf

        More detail: https://en.wikipedia.org/wiki/Finite_difference_coefficient

    Args:

        derivative_degree: The degree of the derivative that you are interested in obtaining. (denoted "M" in the
        paper)

        x: The grid points (not necessarily uniform or in order) that you want to obtain weights for. You must
        provide at least as many grid points as the degree of the derivative that you're interested in, plus 1.

            The order of accuracy of your derivative depends in part on the number of grid points that you provide.
            Specifically:

                order_of_accuracy = n_grid_points - derivative_degree

            (This is in general; can be higher in special cases.)

            For example, if you're evaluating a second derivative and you provide three grid points, you'll have a
            first-order-accurate answer.

            (x is denoted "alpha" in the paper)

        x0: The location that you are interested in obtaining a derivative at. This need not be on a grid point.

    Complexity is O(derivative_degree * len(x) ^ 2)

    Returns: A 1D ndarray corresponding to the coefficients that should be placed on each grid point. In other words,
    the approximate derivative at `x0` is the dot product of `coefficients` and the function values at each of the
    grid points `x`.

    Raises:
        ValueError: If `derivative_degree` < 1, or if too few grid points are provided.

    """
    ### Check inputs
    # Bugfix: these errors were previously *returned* rather than *raised*, so invalid
    # inputs silently produced a ValueError instance instead of signaling failure.
    if derivative_degree < 1:
        raise ValueError("The parameter derivative_degree must be an integer >= 1.")
    expected_order_of_accuracy = length(x) - derivative_degree
    if expected_order_of_accuracy < 1:
        raise ValueError("You need to provide at least (derivative_degree+1) grid points in the x vector.")

    ### Implement algorithm; notation from paper in docstring.
    N = length(x) - 1

    # delta[m, n, v] holds the weight for grid point v in the m-th-derivative formula
    # using grid points 0..n. Object dtype so that symbolic entries propagate if present.
    delta = _onp.zeros(
        shape=(
            derivative_degree + 1,
            N + 1,
            N + 1
        ),
        dtype="O"
    )

    delta[0, 0, 0] = 1

    c1 = 1

    for n in range(1,
                   N + 1):  # TODO make this algorithm more efficient; we only need to store a fraction of this data.

        c2 = 1

        for v in range(n):
            c3 = x[n] - x[v]
            c2 = c2 * c3

            # if n <= M:  # Omitted because d is initialized to zero.
            #     d[n, n - 1, v] = 0

            for m in range(min(n, derivative_degree) + 1):
                delta[m, n, v] = (
                                         (x[n] - x0) * delta[m, n - 1, v] - m * delta[m - 1, n - 1, v]
                                 ) / c3

        for m in range(min(n, derivative_degree) + 1):
            delta[m, n, n] = (
                    c1 / c2 * (
                    m * delta[m - 1, n - 1, n - 1] - (x[n - 1] - x0) * delta[m, n - 1, n - 1]
            )
            )

        c1 = c2

    coefficients_object_array = delta[derivative_degree, -1, :]

    coefficients = array([*coefficients_object_array])  # Reconstructs using aerosandbox.numpy to intelligently type

    return coefficients
import numpy as _onp
import casadi as _cas
from aerosandbox.numpy.determine_type import is_casadi_type
def clip(
        x,
        min,
        max
):
    """
    Clips (saturates) `x` so that it lies within the closed interval [min, max].

    Args:
        x: Value to clip.
        min: Lower bound of the clip.
        max: Upper bound of the clip.

    Returns: The clipped value (elementwise, if `x` is an array).

    """
    # Apply the lower bound first, then the upper bound; uses fmin/fmax so that
    # NaN handling matches the original implementation.
    lower_bounded = _onp.fmax(x, min)
    return _onp.fmin(lower_bounded, max)
def logical_and(x1, x2):
    """
    Compute the truth value of x1 AND x2 element-wise.

    See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.logical_and.html
    """
    # Dispatch on backend: CasADi symbolics get CasADi's logic op; everything else NumPy's.
    if is_casadi_type([x1, x2], recursive=True):
        return _cas.logic_and(x1, x2)
    return _onp.logical_and(x1, x2)
def logical_or(x1, x2):
    """
    Compute the truth value of x1 OR x2 element-wise.

    See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.logical_or.html
    """
    # Dispatch on backend: CasADi symbolics get CasADi's logic op; everything else NumPy's.
    if is_casadi_type([x1, x2], recursive=True):
        return _cas.logic_or(x1, x2)
    return _onp.logical_or(x1, x2)
def logical_not(x):
    """
    Compute the truth value of NOT x element-wise.

    See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.logical_not.html
    """
    # Dispatch on backend: CasADi symbolics get CasADi's logic op; everything else NumPy's.
    if is_casadi_type(x, recursive=False):
        return _cas.logic_not(x)
    return _onp.logical_not(x)
def all(a):  # TODO add axis functionality
    """
    Test whether all array elements along a given axis evaluate to True.

    See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.all.html
    """
    # CasADi branch first; falls back to False if CasADi can't evaluate the reduction.
    if is_casadi_type(a, recursive=False):
        try:
            return _cas.logic_all(a)
        except NotImplementedError:
            return False
    return _onp.all(a)
def any(a):  # TODO add axis functionality
    """
    Test whether any array element along a given axis evaluates to True.

    See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.any.html
    """
    # CasADi branch first; falls back to False if CasADi can't evaluate the reduction.
    if is_casadi_type(a, recursive=False):
        try:
            return _cas.logic_any(a)
        except NotImplementedError:
            return False
    return _onp.any(a)
import aerosandbox.numpy as _np
from typing import Tuple, Union
def softmax(
        *args: Union[float, _np.ndarray],
        hardness: float = None,
        softness: float = None,
) -> Union[float, _np.ndarray]:
    """
    An element-wise softmax between two or more arrays. Also referred to as the logsumexp() function.

    Useful for optimization because it's differentiable and preserves convexity!

    Great writeup by John D Cook here:
        https://www.johndcook.com/soft_maximum.pdf

    Notes: Can provide either `hardness` or `softness`, not both. These are the inverse of each other. If neither is
    provided, `hardness` is set to 1.

    Args:

        *args: Provide any number of arguments as values to take the softmax of.

        hardness: Hardness parameter. Higher values make this closer to max(x1, x2).

        softness: Softness parameter. (Inverse of hardness.) Lower values make this closer to max(x1, x2).

            - Setting `softness` is particularly useful, because it has the same units as each of the function's
            inputs. For example, if you're taking the softmax of two values that are lengths in units of meters,
            then `softness` is also in units of meters. In this case, `softness` has the rough meaning of "an amount
            of discrepancy between the input values that would be considered physically significant".

    Returns:
        Soft maximum of the supplied values.
    """
    ### Set defaults for hardness/softness
    if hardness is not None:
        if softness is not None:
            raise ValueError("You can't specify both `hardness` and `softness`.")
    else:
        if softness is not None:
            hardness = 1 / softness
        else:
            hardness = 1.0

    if _np.any(hardness <= 0):
        if softness is not None:
            raise ValueError("The value of `softness` must be positive.")
        else:
            raise ValueError("The value of `hardness` must be positive.")

    if len(args) <= 1:
        raise ValueError("You must call softmax with the value of two or more arrays that you'd like to take the "
                         "element-wise softmax of.")

    ### Scale the args by hardness
    args = [arg * hardness for arg in args]

    ### Find the element-wise max of the (scaled) arrays.
    # (A previous revision also tracked the element-wise min here, but it was never used.)
    running_max = args[0]
    for arg in args[1:]:
        running_max = _np.fmax(running_max, arg)

    # Shift by the max before exponentiating for numerical stability (standard logsumexp
    # trick); the inner maximum(-, -500) guards exp() against underflow-driven extremes.
    out = running_max + _np.log(sum(
        [_np.exp(_np.maximum(scaled_arg - running_max, -500)) for scaled_arg in args]
    )
    )
    out = out / hardness
    return out
def softmin(
        *args: Union[float, _np.ndarray],
        hardness: float = None,
        softness: float = None,
) -> Union[float, _np.ndarray]:
    """
    An element-wise soft minimum between two or more arrays. Related to the logsumexp() function.

    Useful for optimization because it's differentiable and preserves convexity!

    Implemented as the negated softmax of the negated inputs; see softmax() for a full
    discussion of the `hardness` and `softness` parameters (provide at most one of the two;
    if neither is given, `hardness` defaults to 1).

    Args:

        *args: Provide any number of arguments as values to take the softmin of.

        hardness: Hardness parameter. Higher values make this closer to min(x1, x2).

        softness: Softness parameter (inverse of hardness); has the same units as the
            inputs, which makes it the natural parameter to set in physical problems.

    Returns:
        Soft minimum of the supplied values.
    """
    negated_args = [-arg for arg in args]
    return -softmax(
        *negated_args,
        hardness=hardness,
        softness=softness
    )
def sigmoid(
        x,
        sigmoid_type: str = "tanh",
        normalization_range: Tuple[Union[float, int], Union[float, int]] = (0, 1)
):
    """
    A sigmoid function. From Wikipedia (https://en.wikipedia.org/wiki/Sigmoid_function):
        A sigmoid function is a mathematical function having a characteristic "S"-shaped curve
        or sigmoid curve.

    Args:

        x: The input

        sigmoid_type: Type of sigmoid function to use [str]. Can be one of:
            * "tanh" or "logistic" (same thing)
            * "arctan"
            * "polynomial"

        normalization_range: Range in which to normalize the sigmoid, shorthanded here in the
            documentation as "N". This parameter is given as a two-element tuple (min, max).

            After normalization:
                >>> sigmoid(-Inf) == normalization_range[0]
                >>> sigmoid(Inf) == normalization_range[1]

            * In the special case of N = (0, 1):
                >>> sigmoid(-Inf) == 0
                >>> sigmoid(Inf) == 1
                >>> sigmoid(0) == 0.5
                >>> d(sigmoid)/dx at x=0 == 0.5

            * In the special case of N = (-1, 1):
                >>> sigmoid(-Inf) == -1
                >>> sigmoid(Inf) == 1
                >>> sigmoid(0) == 0
                >>> d(sigmoid)/dx at x=0 == 1

    Returns: The value of the sigmoid.

    Raises:
        ValueError: If `sigmoid_type` is not one of the recognized options.
    """
    ### Sigmoid equations given here under the (-1, 1) normalization:
    # Bugfix: this was previously `sigmoid_type == ("tanh" or "logistic")`, which evaluates
    # to `== "tanh"` only, so "logistic" (incorrectly) raised a ValueError.
    if sigmoid_type in ("tanh", "logistic"):
        # Note: tanh(x) is simply a scaled and shifted version of a logistic curve; after
        # normalization these functions are identical.
        s = _np.tanh(x)
    elif sigmoid_type == "arctan":
        s = 2 / _np.pi * _np.arctan(_np.pi / 2 * x)
    elif sigmoid_type == "polynomial":
        s = x / (1 + x ** 2) ** 0.5
    else:
        raise ValueError("Bad value of parameter 'sigmoid_type'!")

    ### Normalize from (-1, 1) to the requested output range.
    out_min = normalization_range[0]
    out_max = normalization_range[1]
    s_normalized = s * (out_max - out_min) / 2 + (out_max + out_min) / 2

    return s_normalized
def blend(
        switch: float,
        value_switch_high,
        value_switch_low,
):
    """
    Smoothly blends between two values on the basis of some switch function.

    This is the differentiable analogue of numpy.where() — rather than a boolean selecting
    one of the two values, a float smoothly interpolates between them (via a tanh-type
    sigmoid normalized to (0, 1)).

    Before using this function, be sure to understand the difference between this and
    smoothmax(), and choose the correct one.

    Args:
        switch: A value that acts as a "switch" between the two values [float].
            switch = -Inf returns value_switch_low; switch = Inf returns value_switch_high;
            switch = 0 returns the mean of the two. At switch = +/-1 the weighting is
            roughly 88% / 12% in favor of the corresponding value.
        value_switch_high: Value to be returned when switch is high. Can be a float or an array.
        value_switch_low: Value to be returned when switch is low. Can be a float or an array.

    Returns: A value that is a blend between value_switch_low and value_switch_high, with the
        weighting dependent on the value of the 'switch' parameter.
    """
    high_weight = sigmoid(
        switch,
        normalization_range=(0, 1)
    )
    low_weight = 1 - high_weight

    return (
            value_switch_high * high_weight +
            value_switch_low * low_weight
    )
from aerosandbox.numpy import sin, cos, linalg
from aerosandbox.numpy.array import array
import numpy as _onp
from typing import Union, List
def rotation_matrix_2D(
        angle,
        as_array: bool = True,
):
    """
    Builds the 2D rotation matrix for a counterclockwise rotation by `angle`.

    Args:
        angle: Angle by which to rotate. Given in radians.
        as_array: If True, returns an array-like; otherwise, a plain list of lists.

    Returns: The 2D rotation matrix.

    """
    cos_theta = cos(angle)
    sin_theta = sin(angle)

    rows = [
        [cos_theta, -sin_theta],
        [sin_theta, cos_theta]
    ]

    return array(rows) if as_array else rows
def rotation_matrix_3D(
        angle: Union[float, _onp.ndarray],
        axis: Union[_onp.ndarray, List, str],
        as_array: bool = True,
        axis_already_normalized: bool = False
):
    """
    Builds the 3x3 matrix for a right-hand-rule rotation of `angle` radians about `axis`.

    An implementation of https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle

    Args:
        angle: The angle to rotate by. [radians]
            Direction of rotation corresponds to the right-hand rule.
            Can be vectorized.

        axis: The axis to rotate about. Either the string "x", "y", or "z" for a principal
            axis, or an array-like [3] vector (vectorizable; axis[0] yields x-components, etc.)

        as_array: If True, returns a 3x3 array-like; otherwise, a list of lists. Flag this
            False if you intend to use this function vectorized (or test before proceeding).

        axis_already_normalized: Skips normalization of a vector `axis` for speed if True.

    Returns:
        The rotation matrix, with type according to the parameter `as_array`.
    """
    sin_a = sin(angle)
    cos_a = cos(angle)

    if isinstance(axis, str):
        axis_name = axis.lower()
        if axis_name == "x":
            rot = [
                [1, 0, 0],
                [0, cos_a, -sin_a],
                [0, sin_a, cos_a]
            ]
        elif axis_name == "y":
            rot = [
                [cos_a, 0, sin_a],
                [0, 1, 0],
                [-sin_a, 0, cos_a]
            ]
        elif axis_name == "z":
            rot = [
                [cos_a, -sin_a, 0],
                [sin_a, cos_a, 0],
                [0, 0, 1]
            ]
        else:
            raise ValueError("If `axis` is a string, it must be `x`, `y`, or `z`.")
    else:
        ax = axis[0]
        ay = axis[1]
        az = axis[2]

        if not axis_already_normalized:
            magnitude = (ax ** 2 + ay ** 2 + az ** 2) ** 0.5
            ax = ax / magnitude
            ay = ay / magnitude
            az = az / magnitude

        # Rodrigues' rotation formula, written out entry-by-entry.
        omc = 1 - cos_a  # "one minus cosine", shared by every off-diagonal term
        rot = [
            [cos_a + ax ** 2 * omc, ax * ay * omc - az * sin_a, ax * az * omc + ay * sin_a],
            [ay * ax * omc + az * sin_a, cos_a + ay ** 2 * omc, ay * az * omc - ax * sin_a],
            [az * ax * omc - ay * sin_a, az * ay * omc + ax * sin_a, cos_a + az ** 2 * omc]
        ]

    return array(rot) if as_array else rot
def rotation_matrix_from_euler_angles(
        roll_angle: Union[float, _onp.ndarray] = 0,
        pitch_angle: Union[float, _onp.ndarray] = 0,
        yaw_angle: Union[float, _onp.ndarray] = 0,
        as_array: bool = True
):
    """
    Builds the rotation matrix corresponding to a standard (yaw, pitch, roll) Euler
    angle sequence:

        * First, a rotation about x is applied (roll)
        * Second, a rotation about y is applied (pitch)
        * Third, a rotation about z is applied (yaw)

    In other words: R = R_z(yaw) @ R_y(pitch) @ R_x(roll).

    Note: To use this, pre-multiply your vector to go from body axes to earth axes.
        Example:
            >>> vector_earth = rotation_matrix_from_euler_angles(np.pi / 4, np.pi / 4, np.pi / 4) @ vector_body

    See notes:
    http://planning.cs.uiuc.edu/node102.html

    Args:
        roll_angle: The roll angle, which is a rotation about the x-axis. [radians]
        pitch_angle: The pitch angle, which is a rotation about the y-axis. [radians]
        yaw_angle: The yaw angle, which is a rotation about the z-axis. [radians]
        as_array: If True, returns a 3x3 array-like; otherwise, a list of lists.

    Returns: The 3x3 rotation matrix.

    """
    s_yaw, c_yaw = sin(yaw_angle), cos(yaw_angle)
    s_pitch, c_pitch = sin(pitch_angle), cos(pitch_angle)
    s_roll, c_roll = sin(roll_angle), cos(roll_angle)

    rot = [
        [c_yaw * c_pitch, c_yaw * s_pitch * s_roll - s_yaw * c_roll, c_yaw * s_pitch * c_roll + s_yaw * s_roll],
        [s_yaw * c_pitch, s_yaw * s_pitch * s_roll + c_yaw * c_roll, s_yaw * s_pitch * c_roll - c_yaw * s_roll],
        [-s_pitch, c_pitch * s_roll, c_pitch * c_roll]
    ]

    return array(rot) if as_array else rot
def is_valid_rotation_matrix(
        a: _onp.ndarray,
        tol=1e-9
) -> bool:
    """
    Returns a boolean of whether the given matrix satisfies the properties of a rotation matrix.

    Specifically, tests for:
        * Volume-preservation and handedness of the output reference frame (det == +1)
        * Orthogonality of the output reference frame (a.T @ a == identity)

    Args:
        a: The array-like to be tested
        tol: A tolerance to use for truthiness; accounts for floating-point error.

    Returns: A boolean of whether the array-like is a valid rotation matrix.

    """

    def approx_equal(x, y):
        # Symmetric absolute-tolerance comparison (strict inequalities on both sides).
        return (x > y - tol) and (x < y + tol)

    # A proper rotation preserves volume and is right-handed: det(R) == +1.
    det = linalg.det(a)
    if not approx_equal(det, 1):
        return False

    # Orthogonality check: a.T @ a should reproduce the identity matrix.
    eye_approx = a.T @ a
    eye = _onp.eye(a.shape[0])
    for i in range(eye.shape[0]):
        for j in range(eye.shape[1]):
            if not approx_equal(eye_approx[i, j], eye[i, j]):
                return False  # Early exit: one bad entry is enough to disqualify.

    return True
from operator import is_
import numpy as _onp
import casadi as _cas
from aerosandbox.numpy.determine_type import is_casadi_type
from aerosandbox.numpy.array import array, zeros_like
from aerosandbox.numpy.conditionals import where
from aerosandbox.numpy.logicals import all, any, logical_or
from typing import Tuple
from scipy import interpolate as _interpolate
def interp(x, xp, fp, left=None, right=None, period=None):
    """
    One-dimensional linear interpolation, analogous to numpy.interp().

    Returns the one-dimensional piecewise linear interpolant to a function with given discrete data points (xp, fp),
    evaluated at x.

    See syntax here: https://numpy.org/doc/stable/reference/generated/numpy.interp.html

    Specific notes: xp is assumed to be sorted.
    """
    # Fast path: if nothing here is a CasADi symbolic, defer entirely to numpy.interp().
    if not is_casadi_type([x, xp, fp], recursive=True):
        return _onp.interp(
            x=x,
            xp=xp,
            fp=fp,
            left=left,
            right=right,
            period=period
        )

    else:
        # CasADi path: emulate numpy.interp()'s behavior with CasADi-compatible operations.

        ### Handle period argument
        if period is not None:
            if any(
                    logical_or(
                        xp < 0,
                        xp > period
                    )
            ):
                raise NotImplementedError(
                    "Haven't yet implemented handling for if xp is outside the period.")  # Not easy to implement because casadi doesn't have a sort feature.

            # Wrap query points into [0, period).
            x = _cas.fmod(x, period)

        ### Make sure x isn't an int
        if isinstance(x, int):
            x = float(x)

        ### Make sure that x is an iterable
        try:
            x[0]
        except TypeError:
            x = array([x], dtype=float)

        ### Make sure xp is an iterable
        xp = array(xp, dtype=float)

        ### Do the interpolation
        if is_casadi_type([x, xp], recursive=True):
            # Fully-symbolic case: build a CasADi linear interpolant callback.
            grid = [xp.shape[0]]  # size of grid, list is used since can be multi-dimensional
            cas_interp = _cas.interpolant('cs_interp','linear',grid,1,{"inline": True})
            f = cas_interp(x,xp,fp)
        else:
            # Numeric grid, possibly-symbolic values: CasADi's interp1d suffices.
            f = _cas.interp1d(xp, fp, x)

        ### Handle left/right
        # Out-of-range queries are overwritten with the user-supplied fill values, matching
        # numpy.interp()'s `left`/`right` semantics.
        if left is not None:
            f = where(
                x < xp[0],
                left,
                f
            )
        if right is not None:
            f = where(
                x > xp[-1],
                right,
                f
            )

        ### Return
        return f
def is_data_structured(
        x_data_coordinates: Tuple[_onp.ndarray],
        y_data_structured: _onp.ndarray
) -> bool:
    """
    Determines if the shapes of a given dataset are consistent with "structured" (i.e. gridded) data.

    For this to evaluate True, the inputs should be:

        x_data_coordinates: A tuple or list of 1D ndarrays that represent coordinates along each axis of a N-dimensional hypercube.

        y_data_structured: The values of some scalar defined on that N-dimensional hypercube, expressed as an
        N-dimesional array. In other words, y_data_structured is evaluated at `np.meshgrid(*x_data_coordinates,
        indexing="ij")`.

    Returns: Boolean of whether the above description is true.
    """
    # Note: deliberately uses explicit loops rather than the builtin any()/all(), since this
    # module shadows those names with aerosandbox.numpy versions.
    try:
        for axis_coordinates in x_data_coordinates:
            if len(axis_coordinates.shape) != 1:
                return False

        expected_y_shape = tuple(
            len(axis_coordinates)
            for axis_coordinates in x_data_coordinates
        )
        return y_data_structured.shape == expected_y_shape

    except TypeError:  # e.g., x_data_coordinates is not iterable
        return False
    except AttributeError:  # e.g., an input has no .shape attribute
        return False
def interpn(
        points: Tuple[_onp.ndarray],
        values: _onp.ndarray,
        xi: _onp.ndarray,
        method: str = "linear",
        bounds_error=True,
        fill_value=_onp.nan
) -> _onp.ndarray:
    """
    Performs multidimensional interpolation on regular grids. Analogue to scipy.interpolate.interpn().

    See syntax here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interpn.html

    Args:

        points: The points defining the regular grid in n dimensions. Tuple of coordinates of each axis. Shapes (m1,
        ), ..., (mn,)

        values: The data on the regular grid in n dimensions. Shape (m1, ..., mn)

        xi: The coordinates to sample the gridded data at. (..., ndim)

        method: The method of interpolation to perform. one of:

            * "bspline" (Note: differentiable and suitable for optimization - made of piecewise-cubics. For other
            applications, other interpolators may be faster. Not monotonicity-preserving - may overshoot.)

            * "linear" (Note: differentiable, but not suitable for use in optimization w/o subgradient treatment due
            to C1-discontinuity)

            * "nearest" (Note: NOT differentiable, don't use in optimization. Fast.)

        bounds_error: If True, when interpolated values are requested outside of the domain of the input data,
        a ValueError is raised. If False, then fill_value is used.

        fill_value: If provided, the value to use for points outside of the interpolation domain. If None,
        values outside the domain are extrapolated.

    Returns: Interpolated values at input coordinates.

    """
    ### Check input types for points and values
    # The underlying grid must be purely numeric; only the query points `xi` may be symbolic.
    if is_casadi_type([points, values], recursive=True):
        raise TypeError("The underlying dataset (points, values) must consist of NumPy arrays.")

    ### Check dimensions of points
    for points_axis in points:
        points_axis = array(points_axis)
        if not len(points_axis.shape) == 1:
            raise ValueError("`points` must consist of a tuple of 1D ndarrays defining the coordinates of each axis.")

    ### Check dimensions of values
    implied_values_shape = tuple(len(points_axis) for points_axis in points)
    if not values.shape == implied_values_shape:
        raise ValueError(f"""
        The shape of `values` should be {implied_values_shape}.
        """)

    if ( ### NumPy implementation
            not is_casadi_type([points, values, xi], recursive=True)
    ) and (
            (method == "linear") or (method == "nearest")
    ):
        # Fully-numeric path with a SciPy-supported method: defer to scipy's interpn().
        xi = _onp.array(xi).reshape((-1, len(implied_values_shape)))
        return _interpolate.interpn(
            points=points,
            values=values,
            xi=xi,
            method=method,
            bounds_error=bounds_error,
            fill_value=fill_value
        )

    elif ( ### CasADi implementation
            (method == "linear") or (method == "bspline")
    ):
        ### Add handling to patch a specific bug in CasADi that occurs when `values` is all zeros.
        ### For more information, see: https://github.com/casadi/casadi/issues/2837
        if method == "bspline" and all(values == 0):
            return zeros_like(xi)

        ### If xi is an int or float, promote it to an array
        if isinstance(xi, int) or isinstance(xi, float):
            xi = array([xi])

        ### If xi is a NumPy array and 1D, convert it to 2D for this.
        if not is_casadi_type(xi, recursive=False) and len(xi.shape) != 2:
            xi = _onp.reshape(xi, (-1, 1))

        ### Check that xi is now 2D
        if not len(xi.shape) == 2:
            raise ValueError("`xi` must have the shape (n_points, n_dimensions)!")

        ### Transpose xi so that xi.shape is [n_points, n_dimensions].
        n_dimensions = len(points)
        if not len(points) in xi.shape:
            raise ValueError("`xi` must have the shape (n_points, n_dimensions)!")

        if not xi.shape[1] == n_dimensions:
            xi = xi.T
            assert xi.shape[1] == n_dimensions

        ### Calculate the minimum and maximum values along each axis.
        axis_values_min = [
            _onp.min(axis_values)
            for axis_values in points
        ]
        axis_values_max = [
            _onp.max(axis_values)
            for axis_values in points
        ]

        ### If fill_value is None, project the xi back onto the nearest point in the domain.
        # (This implements "extrapolation" by clamping the query to the domain boundary.)
        if fill_value is None:
            for axis in range(n_dimensions):
                xi[:, axis] = where(
                    xi[:, axis] > axis_values_max[axis],
                    axis_values_max[axis],
                    xi[:, axis]
                )
                xi[:, axis] = where(
                    xi[:, axis] < axis_values_min[axis],
                    axis_values_min[axis],
                    xi[:, axis]
                )

        ### Check bounds_error
        if bounds_error:
            # Symbolic (MX) queries cannot be bounds-checked at trace time.
            if isinstance(xi, _cas.MX):
                raise ValueError("Can't have the `bounds_error` flag as True if `xi` is of cas.MX type.")

            for axis in range(n_dimensions):

                if any(
                        logical_or(
                            xi[:, axis] > axis_values_max[axis],
                            xi[:, axis] < axis_values_min[axis]
                        )
                ):
                    raise ValueError(
                        f"One of the requested xi is out of bounds in dimension {axis}"
                    )

        ### Do the interpolation
        # 'F' (column-major) flattening — presumably the layout cas.interpolant expects
        # for its value grid; confirm against CasADi's interpolant documentation.
        values_flattened = _onp.ravel(values, order='F')
        interpolator = _cas.interpolant(
            'Interpolator',
            method,
            points,
            values_flattened
        )

        fi = interpolator(xi.T).T

        ### If fill_value is a scalar, replace all out-of-bounds xi with that value.
        if fill_value is not None:
            for axis in range(n_dimensions):
                fi = where(
                    xi[:, axis] > axis_values_max[axis],
                    fill_value,
                    fi
                )
                fi = where(
                    xi[:, axis] < axis_values_min[axis],
                    fill_value,
                    fi
                )

        ### If DM output (i.e. a numeric value), convert that back to an array
        if isinstance(fi, _cas.DM):
            if fi.shape == (1, 1):
                return float(fi)
            else:
                return _onp.array(fi, dtype=float).reshape(-1)

        return fi

    else:
        raise ValueError("Bad value of `method`!")
# Math Module Test Guide
Peter Sharpe
## Basic Idea
The file `test_all_operations_run` simply tests that all (or almost all) possible mathematical operations compute without errors for all possible combinations of input types; it has no regard for correctness, only that no errors are raised.
The file `test_array` tests that array-like objects can be created out of individual scalars.
All other individual files test the *correctness* of specific calculations against known values computed with NumPy as a reference. | AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/numpy/test_numpy/README.md | README.md |
import aerosandbox.numpy as np
from aerosandbox.optimization.opti import Opti
from typing import Union, Dict, Callable, List
from aerosandbox.modeling.surrogate_model import SurrogateModel
import copy
import warnings
class FittedModel(SurrogateModel):
    """
    A model that is fitted to data. Maps from R^N -> R^1.

    You can evaluate this model at a given point by calling it just like a function, e.g.:

    >>> my_fitted_model = FittedModel(...)  # See FittedModel.__init__ docstring for syntax
    >>> y = my_fitted_model(x)

    The input to the model (`x` in the example above) is of the type:
        * in the general N-dimensional case, a dictionary where: keys are variable names and values are float/array
        * in the case of a 1-dimensional input (R^1 -> R^1), a float/array.

    If you're not sure what the input type of `my_fitted_model` should be, just do:

    >>> print(my_fitted_model)  # Displays the valid input type to the model

    The output of the model (`y` in the example above) is always a float or array.

    See the docstring __init__ method of FittedModel for more details of how to instantiate and use FittedModel.

    One might have expected a fitted model to be a literal Python function rather than a Python class - the
    benefit of having FittedModel as a class rather than a function is that you can easily save (pickle) classes
    including data (e.g. parameters, x_data, y_data), but you can't do that with functions. And, because the
    FittedModel class has a __call__ method, you can basically still just think of it like a function.
    """

    def __init__(self,
                 model: Callable[
                     [
                         Union[np.ndarray, Dict[str, np.ndarray]],
                         Dict[str, float]
                     ],
                     np.ndarray
                 ],
                 x_data: Union[np.ndarray, Dict[str, np.ndarray]],
                 y_data: np.ndarray,
                 parameter_guesses: Dict[str, float],
                 parameter_bounds: Dict[str, tuple] = None,
                 residual_norm_type: str = "L2",
                 fit_type: str = "best",
                 weights: np.ndarray = None,
                 put_residuals_in_logspace: bool = False,
                 verbose=True,
                 ):
        """
        Fits an analytical model to n-dimensional unstructured data using an automatic-differentiable optimization approach.

        Args:

            model: The model that you want to fit your dataset to. This is a callable with syntax f(x, p) where:

                * x is a dict of dependent variables. Same format as x_data [dict of 1D ndarrays of length n].

                    * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead interpret x
                    as a 1D ndarray. (If you do this, just give `x_data` as an array.)

                * p is a dict of parameters. Same format as param_guesses [dict with syntax param_name:param_value].

                Model should return a 1D ndarray of length n.

                Basically, if you've done it right:

                >>> model(x_data, parameter_guesses)

                should evaluate to a 1D ndarray where each x_data is mapped to something analogous to y_data. (The fit
                will likely be bad at this point, because we haven't yet optimized on param_guesses - but the types
                should be happy.)

                Model should use aerosandbox.numpy operators.

                The model is not allowed to make any in-place changes to the input `x`. The most common way this
                manifests itself is if someone writes something to the effect of `x += 3` or similar. Instead, write `x =
                x + 3`.

            x_data: Values of the dependent variable(s) in the dataset to be fitted. This is a dictionary; syntax is {
            var_name:var_data}.

                * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply x_data
                as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)

            y_data: Values of the independent variable in the dataset to be fitted. [1D ndarray of length n]

            parameter_guesses: a dict of fit parameters. Syntax is {param_name:param_initial_guess}.

                * Parameters will be initialized to the values set here; all parameters need an initial guess.

                * param_initial_guess is a float; note that only scalar parameters are allowed.

            parameter_bounds: Optional: a dict of bounds on fit parameters. Syntax is {"param_name":(min, max)}.

                * May contain only a subset of param_guesses if desired.

                * Use None to represent one-sided constraints (i.e. (None, 5)).

            residual_norm_type: What error norm should we minimize to optimize the fit parameters? Options:

                * "L1": minimize the L1 norm or sum(abs(error)). Less sensitive to outliers.

                * "L2": minimize the L2 norm, also known as the Euclidian norm, or sqrt(sum(error ** 2)). The default.

                * "Linf": minimize the L_infinty norm or max(abs(error)). More sensitive to outliers.

            fit_type: Should we find the model of best fit (i.e. the model that minimizes the specified residual norm),
            or should we look for a model that represents an upper/lower bound on the data (useful for robust surrogate
            modeling, so that you can put bounds on modeling error):

                * "best": finds the model of best fit. Usually, this is what you want.

                * "upper bound": finds a model that represents an upper bound on the data (while still trying to minimize
                the specified residual norm).

                * "lower bound": finds a model that represents a lower bound on the data (while still trying to minimize
                the specified residual norm).

            weights: Optional: weights for data points. If not supplied, weights are assumed to be uniform.

                * Weights are automatically normalized. [1D ndarray of length n]

            put_residuals_in_logspace: Whether to optimize using the logarithmic error as opposed to the absolute error
            (useful for minimizing percent error).

            Note: If any model outputs or data are negative, this will raise an error!

            verbose: Should the progress of the optimization solve that is part of the fitting be displayed? See
            `aerosandbox.Opti.solve(verbose=)` syntax for more details.

        Returns: A model in the form of a FittedModel object. Some things you can do:
            >>> y = my_fitted_model(x)  # evaluate the FittedModel at new x points
            >>> my_fitted_model.parameters  # directly examine the optimal values of the parameters that were found
            >>> my_fitted_model.plot()  # plot the fit


        """
        super().__init__()

        ##### Prepare all inputs, check types/sizes.

        ### Flatten all inputs
        def flatten(input):
            return np.array(input).flatten()

        try:
            # Dict-like x_data: flatten each data series. (Dict-ness is detected
            # by duck typing, via the presence of `.items()`.)
            x_data = {
                k: flatten(v)
                for k, v in x_data.items()
            }
        except AttributeError:  # If it's not a dict or dict-like, assume it's a 1D ndarray dataset
            x_data = flatten(x_data)
        y_data = flatten(y_data)
        n_datapoints = np.length(y_data)

        ### Handle weighting
        if weights is None:
            weights = np.ones(n_datapoints)
        else:
            weights = flatten(weights)
        sum_weights = np.sum(weights)
        if sum_weights <= 0:
            raise ValueError("The weights must sum to a positive number!")
        if np.any(weights < 0):
            raise ValueError("No entries of the weights vector are allowed to be negative!")
        weights = weights / sum_weights  # Normalize weights so that they sum to 1.

        ### Check format of parameter_bounds input
        if parameter_bounds is None:
            parameter_bounds = {}
        for param_name, v in parameter_bounds.items():
            if param_name not in parameter_guesses.keys():
                raise ValueError(
                    f"A parameter name (key = \"{param_name}\") in parameter_bounds was not found in parameter_guesses.")
            if not np.length(v) == 2:
                raise ValueError(
                    "Every value in parameter_bounds must be a tuple in the format (lower_bound, upper_bound). "
                    "For one-sided bounds, use None for the unbounded side.")

        ### If putting residuals in logspace, check positivity
        if put_residuals_in_logspace:
            if not np.all(y_data > 0):
                raise ValueError("You can't fit a model with residuals in logspace if y_data is not entirely positive!")

        ### Check dimensionality of inputs to fitting algorithm
        relevant_inputs = {
            "y_data" : y_data,
            "weights": weights,
        }
        try:
            relevant_inputs.update(x_data)  # Works if x_data is dict-like
        except TypeError:  # x_data is a plain 1D array, not a dict
            relevant_inputs.update({"x_data": x_data})

        for key, value in relevant_inputs.items():
            # Check that the length of the inputs are consistent
            series_length = np.length(value)
            if not series_length == n_datapoints:
                raise ValueError(
                    f"The supplied data series \"{key}\" has length {series_length}, but y_data has length {n_datapoints}.")

        ##### Formulate and solve the fitting optimization problem

        ### Initialize an optimization environment
        opti = Opti()

        ### Initialize the parameters as optimization variables
        params = {}
        for param_name, param_initial_guess in parameter_guesses.items():
            if param_name in parameter_bounds:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess,
                    lower_bound=parameter_bounds[param_name][0],
                    upper_bound=parameter_bounds[param_name][1],
                )
            else:
                params[param_name] = opti.variable(
                    init_guess=param_initial_guess,
                )

        ### Evaluate the model at the data points you're trying to fit
        x_data_original = copy.deepcopy(
            x_data)  # Make a copy of x_data so that you can determine if the model did in-place operations on x and tattle on the user.
        try:
            y_model = model(x_data, params)  # Evaluate the model
        except Exception as e:
            # Chain the original exception so the user can see what actually went wrong.
            raise Exception("""
            There was an error when evaluating the model you supplied with the x_data you supplied.
            Likely possible causes:
                * Your model() does not have the call syntax model(x, p), where x is the x_data and p are parameters.
                * Your model should take in p as a dict of parameters, but it does not.
                * Your model assumes x is an array-like but you provided x_data as a dict, or vice versa.
            See the docstring of FittedModel() if you have other usage questions or would like to see examples.
            """) from e

        try:  ### If the model did in-place operations on x_data, throw an error
            x_data_is_unchanged = np.all(x_data == x_data_original)
        except ValueError:
            # Dict-of-arrays case: `dict == dict` compares arrays elementwise,
            # whose truth value is ambiguous. Compare each data series instead.
            # (Note: zip over the dicts' values, not the dicts themselves -
            # zipping the dicts directly would compare only the keys.)
            x_data_is_unchanged = np.all([
                np.all(x_series == x_series_original)
                for x_series, x_series_original in zip(x_data.values(), x_data_original.values())
            ])
        if not x_data_is_unchanged:
            raise TypeError("model(x_data, parameter_guesses) did in-place operations on x, which is not allowed!")
        if y_model is None:  # Make sure that y_model actually returned something sensible
            raise TypeError("model(x_data, parameter_guesses) returned None, when it should've returned a 1D ndarray.")

        ### Compute how far off you are (error)
        if not put_residuals_in_logspace:
            error = y_model - y_data
        else:
            y_model = np.fmax(y_model, 1e-300)  # Keep y_model very slightly always positive, so that log() doesn't NaN.
            error = np.log(y_model) - np.log(y_data)

        ### Set up the optimization problem to minimize some norm(error), which looks different depending on the norm used:
        if residual_norm_type.lower() == "l1":  # Minimize the L1 norm
            abs_error = opti.variable(init_guess=0,
                                      n_vars=np.length(y_data))  # Make the abs() of each error entry an opt. var.
            opti.subject_to([
                abs_error >= error,
                abs_error >= -error,
            ])
            opti.minimize(np.sum(weights * abs_error))

        elif residual_norm_type.lower() == "l2":  # Minimize the L2 norm
            opti.minimize(np.sum(weights * error ** 2))

        elif residual_norm_type.lower() == "linf":  # Minimize the L-infinity norm
            linf_value = opti.variable(init_guess=0)  # Make the value of the L-infinity norm an optimization variable
            opti.subject_to([
                linf_value >= weights * error,
                linf_value >= -weights * error
            ])
            opti.minimize(linf_value)

        else:
            raise ValueError("Bad input for the 'residual_type' parameter.")

        ### Add in the constraints specified by fit_type, which force the model to stay above / below the data points.
        if fit_type == "best":
            pass
        elif fit_type == "upper bound":
            opti.subject_to(y_model >= y_data)
        elif fit_type == "lower bound":
            opti.subject_to(y_model <= y_data)
        else:
            raise ValueError("Bad input for the 'fit_type' parameter.")

        ### Solve
        sol = opti.solve(verbose=verbose)

        ##### Construct a FittedModel

        ### Create a vector of solved parameters
        params_solved = {}
        for param_name in params:
            try:
                params_solved[param_name] = sol.value(params[param_name])
            except Exception:
                # If a parameter's value can't be extracted from the solution,
                # record NaN rather than failing the whole fit.
                params_solved[param_name] = np.nan

        ### Store all the data and inputs
        self.model = model
        self.x_data = x_data
        self.y_data = y_data
        self.parameters = params_solved
        self.parameter_guesses = parameter_guesses
        self.parameter_bounds = parameter_bounds
        self.residual_norm_type = residual_norm_type
        self.fit_type = fit_type
        self.weights = weights
        self.put_residuals_in_logspace = put_residuals_in_logspace

    def __call__(self, x):
        """
        Evaluate the fitted model at new input point(s) `x`. Same input format as x_data
        (dict of arrays in the N-dimensional case, or a float/array in the 1D case).
        """
        super().__call__(x)  # Performs type checking of x against x_data
        return self.model(x, self.parameters)

    def goodness_of_fit(self,
                        type="R^2"
                        ):
        """
        Returns a metric of the goodness of the fit.

        Args:

            type: Type of metric to use for goodness of fit. One of:

                * "R^2": The coefficient of determination. Strictly speaking only mathematically rigorous to use this
                for linear fits.

                    https://en.wikipedia.org/wiki/Coefficient_of_determination

                * "mean_absolute_error" or "mae" or "L1": The mean absolute error of the fit.

                * "root_mean_squared_error" or "rms" or "L2": The root mean squared error of the fit.

                * "max_absolute_error" or "Linf": The maximum deviation of the fit from any of the data points.

        Returns: The metric of the goodness of the fit.

        """
        if type == "R^2":

            y_mean = np.mean(self.y_data)

            SS_tot = np.sum(
                (self.y_data - y_mean) ** 2
            )

            y_model = self(self.x_data)

            SS_res = np.sum(
                (self.y_data - y_model) ** 2
            )

            R_squared = 1 - SS_res / SS_tot

            return R_squared

        elif type in ("mean_absolute_error", "mae", "L1"):
            return np.mean(np.abs(self.y_data - self(self.x_data)))

        elif type in ("root_mean_squared_error", "rms", "L2"):
            return np.sqrt(np.mean((self.y_data - self(self.x_data)) ** 2))

        elif type in ("max_absolute_error", "Linf"):
            return np.max(np.abs(self.y_data - self(self.x_data)))

        else:
            valid_types = [
                "R^2",
                "mean_absolute_error", "mae", "L1",
                "root_mean_squared_error", "rms", "L2",
                "max_absolute_error", "Linf"
            ]
            valid_types_formatted = [
                f"  * \"{valid_type}\""
                for valid_type in valid_types
            ]
            raise ValueError("Bad value of `type`! Valid values are:\n" + "\n".join(valid_types_formatted))
import inspect
from typing import Callable, Any, Union, Optional
def black_box(
        function: Callable[[Any], float],
        n_in: int = None,
        n_out: int = 1,
        fd_method: str = 'forward',
        fd_step: Optional[float] = None,
        fd_step_iter: Optional[bool] = None,
) -> Callable[[Any], float]:
    """
    Wraps a function as a black box, allowing it to be used in AeroSandbox / CasADi optimization problems.

    Obtains gradients via finite differences. Assumes that the function's Jacobian is fully dense, always.

    Args:
        function: The Python function to wrap. Called with scalar positional arguments; should return a float
            (or a tuple, though multiple outputs are not yet supported - see `n_out`).

        n_in: Number of scalar inputs to the black-box function. If None, this is inferred from the function's
            signature.

        n_out: Number of scalar outputs of the black-box function. Currently only 1 is supported.

        fd_method: The finite-differencing scheme used to compute gradients. One of:

            - 'forward'
            - 'backward'
            - 'central'
            - 'smoothed'

        fd_step: Optional fixed step size for finite differencing (passed to CasADi as the 'h' option).

        fd_step_iter: Optional flag controlling iterative adaptation of the step size (passed to CasADi as the
            'h_iter' option).

    Returns: A callable with the same call signature as `function` (positional and keyword arguments both
        supported), usable on CasADi / AeroSandbox symbolic types.

    Raises:
        NotImplementedError: if `n_out` is greater than 1.
        TypeError: at call time, if the wrapped function is called with too many positional arguments, a missing
            required argument, a duplicated argument, or an unexpected keyword argument.
    """
    ### Grab the signature of the function to be wrapped - we'll need it.
    signature = inspect.signature(function)

    ### Handle default arguments.
    if n_in is None:
        n_in = len(signature.parameters)
    if n_out is None:
        n_out = 1

    ### Add limitations
    if n_out > 1:
        raise NotImplementedError("Black boxes with multiple outputs are not yet supported.")

    ### Compute finite-differencing options to pass through to CasADi
    fd_options = {}
    if fd_step is not None:
        fd_options['h'] = fd_step
    if fd_step_iter is not None:
        fd_options['h_iter'] = fd_step_iter

    import casadi as cas

    class BlackBox(cas.Callback):
        def __init__(
                self,
        ):
            cas.Callback.__init__(self)
            self.construct(
                self.__class__.__name__,
                dict(
                    enable_fd=True,
                    fd_method=fd_method,
                    fd_options=fd_options,
                )
            )

        # Number of inputs and outputs
        def get_n_in(self):
            """
            Number of scalar inputs to the black-box function.
            """
            return n_in

        def get_n_out(self):
            """
            Number of scalar outputs of the black-box function.
            """
            return n_out

        # Evaluate numerically
        def eval(self, args):
            f = function(*args)

            if isinstance(f, tuple):
                return f
            else:
                return [f]

    # `wrapped_function` is a function with the same call signature as the original function, but with all arguments as positional arguments.
    wrapped_function = BlackBox()

    def wrapped_function_with_kwargs_support(*args, **kwargs):
        """
        This is a function with the same call signature as the original function, allowing both positional and keyword
        arguments. Should work identically to the original function:

        - Keyword arguments should be optional, and the default values should be the same as the original function.

        - Keyword arguments should have the option of being passed as positional arguments, provided they are in the
        correct order and all required positional arguments are passed.

        - Keyword arguments should be allowed to be passed in any order.

        - Positional arguments should be required.

        - Positional arguments should have the option of being passed as keyword arguments.

        """
        inputs = []

        ### Check the number of positional arguments supplied.
        # Required arguments are those with no default value in the signature.
        # (Bug fix: this was previously computed as `len(parameters) - len(parameters.values())`,
        # which is always zero, making the lower-bound check dead.)
        n_args = len(signature.parameters)
        n_required_args = sum(
            1 for parameter in signature.parameters.values()
            if parameter.default is inspect.Parameter.empty
        )

        if len(args) > n_args:
            raise TypeError(
                f"Takes from {n_required_args} to {n_args} positional arguments but {len(args)} were given"
            )

        ### Reject unexpected keyword arguments, instead of silently ignoring them.
        unexpected_kwargs = [name for name in kwargs if name not in signature.parameters]
        if unexpected_kwargs:
            raise TypeError(
                f"Got unexpected keyword argument(s): {', '.join(unexpected_kwargs)}"
            )

        ### Resolve each parameter of the signature to a concrete value, in order.
        for i, (name, parameter) in enumerate(signature.parameters.items()):

            if i < len(args):  # Supplied positionally
                input = args[i]

                if name in kwargs:  # ...but also supplied as a keyword: ambiguous.
                    raise TypeError(
                        f"Got multiple values for argument '{name}': {input} and {kwargs[name]}"
                    )

            elif name in kwargs:  # Supplied as a keyword
                input = kwargs[name]

            else:  # Not supplied at all: fall back to the default, if one exists.
                if parameter.default is parameter.empty:
                    raise TypeError(
                        f"Missing required argument '{name}'"
                    )
                else:
                    input = parameter.default

            inputs.append(input)

        return wrapped_function(*inputs)

    # Expose the underlying CasADi callback and its class, for advanced use.
    wrapped_function_with_kwargs_support.wrapped_function = wrapped_function
    wrapped_function_with_kwargs_support.wrapper_class = BlackBox

    return wrapped_function_with_kwargs_support
if __name__ == '__main__':
    ### Create a function that's effectively black-box (doesn't use `aerosandbox.numpy`)
    def my_func(
            a1,
            a2,
            k1=4,
            k2=5,
            k3=6,
    ):
        # Uses the plain `math` module, so this function can't propagate CasADi /
        # AeroSandbox symbolic types - hence the need to wrap it with black_box().
        import math
        return (
                math.sin(a1) * math.exp(a2) * math.cos(k1 * k2) + k3
        )


    ### Now, start an optimization problem
    import aerosandbox as asb
    import aerosandbox.numpy as np

    opti = asb.Opti()

    # Wrap our function such that it can be used in an optimization problem.
    my_func_wrapped = black_box(
        function=my_func,
    )

    # Pick some variables to optimize over
    m = opti.variable(init_guess=5, lower_bound=3, upper_bound=8)
    n = opti.variable(init_guess=5, lower_bound=3, upper_bound=8)

    # Minimize the black-box function; note that the wrapper supports a mix of
    # positional and keyword arguments, just like the original function.
    opti.minimize(
        my_func_wrapped(m, a2=3, k2=n)
    )

    # Solve (gradients are obtained via finite differences inside the black box)
    sol = opti.solve()

    ### Plot the function over its inputs
    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p

    M, N = np.meshgrid(
        np.linspace(3, 8, 300),
        np.linspace(3, 8, 300),
    )
    fig, ax = plt.subplots()
    p.contour(
        M, N, np.vectorize(my_func)(M, a2=3, k2=N),
    )
    p.show_plot()
from typing import Union, Dict, Any
import aerosandbox.numpy as np
from aerosandbox.modeling.interpolation import InterpolatedModel
from scipy import interpolate
class UnstructuredInterpolatedModel(InterpolatedModel):
    """
    A model that is interpolated to unstructured (i.e., point cloud) N-dimensional data. Maps from R^N -> R^1.

    You can evaluate this model at a given point by calling it just like a function, e.g.:

    >>> y = my_interpolated_model(x)

    The input to the model (`x` in the example above) is of the type:
        * in the general N-dimensional case, a dictionary where: keys are variable names and values are float/array
        * in the case of a 1-dimensional input (R^1 -> R^1), it can optionally just be a float/array.

    If you're not sure what the input type of `my_interpolated_model` should be, just do:

    >>> print(my_interpolated_model)  # Displays the valid input type to the model

    The output of the model (`y` in the example above) is always a float or array.

    See the docstring __init__ method of InterpolatedModel for more details of how to instantiate and use UnstructuredInterpolatedModel.

    """

    def __init__(self,
                 x_data: Union[np.ndarray, Dict[str, np.ndarray]],
                 y_data: np.ndarray,
                 x_data_resample: Union[int, Dict[str, Union[int, np.ndarray]]] = 10,
                 resampling_interpolator: object = interpolate.RBFInterpolator,
                 resampling_interpolator_kwargs: Dict[str, Any] = None,
                 fill_value=np.nan,  # Default behavior: return NaN for all inputs outside data range.
                 interpolated_model_kwargs: Dict[str, Any] = None,
                 ):
        """
        Creates the interpolator. Note that data must be unstructured (i.e., point cloud) for general N-dimensional
        interpolation.

        Note that if data is either 1D or structured,

        Args:

            x_data: Values of the dependent variable(s) in the dataset to be fitted. This is a dictionary; syntax is {
            var_name:var_data}.

                * If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply x_data
                as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)

            y_data: Values of the independent variable in the dataset to be fitted. [1D ndarray of length n]

            x_data_resample: A parameter that guides how the x_data should be resampled onto a structured grid.

                * If this is an int, we look at each axis of the `x_data` (here, we'll call this `xi`),
                and we resample onto a linearly-spaced grid between `min(xi)` and `max(xi)` with `x_data_resample`
                points.

                * If this is a dict, it must be a dict where the keys are strings matching the keys of (the
                dictionary) `x_data`. The values can either be ints or 1D np.ndarrays.

                    * If the values are ints, then that axis is linearly spaced between `min(xi)` and `max(xi)` with
                    `x_data_resample` points.

                    * If the values are 1D np.ndarrays, then those 1D np.ndarrays are used as the resampled spacing
                    for the given axis.

            resampling_interpolator: Indicates the interpolator to use in order to resample the unstructured data
            onto a structured grid. Should be analogous to scipy.interpolate.RBFInterpolator in __init__ and __call__
            syntax. See reference here:

                * https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RBFInterpolator.html

            resampling_interpolator_kwargs: Indicates keyword arguments (keyword-value pairs, as a dictionary) to
            pass into the resampling interpolator.

            fill_value: Gives the value that the interpolator should return for points outside of the interpolation
            domain. The interpolation domain is defined as the hypercube bounded by the coordinates specified in
            `x_data_resample`. By default, these coordinates are the tightest axis-aligned hypercube that bounds the
            point cloud data. If fill_value is None, then the interpolator will attempt to extrapolate if the interpolation method allows.

            interpolated_model_kwargs: Indicates keyword arguments to pass into the (structured) InterpolatedModel.
            Also a dictionary. See aerosandbox.InterpolatedModel for documentation on possible inputs here.

        """
        if resampling_interpolator_kwargs is None:
            resampling_interpolator_kwargs = {}
        if interpolated_model_kwargs is None:
            interpolated_model_kwargs = {}

        try:  # Try to use the InterpolatedModel initializer. If it doesn't work, then move on.
            super().__init__(
                x_data_coordinates=x_data,
                y_data_structured=y_data,
            )
            return
        except ValueError:
            pass

        # If it didn't work, this implies that x_data is multidimensional, and hence a dict-like object. Validate this.
        try:  # Determine type of `x_data` by duck typing
            x_data.keys()
            x_data.values()
            x_data.items()
        except AttributeError:
            raise TypeError("`x_data` must be a dict-like object!") from None

        # Make the interpolator, based on x_data and y_data.
        if resampling_interpolator == interpolate.RBFInterpolator:
            # Supply sensible defaults for RBF interpolation; user-specified kwargs override these.
            resampling_interpolator_kwargs = {
                "kernel": "thin_plate_spline",
                "degree": 1,
                **resampling_interpolator_kwargs
            }

        interpolator = resampling_interpolator(
            y=np.stack(tuple(x_data.values()), axis=1),  # (n_points, n_dims) array of point-cloud coordinates
            d=y_data,
            **resampling_interpolator_kwargs
        )

        # If x_data_resample is an int, make it into a dict that matches x_data.
        if isinstance(x_data_resample, int):
            x_data_resample = {
                k: x_data_resample
                for k in x_data.keys()
            }

        # Now, x_data_resample should be dict-like. Validate this.
        try:
            x_data_resample.keys()
            x_data_resample.values()
            x_data_resample.items()
        except AttributeError:
            raise TypeError("`x_data_resample` must be a dict-like object!") from None

        # Replace any int values with linspaced arrays. Build a new dict rather
        # than mutating in place, so that a user-supplied dict is not modified.
        x_data_resample = {
            k: np.linspace(
                np.min(x_data[k]),
                np.max(x_data[k]),
                v
            ) if isinstance(v, int) else v
            for k, v in x_data_resample.items()
        }

        x_data_coordinates: Dict = x_data_resample

        # Evaluate the unstructured interpolator on the structured grid, then
        # reshape the result back into the grid's hypercube shape.
        x_data_structured_values = [
            xi.flatten()
            for xi in np.meshgrid(*x_data_coordinates.values(), indexing="ij")
        ]
        y_data_structured = interpolator(
            np.stack(tuple(x_data_structured_values), axis=1)
        )
        y_data_structured = y_data_structured.reshape([
            np.length(xi)
            for xi in x_data_coordinates.values()
        ])

        interpolated_model_kwargs = {
            "fill_value": fill_value,
            **interpolated_model_kwargs
        }

        super().__init__(
            x_data_coordinates=x_data_coordinates,
            y_data_structured=y_data_structured,
            **interpolated_model_kwargs,
        )

        # Keep the raw point-cloud data around for inspection / debugging.
        self.x_data_raw_unstructured = x_data
        self.y_data_raw = y_data
if __name__ == '__main__':
    # Quick 1D sanity check: structured 1D data should route straight through
    # the InterpolatedModel initializer.
    x = np.arange(10)
    y = x ** 3
    interp = UnstructuredInterpolatedModel(
        x_data=x,
        y_data=y
    )


    def randspace(start, stop, n=50):
        # Random samples over [start, stop], with the endpoints forced in so the
        # interpolation domain covers the full range.
        vals = (stop - start) * np.random.rand(n) + start
        vals = np.concatenate((vals[:-2], np.array([start, stop])))
        # vals = np.sort(vals)
        return vals


    # 2D demo: a step function sampled on an unstructured point cloud.
    np.random.seed(4)
    X = randspace(-5, 5, 200)
    Y = randspace(-5, 5, 200)
    f = np.where(X > 0, 1, 0) + np.where(Y > 0, 1, 0)
    # f = X ** 2 + Y ** 2

    interp = UnstructuredInterpolatedModel(
        x_data={
            "x": X.flatten(),
            "y": Y.flatten(),
        },
        y_data=f.flatten()
    )

    # Visualize the raw point cloud alongside the interpolated surface.
    from aerosandbox.tools.pretty_plots import plt, show_plot

    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    # ax.plot_surface(X, Y, f, color="blue", alpha=0.2)
    ax.scatter(X.flatten(), Y.flatten(), f.flatten())
    X_plot, Y_plot = np.meshgrid(
        np.linspace(X.min(), X.max(), 500),
        np.linspace(Y.min(), Y.max(), 500),
    )
    F_plot = interp({
        "x": X_plot.flatten(),
        "y": Y_plot.flatten()
    }).reshape(X_plot.shape)
    ax.plot_surface(
        X_plot, Y_plot, F_plot,
        color="red",
        edgecolors=(1, 1, 1, 0.5),
        linewidth=0.5,
        alpha=0.2,
        rcount=40,
        ccount=40,
        shade=True,
    )
    plt.xlabel("x")
    plt.ylabel("y")
    plt.show()

    # Demonstrate that the interpolated model can also be used symbolically,
    # inside an AeroSandbox optimization problem.
    import aerosandbox as asb
    import aerosandbox.numpy as np

    opti = asb.Opti()
    x = opti.variable(init_guess=0)
    y = opti.variable(init_guess=0)
    opti.minimize(interp({"x": x, "y": y}))
    sol = opti.solve()
    print(sol.value(x))
    print(sol.value(y))
from aerosandbox.common import AeroSandboxObject
from abc import abstractmethod
from typing import Union, Dict, List, Tuple
import aerosandbox.numpy as np
class SurrogateModel(AeroSandboxObject):
    """
    A SurrogateModel is effectively a callable; it only has the __call__ method, and all subclasses must explicitly
    overwrite this. The only reason it is not a callable is that you want to be able to save it to disk (via
    pickling) while also having the capability to save associated data (for example, constants associated with a
    particular model, or underlying data).

    If data is used to generate the SurrogateModel, it should be stored as follows:

        * The independent variable(s) should be stored as SurrogateModel.x_data

            * in the general N-dimensional case, x_data should be a dictionary where: keys are variable names and
            values are float/array

            * in the case of a 1-dimensional input (R^1 -> R^1), x-data should be a float/array.

        * The dependent variable should be stored as SurrogateModel.y_data

            * The type of this variable should be a float or an np.ndarray.

    Even if you don't have any real x_data or y_data to add as SurrogateModel.x_data or SurrogateModel.y_data, it's recommended (but not required) that you add
    values here as examples that users can inspect in order to see the types required.
    """

    @abstractmethod
    def __init__(self):
        """
        SurrogateModel is an abstract class; you should not instantiate it directly.
        """
        pass

    @abstractmethod  # If you subclass SurrogateModel, you must overwrite __call__ so that it's a callable.
    def __call__(self,
                 x: Union[int, float, np.ndarray, Dict[str, np.ndarray]]
                 ) -> Union[float, np.ndarray]:
        """
        Evaluates the surrogate model at some given input x.

        The input `x` is of the type:
            * in the general N-dimensional case, a dictionary where keys are variable names and values are float/array.
            * in the case of a 1-dimensional input (R^1 -> R^1), a float/array.
        """
        ### Perform basic type checking on x, if x_data exists as a reference.
        try:
            x_data_is_dict = isinstance(self.x_data, dict)
            input_is_dict = isinstance(x, dict)

            if x_data_is_dict and not input_is_dict:
                raise TypeError(
                    f"The input to this model should be a dict with: keys {self.input_names()}, values as float or array.")
            if input_is_dict and not x_data_is_dict:
                raise TypeError("The input to this model should be a float or array.")
        except AttributeError:  # If x_data does not exist, skip the check.
            # (Bug fix: a missing attribute raises AttributeError, not NameError,
            # which is what was previously caught here.)
            pass

    def __repr__(self) -> str:
        input_names = self.input_names()
        if input_names is not None:
            input_description = f"a dict with: keys {input_names}, values as float or array"
        else:
            input_description = "a float or array"
        return "\n".join([
            f"SurrogateModel(x) [R^{self.input_dimensionality()} -> R^1]",
            f"\tInput: {input_description}",
            f"\tOutput: float or array",
        ])

    def input_dimensionality(self) -> int:
        """
        Returns the number of inputs that should be supplied in x, where x is the input to the SurrogateModel.
        """
        input_names = self.input_names()
        if input_names is not None:
            return len(input_names)
        else:
            return 1

    def input_names(self) -> Union[List, None]:
        """
        If x (the input to this model) is supposed to be a dict, this method returns the keys that should be part of x.

        If x is 1D and simply takes in floats or arrays, or if no x_data exists, returns None.
        """
        try:
            return list(self.x_data.keys())
        except AttributeError:
            return None

    def plot(self, resolution=250):
        """
        Plots the model's data points along with the model's fit, for 1-dimensional models.

        Args:
            resolution: Number of points at which to sample the model when drawing the fit curve.

        Raises:
            NotImplementedError: if the model is more than 1-dimensional.
        """
        import matplotlib.pyplot as plt

        def axis_range(x_data_axis: np.ndarray) -> Tuple[float, float]:
            """
            Given the entries of one axis of the dependent variable, determine a min/max range over which to plot the fit.
            Args:
                x_data_axis: The entries of one axis of the dependent variable, i.e. x_data["x1"].

            Returns: A tuple representing the (min, max) value over which to plot that axis.
            """
            minval = np.min(x_data_axis)
            maxval = np.max(x_data_axis)

            return (minval, maxval)

        if self.input_dimensionality() == 1:

            ### Parse the x_data
            if self.input_names() is not None:
                # Bug fix: dict views are not subscriptable; convert to lists first.
                x_name = list(self.x_data.keys())[0]
                x_data = list(self.x_data.values())[0]

                minval, maxval = axis_range(x_data)

                x_fit = {x_name: np.linspace(minval, maxval, resolution)}
                y_fit = self(x_fit)
            else:
                x_name = "x"
                x_data = self.x_data

                minval, maxval = axis_range(x_data)

                x_fit = np.linspace(minval, maxval, resolution)
                y_fit = self(x_fit)

            ### Plot the 2D figure
            fig = plt.figure(dpi=200)
            plt.plot(
                x_data,
                self.y_data,
                ".k",
                label="Data",
            )
            plt.plot(
                x_fit,
                y_fit,
                "-",
                color="#cb3bff",
                label="Fit",
                zorder=4,
            )
            plt.xlabel(x_name)
            plt.ylabel(rf"$f({x_name})$")
            plt.title(r"Fit of FittedModel")
            plt.tight_layout()
            plt.legend()
            plt.show()

        else:
            raise NotImplementedError()
from typing import Union, Dict
import aerosandbox.numpy as np
from aerosandbox.modeling.surrogate_model import SurrogateModel
class InterpolatedModel(SurrogateModel):
    """
    A model that is interpolated to structured (i.e., gridded) N-dimensional data. Maps from R^N -> R^1.
    You can evaluate this model at a given point by calling it just like a function, e.g.:
    >>> y = my_interpolated_model(x)
    The input to the model (`x` in the example above) is of the type:
        * in the general N-dimensional case, a dictionary where: keys are variable names and values are float/array
        * in the case of a 1-dimensional input (R^1 -> R^1), it can optionally just be a float/array.
    If you're not sure what the input type of `my_interpolated_model` should be, just do:
    >>> print(my_interpolated_model) # Displays the valid input type to the model
    The output of the model (`y` in the example above) is always a float or array.
    See the docstring __init__ method of InterpolatedModel for more details of how to instantiate and use InterpolatedModel.
    One might have expected a interpolated model to be a literal Python function rather than a Python class - the
    benefit of having InterpolatedModel as a class rather than a function is that you can easily save (pickle) classes
    including data (e.g. parameters, x_data, y_data), but you can't do that with functions. And, because the
    InterpolatedModel class has a __call__ method, you can basically still just think of it like a function.
    """
    def __init__(self,
                 x_data_coordinates: Union[np.ndarray, Dict[str, np.ndarray]],
                 y_data_structured: np.ndarray,
                 method: str = "bspline",
                 fill_value=np.nan,  # Default behavior: return NaN for all inputs outside data range.
                 ):
        """
        Create the interpolator. Note that data must be structured (i.e., gridded on a hypercube) for general
        N-dimensional interpolation.
        Args:
            x_data_coordinates: The coordinates of each axis of the cube; essentially, the independent variable(s):
                * For the general N-dimensional case, this should be a dictionary where the keys are axis names [str]
                and the values are 1D arrays.
                * For the 1D case, you can optionally alternatively supply this as a single 1D array.
                Usage example for how you might generate this data, along with `y_data_structured`:
                >>> x1 = np.linspace(0, 5, 11)
                >>> x2 = np.linspace(0, 10, 21)
                >>> X1, X2 = np.meshgrid(x1, x2, indexing="ij")
                >>>
                >>> x_data_coordinates = {
                >>>     "x1": x1, # 1D ndarray of length 11
                >>>     "x2": x2, # 1D ndarray of length 21
                >>> }
                >>> y_data_structured = function_to_approximate(X1, X2) # 2D ndarray of shape (11, 21)
            y_data_structured: The dependent variable, expressed as a structured data "cube":
                * For the general N-dimensional case, this should be a single N-dimensional array with axis lengths
                corresponding to the inputs in `x_data_coordinates`. In the 1-dimensional case, this naturally
                reduces down to a single 1D ndarray.
                See usage example along with `x_data_coordinates` above.
            method: The method of interpolation to perform. Options:
                * "bspline" (Note: differentiable and suitable for optimization - made of piecewise-cubics. For other
                applications, other interpolators may be faster. Not monotonicity-preserving - may overshoot. Watch
                out for Runge's phenomenon; on that note, if your data is noisy, consider smoothing it first.)
                * "linear" (Note: differentiable, but not suitable for use in optimization w/o subgradient treatment due
                to C1-discontinuity)
                * "nearest" (Note: NOT differentiable, don't use in optimization. Fast.)
            fill_value: Gives the value that the interpolator should return for points outside of the interpolation
                domain. The interpolation domain is defined as the hypercube bounded by the coordinates specified in
                `x_data_coordinates`. If fill_value is None, then the interpolator will attempt to extrapolate if the interpolation method allows.
        """
        # Normalize `x_data_coordinates` into an iterable of 1D coordinate arrays,
        # regardless of whether the caller supplied a dict (N-D case) or a bare array (1-D case).
        try:
            x_data_coordinates_values = x_data_coordinates.values()
        except AttributeError:  # If x_data_coordinates is not a dict
            x_data_coordinates_values = tuple([x_data_coordinates])
        ### Validate inputs
        # Every coordinate axis must be a 1D array (these define the grid lines of the hypercube).
        for coordinates in x_data_coordinates_values:
            if len(coordinates.shape) != 1:
                raise ValueError("""
                    `x_data_coordinates` must be either:
                        * In the general N-dimensional case, a dict where values are 1D ndarrays defining the coordinates of each axis.
                        * In the 1D case, can also be a 1D ndarray.
                    """)
        # The structured y-data must have one axis per coordinate array, with matching lengths.
        implied_y_data_shape = tuple(len(coordinates) for coordinates in x_data_coordinates_values)
        if not y_data_structured.shape == implied_y_data_shape:
            raise ValueError(f"""
            The shape of `y_data_structured` should be {implied_y_data_shape}
            """)
        ### Store data
        self.x_data_coordinates = x_data_coordinates
        self.x_data_coordinates_values = x_data_coordinates_values
        self.y_data_structured = y_data_structured
        self.method = method
        self.fill_value = fill_value
        ### Create unstructured versions of the data for plotting, etc.
        x_data = x_data_coordinates
        if isinstance(x_data, dict):
            # Expand the 1D coordinate axes into full N-D grids, then flatten each to 1D,
            # so self.x_data maps each axis name to a flat list of sample locations.
            x_data_values = np.meshgrid(*x_data_coordinates_values, indexing="ij")
            x_data = {
                k: v.reshape(-1)
                for k, v in zip(x_data_coordinates.keys(), x_data_values)
            }
        self.x_data = x_data
        # NOTE(review): the x-coordinates above are flattened via `.reshape(-1)`, while y is
        # raveled with order="F" — presumably consistent with the backend's (e.g. CasADi's
        # column-major) reshape semantics; confirm that self.x_data and self.y_data stay
        # index-aligned for plain NumPy inputs.
        self.y_data = np.ravel(y_data_structured, order="F")
    def __call__(self, x):
        # N-dimensional case: `x` is a dict of {axis name: value(s)}; broadcast all inputs
        # to a common shape, flatten them, and stack into an (n_points, n_dims) query array.
        if isinstance(self.x_data_coordinates, dict):
            def get_shape(value):
                # Returns the broadcast-relevant shape of one input.
                # CasADi types are always 2D; a column vector (m, 1) is treated as a 1D array of length m.
                if np.is_casadi_type(value, recursive=False):
                    if value.shape[1] == 1:
                        return (np.length(value),)
                try:
                    return value.shape
                except AttributeError:  # Plain scalars (int/float) have no .shape
                    return tuple()
            # Common output shape implied by broadcasting all of the inputs together.
            shape = np.broadcast_shapes(
                *[get_shape(v) for v in x.values()]
            )
            shape_for_reshaping = (int(np.prod(shape)),)  # Flattened (1D) version of `shape`
            def reshape(value):
                # Flattens one input to 1D; scalar-like inputs are tiled up to the broadcast length.
                try:
                    return np.reshape(value, shape_for_reshaping)
                except ValueError:
                    if isinstance(value, int) or isinstance(value, float) or value.shape == tuple() or np.prod(
                            value.shape) == 1:
                        return value * np.ones(shape_for_reshaping)
                    raise ValueError("Could not reshape value of one of the inputs!")
            # Note: iteration order follows self.x_data_coordinates, so columns line up
            # with the order of `points` passed to np.interpn below.
            x = np.stack(tuple(
                reshape(x[k])
                for k, v in self.x_data_coordinates.items()
            ), axis=1)
        output = np.interpn(
            points=self.x_data_coordinates_values,
            values=self.y_data_structured,
            xi=x,
            method=self.method,
            bounds_error=False,  # Can't be set true if general MX-type inputs are to be expected.
            fill_value=self.fill_value
        )
        # In the dict (N-D) case, un-flatten the output back to the broadcast shape.
        # In the 1-D case, `shape` was never assigned, so return the raw output as-is.
        try:
            return np.reshape(output, shape)
        except UnboundLocalError:
            return output
import aerosandbox.numpy as np
from typing import Union
def linear_hermite_patch(
        x: Union[float, np.ndarray],
        x_a: float,
        x_b: float,
        f_a: float,
        f_b: float,
) -> Union[float, np.ndarray]:
    """
    Evaluates the (unique) linear polynomial that interpolates the two endpoints (x_a, f_a) and (x_b, f_b).

    Args:
        x: Point(s) at which to evaluate the patch; scalar or array.
        x_a: x-coordinate of the first endpoint.
        x_b: x-coordinate of the second endpoint.
        f_a: Function value at the first endpoint.
        f_b: Function value at the second endpoint.

    Returns:
        The patch evaluated at `x`; scalar if `x` is a scalar, array if `x` is an array.
    """
    dx = x_b - x_a  # Width of the patch
    df = f_b - f_a  # Rise across the patch
    return (x - x_a) * df / dx + f_a
def cubic_hermite_patch(
        x: Union[float, np.ndarray],
        x_a: float,
        x_b: float,
        f_a: float,
        f_b: float,
        dfdx_a: float,
        dfdx_b: float,
        extrapolation: str = 'continue',
) -> Union[float, np.ndarray]:
    """
    Evaluates the cubic Hermite polynomial patch matching the given endpoint values and endpoint derivatives.

    Args:
        x: Point(s) at which to evaluate the patch; scalar or array.
        x_a: x-coordinate of the first endpoint.
        x_b: x-coordinate of the second endpoint.
        f_a: Function value at the first endpoint.
        f_b: Function value at the second endpoint.
        dfdx_a: df/dx at the first endpoint.
        dfdx_b: df/dx at the second endpoint.
        extrapolation: Behavior outside [x_a, x_b]: "continue" evaluates the cubic as-is
            beyond the endpoints; "clip" holds the endpoint values. Default "continue".

    Returns:
        The patch evaluated at `x`; scalar if `x` is a scalar, array if `x` is an array.
    """
    dx = x_b - x_a
    t = (x - x_a) / dx  # Nondimensional position along the patch, 0 at x_a and 1 at x_b
    if extrapolation == 'clip':
        t = np.clip(t, 0, 1)
    elif extrapolation != 'continue':
        raise ValueError("Bad value of `extrapolation`!")
    # Bernstein-style basis, expressed in the Bezier control-point form of the Hermite patch.
    u = 1 - t
    return (
            (t ** 3) * (1 * f_b) +
            (t ** 2 * u) * (3 * f_b - 1 * dfdx_b * dx) +
            (t * u ** 2) * (3 * f_a + 1 * dfdx_a * dx) +
            (u ** 3) * (1 * f_a)
    )
if __name__ == '__main__':
    # Visual demo: a clipped cubic Hermite patch on [0, 1], plotted over [-1, 2]
    # so that the held (clipped) endpoint values are visible.
    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p
    x = np.linspace(-1, 2, 500)
    y = cubic_hermite_patch(
        x,
        x_a=0,
        x_b=1,
        f_a=0,
        f_b=1,
        dfdx_a=-0.5,
        dfdx_b=-1,
        extrapolation='clip',
    )
    plt.plot(x, y)
    p.equal()
    p.show_plot()
import aerosandbox.numpy as np
from typing import Union, Tuple
def quadratic_bezier_patch_from_tangents(
        t: Union[float, np.ndarray],
        x_a: float,
        x_b: float,
        y_a: float,
        y_b: float,
        dydx_a: float,
        dydx_b: float,
) -> Tuple[Union[float, np.ndarray], Union[float, np.ndarray]]:
    """
    Computes sampled points in 2D space from a quadratic Bezier spline defined by endpoints and end-tangents.
    Note: due to the inherent nature of a quadratic Bezier curve, curvature will be strictly one-sided - in other
    words, this will not make "S"-shaped curves. This means that you should be aware that bad values of dydx at
    either endpoint might cause this curvature to flip, which would result in the curve "going backwards" at one
    endpoint.
    Also, note that, in general, points will not be spaced evenly in x, y, or arc length s.
    Args:
        t: Parametric coordinate(s) at which to sample the curve, typically in the range [0, 1].
            t=0 corresponds to the first endpoint (x_a, y_a), and t=1 to the second endpoint (x_b, y_b).
            Scalar or array.
        x_a: The x-coordinate of the first endpoint.
        x_b: The x-coordinate of the second endpoint.
        y_a: The y-coordinate of the first endpoint.
        y_b: The y-coordinate of the second endpoint.
        dydx_a: The derivative of y with respect to x at the first endpoint. Must differ from `dydx_b`;
            parallel end-tangents never intersect, which degenerates the construction (divide-by-zero).
        dydx_b: The derivative of y with respect to x at the second endpoint.
    Returns:
        x: A scalar or numpy array of scalars representing the x-coordinates of the sampled points.
        y: A scalar or numpy array of scalars representing the y-coordinates of the sampled points.
    Usage:
        >>> x_a, x_b = 0, 10
        >>> y_a, y_b = 0, 5
        >>> dydx_a, dydx_b = 0.5, -0.5
        >>>
        >>> t = np.linspace(0, 1, 50)
        >>> x, y = quadratic_bezier_patch_from_tangents(
        >>>     t=t,
        >>>     x_a=x_a,
        >>>     x_b=x_b,
        >>>     y_a=y_a,
        >>>     y_b=y_b,
        >>>     dydx_a=dydx_a,
        >>>     dydx_b=dydx_b
        >>> )
    """
    ### Compute intercept of tangent lines; this is the middle Bezier control point P1.
    # Note: if dydx_a == dydx_b, the tangent lines are parallel and this divides by zero.
    x_P1 = (
                   (y_b - y_a) + (dydx_a * x_a - dydx_b * x_b)
           ) / (dydx_a - dydx_b)
    y_P1 = y_a + dydx_a * (x_P1 - x_a)
    ### Evaluate the standard quadratic Bezier basis with control points (P0, P1, P2) = (a, P1, b).
    x = (
            (1 - t) ** 2 * x_a +
            2 * (1 - t) * t * x_P1 +
            t ** 2 * x_b
    )
    y = (
            (1 - t) ** 2 * y_a +
            2 * (1 - t) * t * y_P1 +
            t ** 2 * y_b
    )
    return x, y
if __name__ == '__main__':
    # Visual demo: a strongly-asymmetric pair of end-tangents, sampled at 11 points.
    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p
    t = np.linspace(0, 1, 11)
    x, y = quadratic_bezier_patch_from_tangents(
        t,
        x_a=1,
        x_b=4,
        y_a=2,
        y_b=3,
        dydx_a=1,
        dydx_b=-30,
    )
    plt.plot(x, y, ".-")
    p.show_plot()
import aerosandbox.numpy as np
from aerosandbox.weights.mass_properties import MassProperties
"""
Most of these relations are taken from:
https://en.wikipedia.org/wiki/List_of_moments_of_inertia
"""
def mass_properties_from_radius_of_gyration(
        mass: float,
        x_cg: float = 0,
        y_cg: float = 0,
        z_cg: float = 0,
        radius_of_gyration_x: float = 0,
        radius_of_gyration_y: float = 0,
        radius_of_gyration_z: float = 0,
) -> MassProperties:
    """
    Builds a MassProperties object from a mass and its radii of gyration.

    Assumes the principal axes of the inertia tensor are aligned with the coordinate
    axes (all products of inertia are zero). This is a convenience shorthand for a
    common use of the MassProperties constructor; for anything fancier, construct
    MassProperties directly.

    Args:
        mass: Mass [kg]
        x_cg: x-position of the center of gravity
        y_cg: y-position of the center of gravity
        z_cg: z-position of the center of gravity
        radius_of_gyration_x: Radius of gyration along the x-axis, about the center of gravity [m]
        radius_of_gyration_y: Radius of gyration along the y-axis, about the center of gravity [m]
        radius_of_gyration_z: Radius of gyration along the z-axis, about the center of gravity [m]

    Returns: MassProperties object.
    """
    # Moment of inertia from radius of gyration: I = m * k^2
    Ixx = mass * radius_of_gyration_x ** 2
    Iyy = mass * radius_of_gyration_y ** 2
    Izz = mass * radius_of_gyration_z ** 2
    return MassProperties(
        mass=mass,
        x_cg=x_cg,
        y_cg=y_cg,
        z_cg=z_cg,
        Ixx=Ixx,
        Iyy=Iyy,
        Izz=Izz,
        Ixy=0,
        Iyz=0,
        Ixz=0,
    )
def mass_properties_of_ellipsoid(
        mass: float,
        radius_x: float,
        radius_y: float,
        radius_z: float,
) -> MassProperties:
    """
    Mass properties of a uniform-density solid ellipsoid centered at the origin.

    For a solid ellipsoid, each moment of inertia is (1/5) * m * (sum of the
    squares of the two transverse semi-axes).

    Args:
        mass: Mass [kg]
        radius_x: Semi-axis length along the x-axis [m]
        radius_y: Semi-axis length along the y-axis [m]
        radius_z: Semi-axis length along the z-axis [m]

    Returns: MassProperties object.
    """
    rx2 = radius_x ** 2
    ry2 = radius_y ** 2
    rz2 = radius_z ** 2
    return MassProperties(
        mass=mass,
        x_cg=0,
        y_cg=0,
        z_cg=0,
        Ixx=0.2 * mass * (ry2 + rz2),
        Iyy=0.2 * mass * (rz2 + rx2),
        Izz=0.2 * mass * (rx2 + ry2),
        Ixy=0,
        Iyz=0,
        Ixz=0,
    )
def mass_properties_of_sphere(
        mass: float,
        radius: float,
) -> MassProperties:
    """
    Mass properties of a uniform-density solid sphere centered at the origin.

    A sphere is just the degenerate ellipsoid with all three semi-axes equal.

    Args:
        mass: Mass [kg]
        radius: Radius [m]

    Returns: MassProperties object.
    """
    return mass_properties_of_ellipsoid(mass, radius, radius, radius)
def mass_properties_of_rectangular_prism(
        mass: float,
        length_x: float,
        length_y: float,
        length_z: float,
) -> MassProperties:
    """
    Mass properties of a uniform-density rectangular prism centered at the origin.

    For a solid cuboid, each moment of inertia is (1/12) * m * (sum of the squares
    of the two transverse side lengths).

    Args:
        mass: Mass [kg]
        length_x: Side length along the x-axis [m]
        length_y: Side length along the y-axis [m]
        length_z: Side length along the z-axis [m]

    Returns: MassProperties object.
    """
    Lx2 = length_x ** 2
    Ly2 = length_y ** 2
    Lz2 = length_z ** 2
    k = 1 / 12 * mass  # Common prefactor for all three moments
    return MassProperties(
        mass=mass,
        x_cg=0,
        y_cg=0,
        z_cg=0,
        Ixx=k * (Ly2 + Lz2),
        Iyy=k * (Lz2 + Lx2),
        Izz=k * (Lx2 + Ly2),
        Ixy=0,
        Iyz=0,
        Ixz=0,
    )
def mass_properties_of_cube(
        mass: float,
        side_length: float,
) -> MassProperties:
    """
    Mass properties of a uniform-density cube centered at the origin.

    A cube is just the degenerate rectangular prism with all three side lengths equal.

    Args:
        mass: Mass [kg]
        side_length: Side length of the cube [m]

    Returns: MassProperties object.
    """
    s = side_length
    return mass_properties_of_rectangular_prism(mass, s, s, s)
import aerosandbox.numpy as np
from aerosandbox.common import AeroSandboxObject
from typing import Union, Any, List
from aerosandbox.tools.string_formatting import trim_string
class MassProperties(AeroSandboxObject):
    """
    Mass properties of a rigid 3D object.

    ## Notes on Inertia Tensor Definition

    This class uses the standard mathematical definition of the inertia tensor, which is different from the
    alternative definition used by some CAD and CAE applications (such as SolidWorks, NX, etc.). These differ by a
    sign flip in the products of inertia.

    Specifically, we define the inertia tensor using the standard convention:

            [ I11 I12 I13 ]   [ Ixx Ixy Ixz ]   [sum(m*(y^2+z^2))    -sum(m*x*y)         -sum(m*x*z)       ]
        I = [ I21 I22 I23 ] = [ Ixy Iyy Iyz ] = [-sum(m*x*y)          sum(m*(x^2+z^2))   -sum(m*y*z)       ]
            [ I31 I32 I33 ]   [ Ixz Iyz Izz ]   [-sum(m*x*z)         -sum(m*y*z)          sum(m*(x^2+y^2)) ]

    Whereas SolidWorks, NX, etc. define the inertia tensor as:

            [ I11 I12 I13 ]   [ Ixx -Ixy -Ixz ]   [sum(m*(y^2+z^2))    -sum(m*x*y)         -sum(m*x*z)       ]
        I = [ I21 I22 I23 ] = [-Ixy  Iyy -Iyz ] = [-sum(m*x*y)          sum(m*(x^2+z^2))   -sum(m*y*z)       ]
            [ I31 I32 I33 ]   [-Ixz -Iyz  Izz ]   [-sum(m*x*z)         -sum(m*y*z)          sum(m*(x^2+y^2)) ]

    See also: https://en.wikipedia.org/wiki/Moment_of_inertia#Inertia_tensor
    """

    def __init__(self,
                 mass: Union[float, np.ndarray] = None,
                 x_cg: Union[float, np.ndarray] = 0.,
                 y_cg: Union[float, np.ndarray] = 0.,
                 z_cg: Union[float, np.ndarray] = 0.,
                 Ixx: Union[float, np.ndarray] = 0.,
                 Iyy: Union[float, np.ndarray] = 0.,
                 Izz: Union[float, np.ndarray] = 0.,
                 Ixy: Union[float, np.ndarray] = 0.,
                 Iyz: Union[float, np.ndarray] = 0.,
                 Ixz: Union[float, np.ndarray] = 0.,
                 ):
        """
        Initializes a new MassProperties object.

        Axes can be given in any convenient axes system, as long as mass properties are not combined across different
        axis systems. For aircraft design, the most common axis system is typically geometry axes (x-positive aft,
        y-positive out the right wingtip, z-positive upwards).

        Args:
            mass: Mass of the component [kg]
            x_cg: X-location of the center of gravity of the component [m]
            y_cg: Y-location of the center of gravity of the component [m]
            z_cg: Z-location of the center of gravity of the component [m]
            Ixx: Respective component of the inertia tensor, as measured about the component's center of mass. 0 if
                this is a point mass.
            Iyy: Respective component of the inertia tensor, as measured about the component's center of mass. 0 if
                this is a point mass.
            Izz: Respective component of the inertia tensor, as measured about the component's center of mass. 0 if
                this is a point mass.
            Ixy: Respective component of the inertia tensor, as measured about the component's center of mass. 0 if
                this is symmetric about z.
            Iyz: Respective component of the inertia tensor, as measured about the component's center of mass. 0 if
                this is symmetric about x.
            Ixz: Respective component of the inertia tensor, as measured about the component's center of mass. 0 if
                this is symmetric about y.
        """
        if mass is None:
            import warnings
            warnings.warn(
                "Defining a MassProperties object with zero mass. This can cause problems (divide-by-zero) in dynamics calculations, if this is not intended.\nTo silence this warning, please explicitly set `mass=0` in the MassProperties constructor.",
                stacklevel=2
            )
            mass = 0

        self.mass = mass
        self.x_cg = x_cg
        self.y_cg = y_cg
        self.z_cg = z_cg
        self.Ixx = Ixx
        self.Iyy = Iyy
        self.Izz = Izz
        self.Ixy = Ixy
        self.Iyz = Iyz
        self.Ixz = Ixz

    def __repr__(self) -> str:
        def fmt(x: Union[float, Any], width=14) -> str:
            # Right-justifies a number (or any other object) into a fixed-width field.
            if isinstance(x, (float, int)):
                if x == 0:
                    x = "0"
                else:
                    return f"{x:.8g}".rjust(width)
            return trim_string(str(x).rjust(width), length=40)

        return "\n".join([
            "MassProperties instance:",
            f"                     Mass : {fmt(self.mass)}",
            f"        Center of Gravity : ({fmt(self.x_cg)}, {fmt(self.y_cg)}, {fmt(self.z_cg)})",
            f"           Inertia Tensor : ",
            f"            (about CG)     [{fmt(self.Ixx)}, {fmt(self.Ixy)}, {fmt(self.Ixz)}]",
            f"                           [{fmt(self.Ixy)}, {fmt(self.Iyy)}, {fmt(self.Iyz)}]",
            f"                           [{fmt(self.Ixz)}, {fmt(self.Iyz)}, {fmt(self.Izz)}]",
        ])

    def __getitem__(self, index) -> "MassProperties":
        # Supports vectorized MassProperties (where attributes are arrays) by indexing
        # each attribute; scalar attributes pass through unchanged.
        def get_item_of_attribute(a):
            if np.isscalar(a):  # If NumPy says its a scalar, return it.
                return a
            try:
                return a[index]
            except TypeError:  # object is not subscriptable
                return a
            except IndexError as e:  # index out of range
                raise IndexError("A state variable could not be indexed, since the index is out of range!") from e

        inputs = {
            "mass": self.mass,
            "x_cg": self.x_cg,
            "y_cg": self.y_cg,
            "z_cg": self.z_cg,
            "Ixx" : self.Ixx,
            "Iyy" : self.Iyy,
            "Izz" : self.Izz,
            "Ixy" : self.Ixy,
            "Iyz" : self.Iyz,
            "Ixz" : self.Ixz,
        }

        return self.__class__(
            **{
                k: get_item_of_attribute(v)
                for k, v in inputs.items()
            }
        )

    def __neg__(self) -> "MassProperties":
        return -1 * self

    def __add__(self, other: "MassProperties") -> "MassProperties":
        """
        Combines one MassProperties object with another.
        """
        if not isinstance(other, MassProperties):
            raise TypeError("MassProperties objects can only be added to other MassProperties objects.")

        total_mass = self.mass + other.mass
        # Mass-weighted average gives the combined CG location.
        total_x_cg = (self.mass * self.x_cg + other.mass * other.x_cg) / total_mass
        total_y_cg = (self.mass * self.y_cg + other.mass * other.y_cg) / total_mass
        total_z_cg = (self.mass * self.z_cg + other.mass * other.z_cg) / total_mass
        # Shift both inertia tensors to the combined CG (parallel axis theorem), then sum.
        self_inertia_tensor_elements = self.get_inertia_tensor_about_point(
            x=total_x_cg,
            y=total_y_cg,
            z=total_z_cg,
            return_tensor=False
        )
        other_inertia_tensor_elements = other.get_inertia_tensor_about_point(
            x=total_x_cg,
            y=total_y_cg,
            z=total_z_cg,
            return_tensor=False
        )

        total_inertia_tensor_elements = [
            I__ + J__
            for I__, J__ in zip(
                self_inertia_tensor_elements,
                other_inertia_tensor_elements
            )
        ]

        return MassProperties(
            mass=total_mass,
            x_cg=total_x_cg,
            y_cg=total_y_cg,
            z_cg=total_z_cg,
            Ixx=total_inertia_tensor_elements[0],
            Iyy=total_inertia_tensor_elements[1],
            Izz=total_inertia_tensor_elements[2],
            Ixy=total_inertia_tensor_elements[3],
            Iyz=total_inertia_tensor_elements[4],
            Ixz=total_inertia_tensor_elements[5],
        )

    def __radd__(self, other: "MassProperties") -> "MassProperties":
        """
        Allows sum() to work with MassProperties objects.

        Basically, makes addition commutative.
        """
        if other == 0:  # sum() starts from 0; treat it as the additive identity.
            return self
        else:
            return self.__add__(other)

    def __sub__(self, other: "MassProperties") -> "MassProperties":
        """
        Subtracts one MassProperties object from another. (opposite of __add__() )
        """
        return self.__add__(-other)

    def __mul__(self, other: float) -> "MassProperties":
        """
        Returns a new MassProperties object that is equivalent to if you had summed together N (with `other`
        interpreted as N) identical MassProperties objects.
        """
        return MassProperties(
            mass=self.mass * other,
            x_cg=self.x_cg,
            y_cg=self.y_cg,
            z_cg=self.z_cg,
            Ixx=self.Ixx * other,
            Iyy=self.Iyy * other,
            Izz=self.Izz * other,
            Ixy=self.Ixy * other,
            Iyz=self.Iyz * other,
            Ixz=self.Ixz * other,
        )

    def __rmul__(self, other: float) -> "MassProperties":
        """
        Allows multiplication of a scalar by a MassProperties object. Makes multiplication commutative.
        """
        return self.__mul__(other)

    def __truediv__(self, other: float) -> "MassProperties":
        """
        Returns a new MassProperties object that is equivalent to if you had divided the mass of the current
        MassProperties object by a factor.
        """
        return self.__mul__(1 / other)

    def __eq__(self, other: "MassProperties") -> bool:
        """
        Returns True if all expected attributes of the two MassProperties objects are exactly equal.
        """
        if not isinstance(other, MassProperties):
            raise TypeError("MassProperties objects can only be compared to other MassProperties objects.")

        return all([
            getattr(self, attribute) == getattr(other, attribute)
            for attribute in [
                "mass",
                "x_cg",
                "y_cg",
                "z_cg",
                "Ixx",
                "Iyy",
                "Izz",
                "Ixy",
                "Iyz",
                "Ixz",
            ]
        ])

    def __ne__(self, other: "MassProperties") -> bool:
        return not self.__eq__(other)

    def allclose(self,
                 other: "MassProperties",
                 rtol=1e-5,
                 atol=1e-8,
                 equal_nan=False
                 ) -> bool:
        # Elementwise approximate equality across all mass-property attributes;
        # tolerances match np.allclose semantics.
        return all([
            np.allclose(
                getattr(self, attribute),
                getattr(other, attribute),
                rtol=rtol,
                atol=atol,
                equal_nan=equal_nan
            )
            for attribute in [
                "mass",
                "x_cg",
                "y_cg",
                "z_cg",
                "Ixx",
                "Iyy",
                "Izz",
                "Ixy",
                "Iyz",
                "Ixz",
            ]
        ])

    @property
    def xyz_cg(self):
        # Convenience accessor: CG location as an (x, y, z) tuple.
        return self.x_cg, self.y_cg, self.z_cg

    @property
    def inertia_tensor(self):
        # Returns the inertia tensor about the component's centroid.
        return np.array(
            [[self.Ixx, self.Ixy, self.Ixz],
             [self.Ixy, self.Iyy, self.Iyz],
             [self.Ixz, self.Iyz, self.Izz]]
        )

    def inv_inertia_tensor(self):
        """
        Computes the inverse of the inertia tensor, in a slightly more efficient way than raw inversion by exploiting its known structure.

        If you are effectively using this inertia tensor to solve a linear system, you should use a linear algebra
        solve() method (ideally via Cholesky decomposition) instead, for best speed.
        """
        iIxx, iIyy, iIzz, iIxy, iIyz, iIxz = np.linalg.inv_symmetric_3x3(
            m11=self.Ixx,
            m22=self.Iyy,
            m33=self.Izz,
            m12=self.Ixy,
            m23=self.Iyz,
            m13=self.Ixz,
        )

        return np.array(
            [[iIxx, iIxy, iIxz],
             [iIxy, iIyy, iIyz],
             [iIxz, iIyz, iIzz]]
        )

    def get_inertia_tensor_about_point(self,
                                       x: float = 0.,
                                       y: float = 0.,
                                       z: float = 0.,
                                       return_tensor: bool = True,
                                       ):
        """
        Returns the inertia tensor about an arbitrary point.
        Using https://en.wikipedia.org/wiki/Parallel_axis_theorem#Tensor_generalization

        Args:
            x: x-position of the new point, in the same axes as this MassProperties instance is specified in.
            y: y-position of the new point, in the same axes as this MassProperties instance is specified in.
            z: z-position of the new point, in the same axes as this MassProperties instance is specified in.
            return_tensor: A switch for the desired return type; see below for details. [boolean]

        Returns:

            If `return_tensor` is True:
                Returns the new inertia tensor, as a 2D numpy ndarray.
            If `return_tensor` is False:
                Returns the components of the new inertia tensor, as a tuple.
                If J is the new inertia tensor, the tuple returned is:
                (Jxx, Jyy, Jzz, Jxy, Jyz, Jxz)

        """
        # Displacement vector from the CG to the new reference point.
        R = [x - self.x_cg, y - self.y_cg, z - self.z_cg]
        RdotR = np.dot(R, R, manual=True)

        # Generalized parallel axis theorem: J = I + m * (R.R * eye(3) - outer(R, R))
        Jxx = self.Ixx + self.mass * (RdotR - R[0] ** 2)
        Jyy = self.Iyy + self.mass * (RdotR - R[1] ** 2)
        Jzz = self.Izz + self.mass * (RdotR - R[2] ** 2)
        Jxy = self.Ixy - self.mass * R[0] * R[1]
        Jyz = self.Iyz - self.mass * R[1] * R[2]
        Jxz = self.Ixz - self.mass * R[2] * R[0]

        if return_tensor:
            return np.array([
                [Jxx, Jxy, Jxz],
                [Jxy, Jyy, Jyz],
                [Jxz, Jyz, Jzz],
            ])
        else:
            return Jxx, Jyy, Jzz, Jxy, Jyz, Jxz

    def is_physically_possible(self) -> bool:
        """
        Checks whether it's possible for this MassProperties object to correspond to the mass properties of a real
        physical object.

        Assumes that all physically-possible objects have a positive mass (or density).

        Some special edge cases:

            - A MassProperties object with mass of 0 (i.e., null object) will return True. Note: this will return
            True even if the inertia tensor is not zero (which would basically be infinitesimal point masses at
            infinite distance).

            - A MassProperties object that is a point mass (i.e., inertia tensor is all zeros) will return True.

        Returns:
            True if the MassProperties object is physically possible, False otherwise.
        """
        ### This checks the basics
        impossible_conditions = [
            self.mass < 0,
            self.Ixx < 0,
            self.Iyy < 0,
            self.Izz < 0,
        ]

        eigs = np.linalg.eig(self.inertia_tensor)[0]

        # ## This checks that the inertia tensor is positive definite, which is a necessary but not sufficient
        # condition for an inertia tensor to be physically possible.
        impossible_conditions.extend([
            eigs[0] < 0,
            eigs[1] < 0,
            eigs[2] < 0,
        ])

        # ## This checks the triangle inequality, which is a necessary but not sufficient condition for an inertia
        # tensor to be physically possible.
        impossible_conditions.extend([
            eigs[0] + eigs[1] < eigs[2],
            eigs[0] + eigs[2] < eigs[1],
            eigs[1] + eigs[2] < eigs[0],
        ])

        return not any(impossible_conditions)

    def is_point_mass(self) -> bool:
        """
        Returns True if this MassProperties object corresponds to a point mass, False otherwise.
        """
        return np.allclose(self.inertia_tensor, 0)

    def generate_possible_set_of_point_masses(self,
                                              method="optimization",
                                              check_if_already_a_point_mass: bool = True,
                                              ) -> List["MassProperties"]:
        """
        Generates a set of point masses (represented as MassProperties objects with zero inertia tensors), that, when
        combined, would yield this MassProperties object.

        Note that there are an infinite number of possible sets of point masses that could yield this MassProperties
        object. This method returns one possible set of point masses, but there are many others.

        Example:
            >>> mp = MassProperties(mass=1, Ixx=1, Iyy=1, Izz=1, Ixy=0.1, Iyz=-0.1, Ixz=0.1)
            >>> point_masses = mp.generate_possible_set_of_point_masses()
            >>> mp.allclose(sum(point_masses)) # Asserts these are equal, within tolerance
            True

        Args:
            method: The method to use to generate the set of point masses. Currently, only "optimization" is supported.

        Returns:
            A list of MassProperties objects, each of which is a point mass (i.e., zero inertia tensor).
        """
        if check_if_already_a_point_mass:
            if self.is_point_mass():
                return [self]

        if method == "optimization":
            from aerosandbox.optimization import Opti

            opti = Opti()

            # Rough length scale used only to set initial guesses / optimizer scaling.
            # (The 1e-16 avoids a zero scale for point-mass-like inputs.)
            approximate_radius = (self.Ixx + self.Iyy + self.Izz) ** 0.5 / self.mass + 1e-16

            point_masses = [
                MassProperties(
                    mass=self.mass / 4,
                    x_cg=opti.variable(init_guess=self.x_cg - approximate_radius, scale=approximate_radius),
                    y_cg=opti.variable(init_guess=self.y_cg, scale=approximate_radius),
                    z_cg=opti.variable(init_guess=self.z_cg, scale=approximate_radius),
                ),
                MassProperties(
                    mass=self.mass / 4,
                    x_cg=opti.variable(init_guess=self.x_cg, scale=approximate_radius),
                    y_cg=opti.variable(init_guess=self.y_cg, scale=approximate_radius),
                    z_cg=opti.variable(init_guess=self.z_cg + approximate_radius, scale=approximate_radius),
                ),
                MassProperties(
                    mass=self.mass / 4,
                    x_cg=opti.variable(init_guess=self.x_cg, scale=approximate_radius),
                    y_cg=opti.variable(init_guess=self.y_cg, scale=approximate_radius),
                    z_cg=opti.variable(init_guess=self.z_cg - approximate_radius, scale=approximate_radius),
                ),
                MassProperties(
                    mass=self.mass / 4,
                    x_cg=opti.variable(init_guess=self.x_cg, scale=approximate_radius),
                    y_cg=opti.variable(init_guess=self.y_cg + approximate_radius, scale=approximate_radius),
                    z_cg=opti.variable(init_guess=self.z_cg, scale=approximate_radius),
                ),
            ]

            mass_props_reconstructed = sum(point_masses)

            # Add constraints
            opti.subject_to(mass_props_reconstructed.x_cg == self.x_cg)
            opti.subject_to(mass_props_reconstructed.y_cg == self.y_cg)
            opti.subject_to(mass_props_reconstructed.z_cg == self.z_cg)
            opti.subject_to(mass_props_reconstructed.Ixx == self.Ixx)
            opti.subject_to(mass_props_reconstructed.Iyy == self.Iyy)
            opti.subject_to(mass_props_reconstructed.Izz == self.Izz)
            opti.subject_to(mass_props_reconstructed.Ixy == self.Ixy)
            opti.subject_to(mass_props_reconstructed.Iyz == self.Iyz)
            opti.subject_to(mass_props_reconstructed.Ixz == self.Ixz)

            # Symmetry-breaking constraints, to remove redundant degrees of freedom
            # from the (non-unique) decomposition.
            opti.subject_to(point_masses[0].y_cg == self.y_cg)
            opti.subject_to(point_masses[0].z_cg == self.z_cg)
            opti.subject_to(point_masses[1].y_cg == self.y_cg)
            opti.subject_to(point_masses[0].x_cg < point_masses[1].x_cg)

            return opti.solve(verbose=False)(point_masses)

        elif method == "barbell":
            raise NotImplementedError("Barbell method not yet implemented!")
            # Fix: removed an unreachable statement that previously followed this `raise`
            # (a future implementation would start from the principal axes, e.g.:
            #     principle_inertias, principle_axes = np.linalg.eig(self.inertia_tensor)
            # )

        else:
            raise ValueError("Bad value of `method` argument!")

    def export_AVL_mass_file(self,
                             filename,
                             ) -> None:
        """
        Exports this MassProperties object to an AVL mass file.

        Note: AVL uses the SolidWorks convention for inertia tensors, which is different from the typical
        mathematical convention, and the convention used by this MassProperties class. In short, these differ by a
        sign flip in the products of inertia. More details available in the MassProperties docstring. See also:
        https://en.wikipedia.org/wiki/Moment_of_inertia#Inertia_tensor

        Args:
            filename: The filename to export to.

        Returns: None

        """
        lines = [
            "Lunit = 1.0 m",
            "Munit = 1.0 kg",
            "Tunit = 1.0 s",
            "",
            "g     = 9.81",
            "rho   = 1.225",
            "",
        ]

        def fmt(x: float) -> str:
            return f"{x:.16g}".ljust(21)

        lines.extend([
            " ".join([
                s.ljust(21) for s in [
                    "# mass",
                    "x_cg",
                    "y_cg",
                    "z_cg",
                    "Ixx",
                    "Iyy",
                    "Izz",
                    "Ixy",
                    "Ixz",
                    "Iyz",
                ]
            ]),
            " ".join([
                fmt(x) for x in [
                    self.mass,
                    self.x_cg,
                    self.y_cg,
                    self.z_cg,
                    self.Ixx,
                    self.Iyy,
                    self.Izz,
                    # Sign flips convert to AVL's (SolidWorks-style) product-of-inertia convention.
                    -self.Ixy,
                    -self.Ixz,
                    -self.Iyz,
                ]
            ])
        ])

        with open(filename, "w+") as f:
            f.write("\n".join(lines))
if __name__ == '__main__':
    ### Quick sanity checks on combining mass properties
    mp1 = MassProperties(mass=1)
    mp2 = MassProperties(mass=1, x_cg=1)
    combined = mp1 + mp2
    assert combined.x_cg == 0.5
    assert mp1 + mp2 - mp2 == mp1

    def r() -> float:
        # Standard-normal random scalar, used to fuzz the constructor.
        return np.random.randn()

    ### Rejection-sample random mass properties until a physically-possible one is found
    valid = False
    while not valid:
        mass_props = MassProperties(
            mass=r(),
            x_cg=r(), y_cg=r(), z_cg=r(),
            Ixx=r(), Iyy=r(), Izz=r(),
            Ixy=r(), Iyz=r(), Ixz=r(),
        )
        valid = mass_props.is_physically_possible()  # adds a bunch of checks
import aerosandbox.numpy as np
from aerosandbox.atmosphere.atmosphere import Atmosphere
from typing import Union
"""
Welcome to the AeroSandbox solar energy library!
The function you're probably looking for is `solar_flux()`, which summarizes this entire module and computes the
realized solar flux on a given surface as a function of many different parameters.
"""
def solar_flux_outside_atmosphere_normal(
        day_of_year: Union[int, float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Computes the normal solar flux at the top of the atmosphere ("Airmass 0").
    This varies due to Earth's orbital eccentricity (elliptical orbit).

    Source: https://www.itacanet.org/the-sun-as-a-source-of-energy/part-2-solar-energy-reaching-the-earths-surface/#2.1.-The-Solar-Constant

    Args:
        day_of_year: Julian day (1 == Jan. 1, 365 == Dec. 31)

    Returns: The normal solar flux [W/m^2] at the top of the atmosphere.
    """
    solar_constant = 1367  # [W/m^2] Mean value over the year
    # ~3.4% seasonal modulation from orbital eccentricity (max near perihelion in early January).
    eccentricity_correction = 1 + 0.034 * np.cosd(360 * day_of_year / 365.25)
    return solar_constant * eccentricity_correction
def declination_angle(
        day_of_year: Union[int, float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Computes the solar declination angle, in degrees, as a function of day of year.
    Accounts for the Earth's obliquity.

    Source: https://www.pveducation.org/pvcdrom/properties-of-sunlight/declination-angle

    Args:
        day_of_year: Julian day (1 == Jan. 1, 365 == Dec. 31)

    Returns: Solar declination angle [deg]
    """
    tilt_amplitude = -23.4398  # [deg] Earth's axial tilt; negative since declination is minimal ~Dec. 21
    # Phase offset of +10 days places the winter solstice near Dec. 21.
    return tilt_amplitude * np.cosd(360 / 365.25 * (day_of_year + 10))
def solar_elevation_angle(
        latitude: Union[float, np.ndarray],
        day_of_year: Union[int, float, np.ndarray],
        time: Union[float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Elevation angle of the sun [degrees] for a local observer.

    Solar elevation angle is the angle between the Sun's position and the local horizon plane.
    (Solar elevation angle) = 90 deg - (solar zenith angle)

    Source: https://www.pveducation.org/pvcdrom/properties-of-sunlight/elevation-angle

    Args:
        latitude: Local geographic latitude [degrees]. Positive for north, negative for south.
        day_of_year: Julian day (1 == Jan. 1, 365 == Dec. 31)
        time: Time after local solar noon [seconds]

    Returns: Solar elevation angle [degrees] (angle between horizon and sun). Returns negative values if the sun is
        below the horizon.
    """
    dec = declination_angle(day_of_year)
    hour_angle_deg = time / 86400 * 360  # Earth's rotation: 360 deg per 24 h
    # Standard spherical-astronomy formula: sin(el) = sin(dec)sin(lat) + cos(dec)cos(lat)cos(H)
    sine_elevation = (
            np.sind(dec) * np.sind(latitude) +
            np.cosd(dec) * np.cosd(latitude) * np.cosd(hour_angle_deg)
    )
    return np.arcsind(sine_elevation)  # in degrees
def solar_azimuth_angle(
        latitude: Union[float, np.ndarray],
        day_of_year: Union[int, float, np.ndarray],
        time: Union[float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Azimuth angle of the sun [degrees] for a local observer.

    Source: https://www.pveducation.org/pvcdrom/properties-of-sunlight/azimuth-angle

    Args:
        latitude: Local geographic latitude [degrees]. Positive for north, negative for south.
        day_of_year: Julian day (1 == Jan. 1, 365 == Dec. 31)
        time: Time after local solar noon [seconds]

    Returns: Solar azimuth angle [degrees] (the compass direction from which the sunlight is coming).
        * 0 corresponds to North, 90 corresponds to East.
        * Output ranges from 0 to 360 degrees.
    """
    declination = declination_angle(day_of_year)
    hour_angle_deg = time / 86400 * 360
    elevation = solar_elevation_angle(latitude, day_of_year, time)

    # cos(azimuth), clipped to guard arccos against float roundoff just outside [-1, 1].
    cos_azimuth = np.clip(
        (
                np.sind(declination) * np.cosd(latitude) -
                np.cosd(declination) * np.sind(latitude) * np.cosd(hour_angle_deg)
        ) / np.cosd(elevation),
        -1, 1
    )
    azimuth_magnitude = np.arccosd(cos_azimuth)

    # arccos alone cannot distinguish morning (sun in the east) from afternoon
    # (sun in the west). Since `time` is measured from solar noon, the second
    # half of each 24-hour period corresponds to the following morning.
    is_solar_morning = np.mod(time, 86400) > 43200
    return np.where(
        is_solar_morning,
        azimuth_magnitude,
        360 - azimuth_magnitude
    )
def airmass(
        solar_elevation_angle: Union[float, np.ndarray],
        altitude: Union[float, np.ndarray] = 0.,
        method='Young'
) -> Union[float, np.ndarray]:
    """
    Computes the relative airmass as a function of the (true) solar elevation
    angle and observer altitude, including refractive (ray-curving) effects of
    the atmospheric density gradient.

    Relative airmass is the line integral of air density along an upwards ray,
    normalized by the same integral for a sea-level observer with the sun
    directly overhead. Therefore:

        * Outer space has a relative airmass of 0, regardless of sun position.
        * Sea level with the sun at zenith has a relative airmass of 1.
        * Sea level with the sun at the horizon has a relative airmass of ~31.7
          using the *true* (geometric) elevation angle, as here. (Models quoting
          ~38 at the horizon use the *apparent* elevation angle; refraction makes
          the two differ by up to 0.56 degrees at the horizon.)

    Small negative elevation angles (e.g., -0.5 deg) still yield finite airmass
    due to refraction; strongly negative angles give a theoretically infinite
    airmass, for which 1e100 is returned instead.

    Sources:
        Young model: Young, A. T. 1994. Air mass and refraction. Applied Optics.
        33:1108-1110. doi: 10.1364/AO.33.001108.
        Reproduced at https://en.wikipedia.org/wiki/Air_mass_(astronomy)

    Args:
        solar_elevation_angle: True (geometric) solar elevation angle [degrees].
        altitude: Altitude of the observer [meters] above sea level.
        method: A string that determines which model to use. Currently only 'Young'.

    Returns: The relative airmass [unitless]; always in [0, Infinity).
    """
    if method != 'Young':
        raise ValueError("Bad value of `method`!")

    true_zenith_angle = 90 - solar_elevation_angle
    c = np.cosd(true_zenith_angle)

    # Young (1994) rational-polynomial fit in cos(zenith).
    num = 1.002432 * c ** 2 + 0.148386 * c + 0.0096467
    den = c ** 3 + 0.149864 * c ** 2 + 0.0102963 * c + 0.000303978
    sea_level_airmass = np.where(
        den > 0,
        num / den,
        1e100  # Essentially, infinity.
    )

    # Scale by the local-to-sea-level pressure ratio: less air above a higher observer.
    return sea_level_airmass * (
            Atmosphere(altitude=altitude).pressure() /
            101325.
    )
def solar_flux(
        latitude: Union[float, np.ndarray],
        day_of_year: Union[int, float, np.ndarray],
        time: Union[float, np.ndarray],
        altitude: Union[float, np.ndarray] = 0.,
        panel_azimuth_angle: Union[float, np.ndarray] = 0.,
        panel_tilt_angle: Union[float, np.ndarray] = 0.,
        air_quality: str = 'typical',
        albedo: Union[float, np.ndarray] = 0.2,
        **deprecated_kwargs
) -> Union[float, np.ndarray]:
    """
    Computes the solar power flux (power per unit area) on a flat (possibly tilted) panel. Accounts for atmospheric
    absorption, scattering, and re-scattering (e.g. diffuse illumination), all as a function of panel altitude.
    Fully vectorizable.

    Source for atmospheric absorption:
    * Planning and installing photovoltaic systems: a guide for installers, architects and engineers,
    2nd Ed. (2008), Table 1.1, Earthscan with the International Institute for Environment and Development,
    Deutsche Gesellschaft für Sonnenenergie. ISBN 1-84407-442-0., accessed via
    https://en.wikipedia.org/wiki/Air_mass_(solar_energy)

    Args:
        latitude: Local geographic latitude [degrees]. Positive for north, negative for south.
        day_of_year: The day of the year, represented in the Julian day format (i.e., 1 == Jan. 1, 365 == Dec. 31). This
            accounts for seasonal variations in the sun's position in the sky.
        time: The time of day, measured as the time elapsed after local solar noon [seconds]. Should range from 0 to
            86,400 (the number of seconds in a day). Local solar noon is the time of day when the sun is at its highest
            point in the sky, directly above the observer's local meridian. This is the time when the sun's rays are most
            directly overhead and solar flux is at its peak for a given location. Solar noon does not necessarily occur
            at exactly 12:00 PM local standard time, as it depends on your longitude, the equation of time, and the time
            of year. (Also, don't forget to account for Daylight Savings Time, if that's a relevant consideration for
            your location and season.) Typically, local solar noon is +- 15 minutes from 12:00 PM local standard time.
        altitude: Altitude of the panel above sea level [meters]. This affects atmospheric absorption and scattering
            characteristics.
        panel_azimuth_angle: The azimuth angle of the panel normal [degrees] (the compass direction in which the
            panel normal is tilted). Irrelevant if the panel tilt angle is 0 (e.g., the panel is horizontal).
            * 0 corresponds to North, 90 corresponds to East.
            * Input ranges from 0 to 360 degrees.
        panel_tilt_angle: The angle between the panel normal and vertical (zenith) [degrees].
            * Note: this angle convention is different than the solar elevation angle convention!
            * A horizontal panel has a tilt angle of 0, and a vertical panel has a tilt angle of 90 degrees.
            If the angle between the panel normal and the sun direction is ever more than 90 degrees (e.g. the panel
            is pointed the wrong way), we assume that the panel receives no direct irradiation. (However,
            it may still receive minor amounts of power due to diffuse irradiation from re-scattering.)
        air_quality: Indicates the amount of pollution in the air. A string, one of:
            * 'clean': Pristine atmosphere conditions.
            * 'typical': Corresponds to "rural aerosol loading" following ASTM G-173.
            * 'polluted': Urban atmosphere conditions.
            Note: in very weird edge cases, a polluted atmosphere can actually result in slightly higher solar flux
            than clean air, due to increased back-scattering. For example, imagine it's near sunset, with the sun in
            the west, and your panel normal vector points east. Increased pollution can, in some edge cases,
            result in enough increased back-scattering (multipathing) that you have a smidge more illumination.
        albedo: The fraction of light that hits the ground that is reflected. Affects illumination from re-scattering
            when panels are tilted. Typical values for general terrestrial surfaces are 0.2, which is the default here.
            * Other values, taken from the Earthscan source (citation above):
                * Grass (July, August): 0.25
                * Lawn: 0.18 - 0.23
                * Dry grass: 0.28 - 0.32
                * Milled fields: 0.26
                * Barren soil: 0.17
                * Gravel: 0.18
                * Clean concrete: 0.30
                * Eroded concrete: 0.20
                * Clean cement: 0.55
                * Asphalt: 0.15
                * Forests: 0.05 - 0.18
                * Sandy areas: 0.10 - 0.25
                * Water: Strongly dependent on incidence angle; 0.05 - 0.22
                * Fresh snow: 0.80 - 0.90
                * Old snow: 0.45 - 0.70
        **deprecated_kwargs: Accepted and silently ignored. NOTE(review): presumably
            kept for backwards-compatibility with renamed parameters from older
            versions — confirm; a typo'd keyword is also silently swallowed here.

    Returns: The solar power flux [W/m^2] on the panel.

        * Note: does not account for any potential reflectivity of the solar panel's coating itself.
    """
    # Sun geometry and extraterrestrial flux for this time/place.
    flux_outside_atmosphere = solar_flux_outside_atmosphere_normal(day_of_year=day_of_year)
    solar_elevation = solar_elevation_angle(latitude, day_of_year, time)
    solar_azimuth = solar_azimuth_angle(latitude, day_of_year, time)
    relative_airmass = airmass(
        solar_elevation_angle=solar_elevation,
        altitude=altitude,
    )

    # Empirical transmission-vs-airmass fits, one per pollution level.
    # Source: "Planning and installing..." Earthscan. Full citation in docstring above.
    if air_quality == 'typical':
        atmospheric_transmission_fraction = 0.70 ** (relative_airmass ** 0.678)
    elif air_quality == 'clean':
        atmospheric_transmission_fraction = 0.76 ** (relative_airmass ** 0.618)
    elif air_quality == 'polluted':
        atmospheric_transmission_fraction = 0.56 ** (relative_airmass ** 0.715)
    else:
        raise ValueError("Bad value of `air_quality`!")

    # Direct beam is zeroed whenever the sun is below the horizon.
    direct_normal_irradiance = np.where(
        solar_elevation > 0.,
        flux_outside_atmosphere * atmospheric_transmission_fraction,
        0.
    )

    # Flux removed from the direct beam; 10/28 of it is attributed to scattering
    # (as opposed to absorption), and becomes available as diffuse illumination.
    absorption_and_scattering_losses = flux_outside_atmosphere - flux_outside_atmosphere * atmospheric_transmission_fraction
    scattering_losses = absorption_and_scattering_losses * (10. / 28.)
    # Source: https://www.pveducation.org/pvcdrom/properties-of-sunlight/air-mass
    # Indicates that absorption and scattering happen in a 18:10 ratio, at least in AM1.5 conditions. We extrapolate
    # this to all conditions.

    # Fold tilt into [0, 360), then map it to a sky-view fraction: 1 when facing
    # straight up, 0 when facing straight down.
    # NOTE(review): this is a linear approximation of the sky-view factor (the
    # common isotropic-sky model uses (1 + cos(tilt)) / 2) — presumably an
    # intentional simplification; confirm before changing.
    panel_tilt_angle = np.mod(panel_tilt_angle, 360)
    fraction_of_panel_facing_sky = np.where(
        panel_tilt_angle < 180,
        1 - panel_tilt_angle / 180,
        -1 + panel_tilt_angle / 180,
    )
    # Ground-facing portion of the panel sees diffuse light only after an albedo bounce.
    diffuse_irradiance = scattering_losses * atmospheric_transmission_fraction * (
            fraction_of_panel_facing_sky + albedo * (1 - fraction_of_panel_facing_sky)
    )
    # We assume that the in-scattering (i.e., diffuse irradiance) and the out-scattering (i.e., scattering losses in
    # the direct irradiance calculation) are equal, by argument of approximately parallel incident rays.
    # We further assume that any in-scattering must then once-again go through the absorption / re-scattering process,
    # which is identical to the original atmospheric transmission fraction.

    # Dot product of the panel normal and the sun direction, expanded in
    # elevation/azimuth components.
    cosine_of_angle_between_panel_normal_and_sun = (
            np.cosd(solar_elevation) *
            np.sind(panel_tilt_angle) *
            np.cosd(panel_azimuth_angle - solar_azimuth)
            + np.sind(solar_elevation) * np.cosd(panel_tilt_angle)
    )
    cosine_of_angle_between_panel_normal_and_sun = np.fmax(
        cosine_of_angle_between_panel_normal_and_sun,
        0
    )  # Accounts for if you have a downwards-pointing panel while the sun is above you.
    # Source: https://www.pveducation.org/pvcdrom/properties-of-sunlight/arbitrary-orientation-and-tilt
    # Author of this code (Peter Sharpe) has manually verified correctness of this vector math.

    # Total flux = attenuated direct beam projected onto the panel, plus diffuse.
    flux_on_panel = (
            direct_normal_irradiance * cosine_of_angle_between_panel_normal_and_sun
            + diffuse_irradiance
    )
    return flux_on_panel
def peak_sun_hours_per_day_on_horizontal(
        latitude: Union[float, np.ndarray],
        day_of_year: Union[int, float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Computes the equivalent number of hours of peak (solar-noon) sun per day
    on a horizontal surface: (daily solar energy) / (solar-noon flux).

    Deprecated; integrate `solar_flux()` directly instead.

    :param latitude: Latitude [degrees]
    :param day_of_year: Julian day (1 == Jan. 1, 365 == Dec. 31)
    :return: Equivalent peak-sun duration [hours/day]
    """
    import warnings
    warnings.warn(
        "Use `solar_flux()` function from this module instead and integrate, which allows far greater granularity.",
        DeprecationWarning
    )

    # Sample flux over one full day, starting at solar noon.
    time = np.linspace(0, 86400, 1000)
    fluxes = solar_flux(latitude, day_of_year, time)
    # NOTE(review): `np` here is aerosandbox.numpy, whose `trapz` appears to
    # return per-interval trapezoid averages (unlike numpy.trapz, which returns
    # the integral) — so multiplying by np.diff(time) and summing yields the
    # daily energy per area [J/m^2]. Confirm against aerosandbox.numpy before
    # refactoring this line.
    energy_per_area = np.sum(np.trapz(fluxes) * np.diff(time))

    # Normalize by the solar-noon (time=0) flux to get an equivalent duration [s].
    duration_of_equivalent_peak_sun = energy_per_area / solar_flux(latitude, day_of_year, time=0.)

    sun_hours = duration_of_equivalent_peak_sun / 3600  # seconds -> hours
    return sun_hours
def length_day(
        latitude: Union[float, np.ndarray],
        day_of_year: Union[int, float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Duration for which the sun is above the horizon on a given day.

    Args:
        latitude: Local geographic latitude [degrees]. Positive for north, negative for south.
        day_of_year: Julian day (1 == Jan. 1, 365 == Dec. 31)

    Returns: Duration of daylight on the given day [seconds].
    """
    dec = declination_angle(day_of_year)

    # Sunrise equation: cos(sunrise hour angle) = -tan(declination) * tan(latitude).
    # Clipped to [-1, 1] to handle polar day / polar night.
    cos_sunrise_hour_angle = np.clip(
        -np.sind(dec) * np.sind(latitude) / (np.cosd(dec) * np.cosd(latitude)),
        -1, 1
    )

    # Daylight spans twice the sunrise hour angle; convert that fraction of a
    # full revolution into seconds.
    daylight_angle_rad = 2 * np.arccos(cos_sunrise_hour_angle)
    return daylight_angle_rad / (2 * np.pi) * 86400
def mass_MPPT(
        power: Union[float, np.ndarray]
) -> Union[float, np.ndarray]:
    """
    Estimates the mass of a Maximum Power Point Tracking (MPPT) unit for solar
    energy collection, via a power-law regression.

    Regression source: AeroSandbox/studies/SolarMPPTMasses.

    Args:
        power: Power of MPPT [watts]

    Returns:
        Estimated MPPT mass [kg]
    """
    # Power-law fit: mass = k * power ** alpha
    k = 0.066343
    alpha = 0.515140
    return k * power ** alpha
if __name__ == "__main__":
    # Demo / validation script: plots the solar flux model over a day and
    # compares it against published measured data.
    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p

    # plt.switch_backend('WebAgg')

    # One shade of the same hue per air-quality level (lighter == cleaner).
    base_color = p.palettes['categorical'][0]
    quality_colors = {
        'clean'   : p.adjust_lightness(base_color, amount=1.2),
        'typical' : p.adjust_lightness(base_color, amount=0.7),
        'polluted': p.adjust_lightness(base_color, amount=0.2),
    }

    ##### Plot solar_flux() over the course of a day
    time = np.linspace(0, 86400, 86401)  # One sample per second, from solar noon
    hour = time / 3600

    # Tropic of Cancer (23.5 deg N) on the summer solstice (day 172): sun passes
    # directly overhead at solar noon.
    base_kwargs = dict(
        latitude=23.5,
        day_of_year=172,
        time=time,
    )

    fig, ax = plt.subplots(2, 1, figsize=(7, 6.5))
    plt.sca(ax[0])
    plt.title(f"Solar Flux on a Horizontal Surface Over A Day\n(Tropic of Cancer, Summer Solstice, Sea Level)")
    for q in quality_colors.keys():
        plt.plot(
            hour,
            solar_flux(
                **base_kwargs,
                air_quality=q
            ),
            color=quality_colors[q],
            label=f'ASB Model: {q.capitalize()} air'
        )

    plt.sca(ax[1])
    plt.title(f"Solar Flux on a Sun-Tracking Surface Over A Day\n(Tropic of Cancer, Summer Solstice, Sea Level)")
    for q in quality_colors.keys():
        # A perfect 2-axis tracker: panel normal always points at the sun.
        plt.plot(
            hour,
            solar_flux(
                **base_kwargs,
                panel_tilt_angle=90 - solar_elevation_angle(**base_kwargs),
                panel_azimuth_angle=solar_azimuth_angle(**base_kwargs),
                air_quality=q
            ),
            color=quality_colors[q],
            label=f'ASB Model: {q.capitalize()} air'
        )

    for a in ax:
        plt.sca(a)
        plt.xlabel("Time after Local Solar Noon [hours]")
        plt.ylabel("Solar Flux [$W/m^2$]")
        plt.xlim(0, 24)
        plt.ylim(-10, 1200)
        p.set_ticks(3, 0.5, 200, 50)

    plt.sca(ax[0])
    p.show_plot()

    ##### Plot solar_flux() as a function of elevation angle, and compare to data to validate.
    # Source: Ed. (2008), Table 1.1, Earthscan with the International Institute for Environment and Development, Deutsche Gesellschaft für Sonnenenergie. ISBN 1-84407-442-0.
    # Via: https://en.wikipedia.org/wiki/Air_mass_(solar_energy)#Solar_intensity
    # Values here give lower and upper bounds for measured solar flux on a typical clear day, varying primarily due
    # to pollution.
    raw_data = """\
z [deg],AM [-],Solar Flux Lower Bound [W/m^2],Solar Flux Upper Bound [W/m^2]
0,1,840,1130
23,1.09,800,1110
30,1.15,780,1100
45,1.41,710,1060
48.2,1.5,680,1050
60,2,560,970
70,2.9,430,880
75,3.8,330,800
80,5.6,200,660
85,10,85,480
90,38,6,34
"""

    import pandas as pd
    from io import StringIO

    # NOTE(review): `delimiter` is unused — the read_csv call below passes ','
    # explicitly. Candidate for removal.
    delimiter = "\t"

    df = pd.read_csv(
        StringIO(raw_data),
        delimiter=','
    )
    df["Solar Flux [W/m^2]"] = (df['Solar Flux Lower Bound [W/m^2]'] + df['Solar Flux Upper Bound [W/m^2]']) / 2

    # NOTE(review): `fluxes` and `elevations` are computed but never used below
    # (the plotting loop recomputes them per air-quality level). Candidates for
    # removal.
    fluxes = solar_flux(
        **base_kwargs,
        panel_tilt_angle=90 - solar_elevation_angle(**base_kwargs),
        panel_azimuth_angle=solar_azimuth_angle(**base_kwargs),
    )
    elevations = solar_elevation_angle(
        **base_kwargs
    )

    fig, ax = plt.subplots()
    for q in quality_colors.keys():
        # Model prediction for a sun-tracking panel, plotted vs. sun elevation.
        plt.plot(
            solar_elevation_angle(**base_kwargs),
            solar_flux(
                **base_kwargs,
                panel_tilt_angle=90 - solar_elevation_angle(**base_kwargs),
                panel_azimuth_angle=solar_azimuth_angle(**base_kwargs),
                air_quality=q
            ),
            color=quality_colors[q],
            label=f'ASB Model: {q.capitalize()} air',
            zorder=3
        )

    # Published measurement envelope, converted from zenith to elevation angle.
    data_color = p.palettes['categorical'][1]
    plt.fill_between(
        x=90 - df['z [deg]'].values,
        y1=df['Solar Flux Lower Bound [W/m^2]'],
        y2=df['Solar Flux Upper Bound [W/m^2]'],
        color=data_color,
        alpha=0.4,
        label='Experimental Data Range\n(due to Pollution)',
        zorder=2.9,
    )
    for d in ['Lower', 'Upper']:
        plt.plot(
            90 - df['z [deg]'].values,
            df[f'Solar Flux {d} Bound [W/m^2]'],
            ".",
            color=data_color,
            alpha=0.7,
            zorder=2.95
        )

    plt.annotate(
        text='Data: "Planning and Installing Photovoltaic Systems".\nEarthscan (2008), ISBN 1-84407-442-0.',
        xy=(0.02, 0.98),
        xycoords="axes fraction",
        ha="left",
        va='top',
        fontsize=9
    )

    plt.xlim(-5, 90)
    p.set_ticks(15, 5, 200, 50)
    p.show_plot(
        f"Sun Position vs. Solar Flux on a Sun-Tracking Surface",
        f"Solar Elevation Angle [deg]",
        "Solar Flux [$W/m^2$]"
    )
import aerosandbox.numpy as np

# Model constants for a small solid rocket motor using an oxamide-doped
# AP/HTPB propellant. Fit values below come from strand-burner data
# (as of 2020-02-21); see the per-constant notes.

# import firefly_propulsion.propellant.oxamide_model as oxm
# from proptools import nozzle as ptn

# zeta_c_star = oxm.zeta_c_star
# chamber_pressure_max = oxm.p_c_max
# n = oxm.n

# Burn rate exponent [units: dimensionless]
# Value of 0.402 based on strand burner fit as of 2020-02-21
n = 0.402

# Oxamide parameter [units: dimensionless].
# Theoretically, lambda = 13.3 for a 80% AP + 20% HTPB propellant,
# which we have used as the basis for the no-metal propellant family
# since spring 2018.
# Previously, (e.g. in Matt's MS Thesis) we used a metalized propellant
# which had a lower theoretical value for lambda (~7).
# lamb = 13.3
# Value of 6.20 based on strand burner fit as of 2020-02-21
lamb = 6.20

# Burn rate coefficient at zero oxamide content
# [units: pascal**(-n) meter second**-1].
# This is for propellant in the motor.
# Based on strand burner fit for 400 um blend AP as of 2020-02-21
# (the 1.15 factor converts the strand-burner fit to in-motor conditions).
a_0 = 3.43 * 1.15 * (1e6) ** (-n) * 1e-3

# Strand burner burn rate reduction factor
# NOTE(review): presumably the inverse of the 1.15 in-motor factor above, for
# converting in-motor burn rates back to strand-burner conditions — confirm.
strand_reduction_factor = 1 / 1.15

# Combustion efficiency [units: dimensionless].
zeta_c_star = 0.90

# Maximum chamber pressure the burn rate model is fit to [units: pascal].
chamber_pressure_max = 2.0e6

# Valid range of oxamide mass fraction values for the model.
# [units: dimensionless]
W_OM_VALID_RANGE = (0, 0.22)
OUT_OF_RANGE_ERROR_STRING = (
    '{:.3f} is outside the model valid range of {:.3f} <= w_om <= {:.3f}')
def burn_rate_coefficient(oxamide_fraction):
    """Propellant burn rate coefficient as a function of oxamide mass fraction.

    Negative oxamide fractions are clamped to zero before evaluation.
    Valid from 0% to 15% oxamide. # TODO IMPLEMENT THIS

    Returns:
        a: propellant burn rate coefficient
        [units: pascal**(-n) meter second**-1].
    """
    w_om = np.fmax(oxamide_fraction, 0)
    return a_0 * (1 - w_om) / (1 + lamb * w_om)
def c_star(oxamide_fraction):
    """Ideal characteristic velocity as a function of oxamide mass fraction.

    Quadratic fit. Valid from 0% to 20% oxamide. # TODO IMPLEMENT THIS

    Returns:
        c_star: ideal characteristic velocity [units: meter second**-1].
    """
    # oxamide_fraction = cas.fmax(oxamide_fraction, 0)
    # Quadratic fit coefficients: constant, linear, quadratic.
    c0, c1, c2 = 1380.2, -983.3, -697.1
    return c0 + c1 * oxamide_fraction + c2 * oxamide_fraction ** 2
def dubious_min_combustion_pressure(oxamide_fraction):
    """Minimum pressure for stable combustion vs. oxamide content model.

    Note: this model is of DUBIOUS accuracy. Don't trust it.

    Returns: minimum stable combustion pressure [units: pascal].
    """
    # Quadratic fit, evaluated in megapascals.
    q2, q1, q0 = 7.73179444e+00, 3.60886970e-01, 7.64587936e-03
    p_min_megapascal = q2 * oxamide_fraction ** 2 + q1 * oxamide_fraction + q0
    return 1e6 * p_min_megapascal  # MPa -> Pa
def gamma(oxamide_fraction):
    """Exhaust-gas ratio of specific heats vs. oxamide content model.

    Returns:
        gamma: Exhaust gas ratio of specific heats [units: dimensionless].
    """
    # oxamide_fraction = cas.fmax(oxamide_fraction, 0)
    # Quadratic fit coefficients: constant, linear, quadratic.
    g0, g1, g2 = 1.238, 0.216, -0.351
    return g0 + g1 * oxamide_fraction + g2 * oxamide_fraction ** 2
def expansion_ratio_from_pressure(chamber_pressure, exit_pressure, gamma, oxamide_fraction):
    """Nozzle expansion ratio implied by the chamber and exit pressures.

    See :ref:`expansion-ratio-tutorial-label` for a physical description of the
    expansion ratio.

    Reference: Rocket Propulsion Elements, 8th Edition, Equation 3-25

    Arguments:
        chamber_pressure (scalar): Nozzle stagnation chamber pressure [units: pascal].
            Clamped from below to the (dubious) minimum stable combustion
            pressure and to 1.5x the exit pressure before use.
        exit_pressure (scalar): Nozzle exit static pressure [units: pascal].
        gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
        oxamide_fraction (scalar): Propellant oxamide mass fraction [units: dimensionless];
            used only to evaluate the minimum-combustion-pressure clamp.

    Returns:
        scalar: Expansion ratio :math:`\\epsilon = A_e / A_t` [units: dimensionless]
    """
    # Keep the chamber pressure in a physically sensible regime.
    p_c = np.fmax(chamber_pressure, dubious_min_combustion_pressure(oxamide_fraction))
    p_c = np.fmax(p_c, exit_pressure * 1.5)

    # Throat-to-exit area ratio (RPE Eq. 3-25); expansion ratio is its inverse.
    pressure_ratio = exit_pressure / p_c
    throat_to_exit = (
            ((gamma + 1) / 2) ** (1 / (gamma - 1))
            * pressure_ratio ** (1 / gamma)
            * np.sqrt((gamma + 1) / (gamma - 1) * (1 - pressure_ratio ** ((gamma - 1) / gamma)))
    )
    return 1 / throat_to_exit
def thrust_coefficient(chamber_pressure, exit_pressure, gamma, p_a=None, er=None):
    """Nozzle thrust coefficient, :math:`C_F`.

    The thrust coefficient is a figure of merit for the nozzle expansion process.
    See :ref:`thrust-coefficient-label` for a description of the physical meaning of the
    thrust coefficient.

    Reference: Equation 1-33a in Huzel and Huang.

    Arguments:
        chamber_pressure (scalar): Nozzle stagnation chamber pressure [units: pascal].
        exit_pressure (scalar): Nozzle exit static pressure [units: pascal].
        gamma (scalar): Exhaust gas ratio of specific heats [units: dimensionless].
        p_a (scalar, optional): Ambient pressure [units: pascal]. If None,
            then p_a = exit_pressure is assumed (matched nozzle; the
            pressure-thrust term vanishes).
        er (scalar, optional): Nozzle area expansion ratio [units: dimensionless]. If None,
            then p_a = exit_pressure is assumed (matched nozzle; the
            pressure-thrust term vanishes).

    Returns:
        scalar: The thrust coefficient, :math:`C_F` [units: dimensionless].

    Raises:
        ValueError: If only one of `p_a` and `er` is provided.
    """
    # Bug fix: the original version unconditionally added the pressure-thrust
    # term, so calling with the default p_a=None / er=None raised a TypeError
    # despite the documented "If None, then p_a = exit_pressure" behavior.
    if (p_a is None) != (er is None):
        raise ValueError('Both p_a and er must be provided.')

    # Momentum-thrust term (Huzel & Huang Eq. 1-33a).
    C_F = (2 * gamma ** 2 / (gamma - 1)
           * (2 / (gamma + 1)) ** ((gamma + 1) / (gamma - 1))
           * (1 - (exit_pressure / chamber_pressure) ** ((gamma - 1) / gamma))
           ) ** 0.5
    # Pressure-thrust correction; zero for a matched nozzle (p_a == exit_pressure),
    # which is also the assumed condition when p_a and er are omitted.
    if p_a is not None and er is not None:
        C_F += er * (exit_pressure - p_a) / chamber_pressure
    return C_F
if __name__ == "__main__":
    # Quick visual sanity checks of the propellant-model fits using plotly.
    import plotly.express as px
    import pandas as pd

    # Oxamide Function tests
    # Evaluate each fit over a range that deliberately extends past the valid
    # oxamide-fraction range (0 to 0.22) to show clamping/extrapolation behavior.
    oxamides = np.linspace(-0.3, 0.5, 200)
    burn_rate_coefficients = burn_rate_coefficient(oxamides)
    c_stars = c_star(oxamides)
    min_combustion_pressures = dubious_min_combustion_pressure(oxamides)
    gammas = gamma(oxamides)

    px.scatter(x=oxamides, y=burn_rate_coefficients, labels={"x": "Oxamide", "y": "Burn Rate Coeff"}).show()
    px.scatter(x=oxamides, y=c_stars, labels={"x": "Oxamide", "y": "c_star"}).show()
    px.scatter(x=oxamides, y=min_combustion_pressures, labels={"x": "Oxamide", "y": "Min. Combustion Pressure"}).show()
    px.scatter(x=oxamides, y=gammas, labels={"x": "Oxamide", "y": "Gamma"}).show()

    # # ER_from_P test
    # Sweep chamber/exit pressure combinations on log grids and plot the
    # resulting expansion ratio surface.
    chamber_pressure_inputs = np.logspace(5, 6, 200)
    exit_pressure_inputs = np.logspace(4, 5, 200)
    ox_for_test = 0
    chamber_pressures = []
    exit_pressures = []
    ers = []
    for chamber_pressure in chamber_pressure_inputs:
        for exit_pressure in exit_pressure_inputs:
            chamber_pressures.append(chamber_pressure)
            exit_pressures.append(exit_pressure)
            ers.append(expansion_ratio_from_pressure(chamber_pressure, exit_pressure, gamma(ox_for_test), ox_for_test))
    data = pd.DataFrame({
        'chamber_pressure': chamber_pressures,
        'exit_pressure'   : exit_pressures,
        'ers'             : ers
    })
    px.scatter_3d(data, x='chamber_pressure', y='exit_pressure', z='ers', color='ers', log_x=True, log_y=True,
                  log_z=True).show()
import aerosandbox.numpy as np
import datetime
from aerosandbox.modeling.interpolation import InterpolatedModel
from pathlib import Path
import os
def wind_speed_conus_summer_99(altitude, latitude):
    """
    Returns the 99th-percentile wind speed magnitude over the continental United States (CONUS) in July-Aug. Aggregate of data from 1972 to 2019.
    Fits at C:\Projects\GitHub\Wind_Analysis
    :param altitude: altitude [m]
    :param latitude: latitude, in degrees North [deg]
    :return: 99th-percentile wind speed over the continental United States in the summertime. [m/s]
    """
    # Normalized inputs: latitude about 37.5 deg N, altitude about 24.2 km.
    l = (latitude - 37.5) / 11.5
    a = (altitude - 24200) / 24200

    # Fitted surrogate-model coefficients, from the regression referenced in
    # the docstring. NOTE(review): names appear to encode term structure
    # (e.g. 'g' = Gaussian bump, 'q' = quadratic, leading 'a'/'l' = altitude/
    # latitude) — confirm against the original fit script before editing.
    agc = -0.5363486000267786
    agh = 1.9569754777072828
    ags = 0.1458701999734713
    aqc = -1.4645014948089652
    c0 = -0.5169694086686631
    c12 = 0.0849519807021402
    c21 = -0.0252010113059998
    c4a = 0.0225856848053377
    c4c = 1.0281877353734501
    cg = 0.8050736230004489
    cgc = 0.2786691793571486
    cqa = 0.1866078047914259
    cql = 0.0165126852561671
    cqla = -0.1361667658248024
    lgc = 0.6943655538727291
    lgh = 2.0777449841036777
    lgs = 0.9805766577269118
    lqc = 4.0356834595743214

    # Normalized wind speed: quadratic and quartic polynomial terms in (l, a),
    # cross terms, plus a generalized-Gaussian bump (the jet-stream peak).
    s = c0 + cql * (l - lqc) ** 2 + cqa * (a - aqc) ** 2 + cqla * a * l + cg * np.exp(
        -(np.fabs(l - lgc) ** lgh / (2 * lgs ** 2) + np.fabs(a - agc) ** agh / (
                2 * ags ** 2) + cgc * a * l)) + c4a * (
                a - c4c) ** 4 + c12 * l * a ** 2 + c21 * l ** 2 * a

    # Un-normalize the fit output to meters per second.
    speed = s * 56 + 7
    return speed
### Prep data for global wind speed function
# Import data
root = Path(os.path.abspath(__file__)).parent
altitudes_world = np.load(root / "datasets" / "winds_and_tropopause_global" / "altitudes.npy")
latitudes_world = np.load(root / "datasets" / "winds_and_tropopause_global" / "latitudes.npy")
# Day-of-year coordinates: midpoints of 12 equal "months" spanning the year.
day_of_year_world_boundaries = np.linspace(0, 365, 13)
day_of_year_world = (day_of_year_world_boundaries[1:] + day_of_year_world_boundaries[:-1]) / 2
# 3D array of 95th-percentile wind speeds, indexed (altitude, latitude, day-of-year).
winds_95_world = np.load(root / "datasets" / "winds_and_tropopause_global" / "winds_95_vs_altitude_latitude_day.npy")

# Trim the poles (drop the first and last latitude rows)
latitudes_world = latitudes_world[1:-1]
winds_95_world = winds_95_world[:, 1:-1, :]

# Flip data appropriately, so coordinate arrays are ascending for interpolation
altitudes_world = np.flip(altitudes_world)
latitudes_world = np.flip(latitudes_world)
### NOTE: winds_95_world has *already* been flipped appropriately

# # Extend altitude range down to the ground # TODO review and redo properly
# altitudes_world_to_extend = [-1000, 0, 5000]
# altitudes_world = np.hstack((
#     altitudes_world_to_extend,
#     altitudes_world
# ))
# winds_95_world = np.concatenate(
#     (
#         np.tile(
#             winds_95_world[0, :, :],
#             (3, 1, 1)
#         ),
#         winds_95_world
#     ),
#     axis=0
# )

# Downsample latitude (every 5th point) to keep the interpolant small.
latitudes_world = latitudes_world[::5]
winds_95_world = winds_95_world[:, ::5, :]

# Extend boundaries so that cubic spline interpolates around day_of_year appropriately.
# (Pad the day-of-year axis with wrapped copies so the spline is periodic-ish
# across the Dec. 31 / Jan. 1 boundary.)
extend_bounds = 3
day_of_year_world = np.hstack((
    day_of_year_world[-extend_bounds:] - 365,
    day_of_year_world,
    day_of_year_world[:extend_bounds] + 365
))
winds_95_world = np.dstack((
    winds_95_world[:, :, -extend_bounds:],
    winds_95_world,
    winds_95_world[:, :, :extend_bounds]
))

# Make the model (interpolates wind speed over altitude, latitude, and day of year)
winds_95_world_model = InterpolatedModel(
    x_data_coordinates={
        "altitude"   : altitudes_world,
        "latitude"   : latitudes_world,
        "day of year": day_of_year_world,
    },
    y_data_structured=winds_95_world,
)
def wind_speed_world_95(
        altitude,
        latitude,
        day_of_year
):
    """
    Gives the 95th-percentile wind speed as a function of altitude, latitude, and day of year.

    Args:
        altitude: Altitude, in meters
        latitude: Latitude, in degrees north
        day_of_year: Day of year (Julian day), in range 0 to 365

    Returns: The 95th-percentile wind speed, in meters per second.
    """
    # Thin wrapper over the precomputed interpolant built at module load.
    query_point = {
        "altitude"   : altitude,
        "latitude"   : latitude,
        "day of year": day_of_year
    }
    return winds_95_world_model(query_point)
### Prep data for tropopause altitude function
# Import data
latitudes_trop = np.linspace(-80, 80, 50)
# Day-of-year coordinates: midpoints of 12 equal "months" spanning the year.
day_of_year_trop_boundaries = np.linspace(0, 365, 13)
day_of_year_trop = (day_of_year_trop_boundaries[1:] + day_of_year_trop_boundaries[:-1]) / 2
# 2D array of tropopause altitudes [km], indexed (latitude, month).
tropopause_altitude_km = np.genfromtxt(
    root / "datasets" / "winds_and_tropopause_global" / "strat-height-monthly.csv",
    delimiter=","
)

# Extend boundaries (pad the day-of-year axis with wrapped copies so the
# interpolant behaves across the Dec. 31 / Jan. 1 boundary)
extend_bounds = 3
day_of_year_trop = np.hstack((
    day_of_year_trop[-extend_bounds:] - 365,
    day_of_year_trop,
    day_of_year_trop[:extend_bounds] + 365
))
tropopause_altitude_km = np.hstack((
    tropopause_altitude_km[:, -extend_bounds:],
    tropopause_altitude_km,
    tropopause_altitude_km[:, :extend_bounds]
))

# Make the model (interpolates tropopause altitude [m] over latitude and day of year)
tropopause_altitude_model = InterpolatedModel(
    x_data_coordinates={
        "latitude"   : latitudes_trop,
        "day of year": day_of_year_trop
    },
    y_data_structured=tropopause_altitude_km * 1e3  # km -> m
)
def tropopause_altitude(
        latitude,
        day_of_year
):
    """
    Gives the altitude of the tropopause (as determined by the altitude where
    lapse rate >= 2 C/km) as a function of latitude and day of year.

    Args:
        latitude: Latitude, in degrees north
        day_of_year: Day of year (Julian day), in range 0 to 365

    Returns: The tropopause altitude, in meters.
    """
    # Thin wrapper over the precomputed interpolant built at module load.
    query_point = {
        "latitude"   : latitude,
        "day of year": day_of_year
    }
    return tropopause_altitude_model(query_point)
if __name__ == '__main__':
from aerosandbox.tools.pretty_plots import plt, sns, mpl, show_plot
def plot_winds_at_altitude(altitude=18000):
fig, ax = plt.subplots()
day_of_years = np.linspace(0, 365, 150)
latitudes = np.linspace(-80, 80, 120)
Day_of_years, Latitudes = np.meshgrid(day_of_years, latitudes)
winds = wind_speed_world_95(
altitude=altitude * np.ones_like(Latitudes.flatten()),
latitude=Latitudes.flatten(),
day_of_year=Day_of_years.flatten(),
).reshape(Latitudes.shape)
args = [
day_of_years,
latitudes,
winds
]
levels = np.arange(0, 80.1, 5)
CS = plt.contour(*args, levels=levels, linewidths=0.5, colors="k", alpha=0.7)
CF = plt.contourf(*args, levels=levels, cmap='viridis_r', alpha=0.7, extend="max")
cbar = plt.colorbar(label="Wind Speed [m/s]", extendrect=True)
ax.clabel(CS, inline=1, fontsize=9, fmt="%.0f m/s")
plt.xticks(
np.linspace(0, 365, 13)[:-1],
(
"Jan. 1",
"Feb. 1",
"Mar. 1",
"Apr. 1",
"May 1",
"June 1",
"July 1",
"Aug. 1",
"Sep. 1",
"Oct. 1",
"Nov. 1",
"Dec. 1"
),
rotation=40
)
lat_label_vals = np.arange(-80, 80.1, 20)
lat_labels = []
for lat in lat_label_vals:
if lat >= 0:
lat_labels.append(f"{lat:.0f}N")
else:
lat_labels.append(f"{-lat:.0f}S")
plt.yticks(
lat_label_vals,
lat_labels
)
show_plot(
f"95th-Percentile Wind Speeds at {altitude / 1e3:.0f} km Altitude",
xlabel="Day of Year",
ylabel="Latitude",
)
def plot_winds_at_day(day_of_year=0):
fig, ax = plt.subplots()
altitudes = np.linspace(0, 30000, 150)
latitudes = np.linspace(-80, 80, 120)
Altitudes, Latitudes = np.meshgrid(altitudes, latitudes)
winds = wind_speed_world_95(
altitude=Altitudes.flatten(),
latitude=Latitudes.flatten(),
day_of_year=day_of_year * np.ones_like(Altitudes.flatten()),
).reshape(Altitudes.shape)
args = [
altitudes / 1e3,
latitudes,
winds
]
levels = np.arange(0, 80.1, 5)
CS = plt.contour(*args, levels=levels, linewidths=0.5, colors="k", alpha=0.7)
CF = plt.contourf(*args, levels=levels, cmap='viridis_r', alpha=0.7, extend="max")
cbar = plt.colorbar(label="Wind Speed [m/s]", extendrect=True)
ax.clabel(CS, inline=1, fontsize=9, fmt="%.0f m/s")
lat_label_vals = np.arange(-80, 80.1, 20)
lat_labels = []
for lat in lat_label_vals:
if lat >= 0:
lat_labels.append(f"{lat:.0f}N")
else:
lat_labels.append(f"{-lat:.0f}S")
plt.yticks(
lat_label_vals,
lat_labels
)
show_plot(
f"95th-Percentile Wind Speeds at Day {day_of_year:.0f}",
xlabel="Altitude [km]",
ylabel="Latitude",
)
def plot_tropopause_altitude():
    """Contour-plot tropopause altitude as a function of day-of-year and latitude."""
    fig, ax = plt.subplots()

    days = np.linspace(0, 365, 250)
    lats = np.linspace(-80, 80, 200)
    Days, Lats = np.meshgrid(days, lats)

    # Evaluate the tropopause-altitude model pointwise, then restore the grid shape.
    trop_alt = tropopause_altitude(
        Lats.flatten(),
        Days.flatten()
    ).reshape(Lats.shape)

    plot_args = [
        days,
        lats,
        trop_alt / 1e3
    ]
    contour_levels = np.arange(10, 20.1, 1)
    CS = plt.contour(*plot_args, levels=contour_levels, linewidths=0.5, colors="k", alpha=0.7)
    CF = plt.contourf(*plot_args, levels=contour_levels, cmap='viridis_r', alpha=0.7, extend="both")
    cbar = plt.colorbar(label="Tropopause Altitude [km]", extendrect=True)
    ax.clabel(CS, inline=1, fontsize=9, fmt="%.0f km")

    # Mark the first of each month along the day-of-year axis.
    month_starts = np.linspace(0, 365, 13)[:-1]
    month_names = (
        "Jan. 1", "Feb. 1", "Mar. 1", "Apr. 1",
        "May 1", "June 1", "July 1", "Aug. 1",
        "Sep. 1", "Oct. 1", "Nov. 1", "Dec. 1",
    )
    plt.xticks(month_starts, month_names, rotation=40)

    # Label latitudes as "xxN" / "xxS" rather than signed degrees.
    lat_ticks = np.arange(-80, 80.1, 20)
    lat_tick_labels = [
        f"{lat:.0f}N" if lat >= 0 else f"{-lat:.0f}S"
        for lat in lat_ticks
    ]
    plt.yticks(lat_ticks, lat_tick_labels)

    show_plot(
        "Tropopause Altitude by Season and Latitude",
        xlabel="Day of Year",
        ylabel="Latitude",
    )
def plot_winds_at_tropopause_altitude():
    """Contour-plot the 95th-percentile wind speed evaluated at the local tropopause altitude, by day-of-year and latitude."""
    fig, ax = plt.subplots()

    days = np.linspace(0, 365, 150)
    lats = np.linspace(-80, 80, 120)
    Days, Lats = np.meshgrid(days, lats)

    # Evaluate winds at each grid point's own tropopause altitude.
    winds = wind_speed_world_95(
        altitude=tropopause_altitude(Lats.flatten(), Days.flatten()),
        latitude=Lats.flatten(),
        day_of_year=Days.flatten(),
    ).reshape(Lats.shape)

    plot_args = [
        days,
        lats,
        winds
    ]
    contour_levels = np.arange(0, 80.1, 5)
    CS = plt.contour(*plot_args, levels=contour_levels, linewidths=0.5, colors="k", alpha=0.7)
    CF = plt.contourf(*plot_args, levels=contour_levels, cmap='viridis_r', alpha=0.7, extend="max")
    cbar = plt.colorbar(label="Wind Speed [m/s]", extendrect=True)
    ax.clabel(CS, inline=1, fontsize=9, fmt="%.0f m/s")

    # Mark the first of each month along the day-of-year axis.
    month_starts = np.linspace(0, 365, 13)[:-1]
    month_names = (
        "Jan. 1", "Feb. 1", "Mar. 1", "Apr. 1",
        "May 1", "June 1", "July 1", "Aug. 1",
        "Sep. 1", "Oct. 1", "Nov. 1", "Dec. 1",
    )
    plt.xticks(month_starts, month_names, rotation=40)

    # Label latitudes as "xxN" / "xxS" rather than signed degrees.
    lat_ticks = np.arange(-80, 80.1, 20)
    lat_tick_labels = [
        f"{lat:.0f}N" if lat >= 0 else f"{-lat:.0f}S"
        for lat in lat_ticks
    ]
    plt.yticks(lat_ticks, lat_tick_labels)

    show_plot(
        "95th-Percentile Wind Speeds at Tropopause Altitude",
        xlabel="Day of Year",
        ylabel="Latitude",
    )
# Demo driver: uncomment the other calls to regenerate those figures.
# plot_winds_at_altitude(altitude=18000)
# plot_winds_at_day(day_of_year=0)
# plot_tropopause_altitude()
plot_winds_at_tropopause_altitude()
import aerosandbox.numpy as np
def propeller_shaft_power_from_thrust(
        thrust_force,
        area_propulsive,
        airspeed,
        rho,
        propeller_coefficient_of_performance=0.8,
):
    """
    Computes the shaft power required to produce a given thrust, using actuator disc
    (momentum) theory with a viscous-loss knockdown.

    Source: https://web.mit.edu/16.unified/www/FALL/thermodynamics/notes/node86.html

    :param thrust_force: Thrust force [N]
    :param area_propulsive: Total disc area of all propulsive surfaces [m^2]
    :param airspeed: Airspeed [m/s]
    :param rho: Air density [kg/m^3]
    :param propeller_coefficient_of_performance: propeller coeff. of performance (due to viscous losses) [unitless]
    :return: Shaft power [W]
    """
    # Thrust normalized by freestream dynamic pressure acting over the disc area.
    disc_loading_ratio = thrust_force / (area_propulsive * airspeed ** 2 * rho / 2)

    # Ideal (Froude) propulsive power, then knocked down by the viscous efficiency.
    ideal_power = 0.5 * thrust_force * airspeed * (np.sqrt(disc_loading_ratio + 1) + 1)
    return ideal_power / propeller_coefficient_of_performance
def mass_hpa_propeller(
        diameter,
        max_power,
        include_variable_pitch_mechanism=False
):
    """
    Estimated mass of a propeller assembly for low-disc-loading applications
    (human-powered airplane, paramotor, etc.).

    Baselined to a 125 cm E-Props Top 80 paramotor propeller with some sketchy scaling
    assumptions; the diameter exponent and minimum-power floor were chosen so that the
    Daedalus propeller lies roughly on the curve.

    :param diameter: diameter of the propeller [m]
    :param max_power: maximum power of the propeller [W]
    :param include_variable_pitch_mechanism: boolean, does this propeller have a variable pitch mechanism?
    :return: estimated weight [kg]
    """
    diameter_scaling = (diameter / 1.25) ** 1.6
    # Soft lower bound on the power term so that very-low-power props don't trend to zero mass.
    power_scaling = np.softmax(0.6, max_power / 14914, hardness=5) ** 2
    mass = 0.495 * diameter_scaling * power_scaling

    if include_variable_pitch_mechanism:
        # Variable-pitch mechanism mass, by correlation to Daedalus data:
        # http://journals.sfu.ca/ts/index.php/ts/article/viewFile/760/718
        mass = mass + 216.8 / 800 * mass

    return mass
def mass_gearbox(
        power,
        rpm_in,
        rpm_out,
):
    """
    Estimates the mass of a gearbox.

    Correlation based on actual weight data from over fifty rotorcraft, tiltrotors, and
    turboprop aircraft (NASA/TM-2009-215680,
    https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20090042817.pdf); R^2 = 0.92
    to the data. The fits in the NASA document were thrown out and refitted to
    extrapolate more sensibly; see C:\Projects\GitHub\AeroSandbox\studies\GearboxMassFits

    :param power: Shaft power through the gearbox [W]
    :param rpm_in: RPM of the input to the gearbox [rpm]
    :param rpm_out: RPM of the output of the gearbox [rpm]
    :return: Estimated mass of the gearbox [kg]
    """
    power_hp = power / 745.7

    # "beta" is a parametric value that tends to collapse gearbox masses onto one line.
    # The data fit is considered tightly valid for 1 < beta < 100; sensible
    # extrapolations are made beyond that.
    beta = (power_hp / rpm_out) ** 0.75 * (rpm_in / rpm_out) ** 0.15

    slope = 1.0445171124733774
    intercept = 2.0083615496306910
    mass_lb = 10 ** (slope * np.log10(beta) + intercept)

    return mass_lb / 2.20462262185  # convert pounds -> kilograms
if __name__ == '__main__':
    # Self-test / demo of the models above.
    import matplotlib.style as style

    style.use("seaborn")  # NOTE(review): this style name was renamed to "seaborn-v0_8" in matplotlib >= 3.6 — confirm version pinning
    # Daedalus propeller
    print(
        mass_hpa_propeller(
            diameter=3.4442,
            max_power=177.93 * 8.2,  # max thrust at cruise speed
            include_variable_pitch_mechanism=False
        )
    )  # Should weight ca. 800 grams
    # Gearbox sanity check: 3 kW, 10:1 reduction.
    print(mass_gearbox(
        power=3000,
        rpm_in=6000,
        rpm_out=600
    ))
from aerosandbox.geometry.airfoil import Airfoil
from aerosandbox.library.aerodynamics.viscous import *
from aerosandbox.geometry.airfoil.airfoil_families import get_NACA_coordinates, \
get_UIUC_coordinates
def diamond_airfoil(
        t_over_c: float,
        n_points_per_panel=2,
) -> Airfoil:
    """
    Constructs a diamond (double-wedge) airfoil of the given thickness-to-chord ratio.

    The outline runs TE -> upper apex -> LE -> lower apex -> TE, with each of the four
    panels discretized by cosine spacing.
    """
    # Nondimensional corner points of the diamond outline.
    corners_x = [1, 0.5, 0, 0.5, 1]
    corners_y = [0, 1, 0, -1, 0]

    def _panelize(corner_vals):
        # Cosine-space each panel, dropping the duplicated shared endpoint of each
        # segment, then re-append the final corner so the outline closes.
        segments = [
            list(np.cosspace(start, end, n_points_per_panel))[:-1]
            for start, end in zip(corner_vals[:-1], corner_vals[1:])
        ]
        return np.concatenate(segments + [[corner_vals[-1]]])

    x = _panelize(corners_x)
    y = _panelize(corners_y) * (t_over_c / 2)  # scale unit half-thickness to t/c

    return Airfoil(
        name="Diamond",
        coordinates=np.array([x, y]).T,
    )
# ------------------------------------------------------------------------------
# Pre-built Airfoil objects with simple surrogate aerodynamics models attached.
# Each C*_function takes (alpha [deg], Re, mach) and returns the coefficient.
# ------------------------------------------------------------------------------

generic_cambered_airfoil = Airfoil(
    name="Generic Cambered Airfoil",
    CL_function=lambda alpha, Re, mach: (  # Lift coefficient function
        # Thin-airfoil-theory lift slope (2*pi per radian) plus a camber offset.
        (alpha * np.pi / 180) * (2 * np.pi) + 0.4550
    ),
    CD_function=lambda alpha, Re, mach: (  # Profile drag coefficient function
        # Flat-plate skin friction (both sides), with a quadratic alpha penalty.
        (1 + (alpha / 5) ** 2) * 2 * Cf_flat_plate(Re_L=Re)
    ),
    CM_function=lambda alpha, Re, mach: (  # Moment coefficient function about quarter-chord
        -0.1
    ),
    coordinates=get_UIUC_coordinates(name="clarky")
)
generic_airfoil = Airfoil(
    name="Generic Airfoil",
    CL_function=lambda alpha, Re, mach: (  # Lift coefficient function
        (alpha * np.pi / 180) * (2 * np.pi)
    ),
    CD_function=lambda alpha, Re, mach: (  # Profile drag coefficient function
        (1 + (alpha / 5) ** 2) * 2 * Cf_flat_plate(Re_L=Re)
    ),
    CM_function=lambda alpha, Re, mach: (  # Moment coefficient function about quarter-chord
        0
    ),  # TODO make this an actual curve!
    coordinates=get_NACA_coordinates(name="naca0012")
)
e216 = Airfoil(
    name="e216",
    CL_function=lambda alpha, Re, mach: (  # Lift coefficient function
        Cl_e216(alpha=alpha, Re_c=Re)
    ),
    CD_function=lambda alpha, Re, mach: (  # Profile drag coefficient function
        # Profile drag plus transonic wave drag.
        Cd_profile_e216(alpha=alpha, Re_c=Re) +
        Cd_wave_e216(Cl=Cl_e216(alpha=alpha, Re_c=Re), mach=mach)
    ),
    CM_function=lambda alpha, Re, mach: (  # Moment coefficient function about quarter-chord
        -0.15
    ),  # TODO make this an actual curve!
)
rae2822 = Airfoil(
    name="rae2822",
    CL_function=lambda alpha, Re, mach: (  # Lift coefficient function
        Cl_rae2822(alpha=alpha, Re_c=Re)
    ),
    CD_function=lambda alpha, Re, mach: (  # Profile drag coefficient function
        # Profile drag plus transonic wave drag.
        Cd_profile_rae2822(alpha=alpha, Re_c=Re) +
        Cd_wave_rae2822(Cl=Cl_rae2822(alpha=alpha, Re_c=Re), mach=mach)
    ),
    CM_function=lambda alpha, Re, mach: (  # Moment coefficient function about quarter-chord
        -0.05
    ),  # TODO make this an actual curve!
)
naca0008 = Airfoil(
    name="naca0008",
    CL_function=lambda alpha, Re, mach: (  # Lift coefficient function
        Cl_flat_plate(alpha=alpha)  # TODO fit this to actual data
    ),
    CD_function=lambda alpha, Re, mach: (  # Profile drag coefficient function
        (1 + (alpha / 5) ** 2) * 2 * Cf_flat_plate(Re_L=Re) +  # TODO fit this to actual data
        # Korn-equation wave drag for an 8%-thick section.
        Cd_wave_Korn(Cl=Cl_flat_plate(alpha=alpha), t_over_c=0.08, mach=mach, kappa_A=0.87)
    ),
    CM_function=lambda alpha, Re, mach: (  # Moment coefficient function about quarter-chord
        0
    ),  # TODO make this an actual curve!
)
flat_plate = Airfoil(
    name="Flat Plate",
    CL_function=lambda alpha, Re, mach: (  # Lift coefficient function
        Cl_flat_plate(alpha=alpha)
    ),
    CD_function=lambda alpha, Re, mach: (  # Profile drag coefficient function
        Cf_flat_plate(Re_L=Re) * 2
    ),
    CM_function=lambda alpha, Re, mach: (  # Moment coefficient function
        0
    ),
    # Degenerate "coordinates": a closed outline of essentially-zero thickness.
    coordinates=np.array([
        [1, 0],
        [1, 1e-6],
        [0, 1e-6],
        [0, -1e-6],
        [1, -1e-6],
        [1, 0],
    ])
)
import aerosandbox.numpy as np
from aerosandbox.tools import units as u
from typing import Union, Dict
def motor_electric_performance(
        voltage: Union[float, np.ndarray] = None,
        current: Union[float, np.ndarray] = None,
        rpm: Union[float, np.ndarray] = None,
        torque: Union[float, np.ndarray] = None,
        kv: float = 1000.,  # rpm/volt
        resistance: float = 0.1,  # ohms
        no_load_current: float = 0.4  # amps
) -> Dict[str, Union[float, np.ndarray]]:
    """
    Predicts the performance of an electric motor, using Mark Drela's first-order motor
    model: http://web.mit.edu/drela/Public/web/qprop/motor1_theory.pdf

    Supply EXACTLY TWO of (voltage, current, rpm, torque); the remaining two are solved
    for. Exception: the (current, torque) pair is disallowed, as that combination makes
    for an ill-posed problem. All inputs are fully vectorizable.

    Args:
        voltage: Voltage across motor terminals [Volts]
        current: Current through motor [Amps]
        rpm: Motor rotation speed [rpm]
        torque: Motor torque [N m]
        kv: voltage constant, in rpm/volt
        resistance: winding resistance, in ohms
        no_load_current: no-load current, in amps

    Returns:
        A dictionary with keys "voltage", "current", "rpm", "torque", "shaft power",
        "electrical power", "efficiency", and "waste heat", with values in SI units.

        Note that "efficiency" is (shaft power) / (electrical power), and hence
        implicitly assumes motoring operation (electrical -> shaft power). For generator
        operation, compute (electrical power) / (shaft power) yourself.
    """
    ### Figure out which of the four quantities were supplied.
    voltage_known = voltage is not None
    current_known = current is not None
    rpm_known = rpm is not None
    torque_known = torque is not None

    if voltage_known + current_known + rpm_known + torque_known != 2:
        raise ValueError("You must give exactly two input arguments.")

    if current_known and torque_known:
        raise ValueError(
            "You cannot supply the combination of current and torque - this makes for an ill-posed problem.")

    kv_si = kv * np.pi / 30  # motor constant in (rad/s) per volt

    ### Repeatedly apply the motor equations until all four quantities are known.
    while not (voltage_known and current_known and rpm_known and torque_known):
        if rpm_known and current_known and not voltage_known:
            # Terminal voltage = back-EMF + resistive drop.
            back_emf = (rpm * np.pi / 30) / kv_si
            voltage = back_emf + current * resistance
            voltage_known = True

        if torque_known and not current_known:
            # Torque-producing current plus the no-load (friction/iron-loss) current.
            current = torque * kv_si + no_load_current
            current_known = True

        if voltage_known and rpm_known and not current_known:
            back_emf = (rpm * np.pi / 30) / kv_si
            current = (voltage - back_emf) / resistance
            current_known = True

        if voltage_known and current_known and not rpm_known:
            back_emf = voltage - (current * resistance)
            rpm = back_emf * kv_si * 30 / np.pi
            rpm_known = True

        if current_known and not torque_known:
            torque = (current - no_load_current) / kv_si
            torque_known = True

    shaft_power = (rpm * np.pi / 30) * torque
    electrical_power = voltage * current
    efficiency = shaft_power / electrical_power
    waste_heat = np.fabs(electrical_power - shaft_power)

    return {
        "voltage"         : voltage,
        "current"         : current,
        "rpm"             : rpm,
        "torque"          : torque,
        "shaft power"     : shaft_power,
        "electrical power": electrical_power,
        "efficiency"      : efficiency,
        "waste heat"      : waste_heat,
    }
def electric_propeller_propulsion_analysis(
        total_thrust,
        n_engines,
        propeller_diameter,
        op_point,
        propeller_tip_mach,
        motor_kv,
        motor_no_load_current,
        motor_resistance,
        wire_resistance,
        battery_voltage,
        gearbox_ratio=1,
        gearbox_efficiency=1,
        esc_efficiency=0.98,
        battery_discharge_efficiency=0.985,
) -> Dict[str, float]:
    """
    Analyzes an electric propeller propulsion system (propeller -> gearbox -> motor ->
    ESC -> wiring -> battery) at a single operating point.

    Args:
        total_thrust: Total thrust of all propellers, combined. [N]
        n_engines: Number of propeller/motor units. [-]
        propeller_diameter: Diameter of each propeller. [m]
        op_point: The operating point (an asb.OperatingPoint) at which to analyze.
        propeller_tip_mach: Mach number at the propeller tip. [-]
        motor_kv: Motor voltage constant. [rpm/volt]
        motor_no_load_current: Motor no-load current. [A]
        motor_resistance: Motor winding resistance. [ohms]
        wire_resistance: Total resistance of the power-transmission wiring. [ohms]
        battery_voltage: Battery bus voltage. [V]
        gearbox_ratio: Gearbox ratio; note that motor rpm = propeller rpm / gearbox_ratio.
        gearbox_efficiency: Gearbox mechanical efficiency. [-]
        esc_efficiency: Electronic speed controller efficiency. [-]
        battery_discharge_efficiency: Battery discharge efficiency. [-]

    Returns:
        A dictionary of every local variable (inputs and intermediates), keyed by name.
        Notable keys: "shaft_power", "motor_electrical_power", "battery_power",
        "battery_current", "overall_efficiency".
    """
    ### Propeller Analysis
    propulsive_area_per_propeller = (np.pi / 4) * propeller_diameter ** 2
    propulsive_area_total = propulsive_area_per_propeller * n_engines

    # Actuator-disc pressure jump spread across all discs.
    propeller_wake_dynamic_pressure = op_point.dynamic_pressure() + total_thrust / propulsive_area_total

    propeller_wake_velocity = (
        # Derived from the above pressure jump relation, with adjustments to avoid singularity at zero velocity
            2 * total_thrust / (propulsive_area_total * op_point.atmosphere.density())
            + op_point.velocity ** 2
    ) ** 0.5

    propeller_tip_speed = propeller_tip_mach * op_point.atmosphere.speed_of_sound()
    propeller_rads_per_sec = propeller_tip_speed / (propeller_diameter / 2)
    propeller_rpm = propeller_rads_per_sec * 60 / (2 * np.pi)

    propeller_advance_ratio = op_point.velocity / propeller_tip_speed

    air_power = total_thrust * op_point.velocity

    # BUGFIX: this must be the package-qualified import path. The previous bare
    # `from propulsion_propeller import ...` only resolved when this file's own
    # directory happened to be on sys.path, and raised ModuleNotFoundError when this
    # module was imported as part of the `aerosandbox` package.
    from aerosandbox.library.propulsion_propeller import propeller_shaft_power_from_thrust

    shaft_power = propeller_shaft_power_from_thrust(
        thrust_force=total_thrust,
        area_propulsive=propulsive_area_total,
        airspeed=op_point.velocity,
        rho=op_point.atmosphere.density(),
        propeller_coefficient_of_performance=0.90,
    )

    propeller_efficiency = air_power / shaft_power

    ### Motor Analysis
    motor_rpm = propeller_rpm / gearbox_ratio
    motor_rads_per_sec = motor_rpm * 2 * np.pi / 60
    motor_torque_per_motor = shaft_power / n_engines / motor_rads_per_sec / gearbox_efficiency

    motor_parameters_per_motor = motor_electric_performance(
        rpm=motor_rpm,
        torque=motor_torque_per_motor,
        kv=motor_kv,
        no_load_current=motor_no_load_current,
        resistance=motor_resistance,
    )

    motor_electrical_power = motor_parameters_per_motor["electrical power"] * n_engines
    motor_efficiency = shaft_power / motor_electrical_power

    ### ESC Analysis
    esc_electrical_power = motor_electrical_power / esc_efficiency

    ### Wire Analysis
    # I^2 * R loss, with current estimated at the battery bus voltage.
    wire_power_loss = (esc_electrical_power / battery_voltage) ** 2 * wire_resistance
    wire_efficiency = esc_electrical_power / (esc_electrical_power + wire_power_loss)

    ### Battery Analysis
    battery_power = (esc_electrical_power + wire_power_loss) / battery_discharge_efficiency
    battery_current = battery_power / battery_voltage

    ### Overall
    overall_efficiency = air_power / battery_power

    # Intentionally returns every local (inputs and intermediates) for inspection.
    return locals()
def motor_resistance_from_no_load_current(
        no_load_current
):
    """
    Estimates a motor's internal resistance from its no-load current, via a power-law
    regression. Gates quotes R^2 = 0.93 for this model.

    Source: Gates, et. al., "Combined Trajectory, Propulsion, and Battery Mass Optimization for Solar-Regen..."
    https://scholarsarchive.byu.edu/cgi/viewcontent.cgi?article=3932&context=facpub

    Args:
        no_load_current: No-load current [amps]

    Returns:
        motor internal resistance [ohms]
    """
    coefficient = 0.0467
    exponent = -1.892
    return coefficient * no_load_current ** exponent
def mass_ESC(
        max_power,
):
    """
    Estimates the mass of an electronic speed controller (ESC).

    Based on an informal correlation to Hobbyking ESCs in the 8S-LiPo, 100 A class.

    Args:
        max_power: maximum power handled by the ESC [W]

    Returns:
        estimated ESC mass [kg]
    """
    specific_mass = 2.38e-5  # kg of ESC per watt handled
    return specific_mass * max_power
def mass_battery_pack(
        battery_capacity_Wh,
        battery_cell_specific_energy_Wh_kg=240,
        battery_pack_cell_fraction=0.7,
):
    """
    Estimates the mass of a lithium-polymer battery pack.

    Args:
        battery_capacity_Wh: Battery capacity, in Watt-hours [W*h]

        battery_cell_specific_energy_Wh_kg: Specific energy of the battery at the CELL level [W*h/kg]

        battery_pack_cell_fraction: Fraction of the battery pack that is cells, by weight.
            (Ed Lovelace, a battery engineer for Aurora Flight Sciences, gives this
            figure as 0.70 in a Feb. 2020 presentation for MIT 16.82.)

    Returns:
        Estimated battery pack mass [kg]
    """
    cell_mass = battery_capacity_Wh / battery_cell_specific_energy_Wh_kg
    return cell_mass / battery_pack_cell_fraction
def mass_motor_electric(
        max_power,
        kv_rpm_volt=1000,  # This is in rpm/volt, not rads/sec/volt!
        voltage=20,
        method="hobbyking"
):
    """
    Estimates the mass of a brushless DC electric motor.

    Curve fit to scraped Hobbyking BLDC motor data as of 2/24/2020.
    Estimated range of validity: 50 < max_power < 10000

    Args:
        max_power (float): maximum power [W]

        kv_rpm_volt (float): Voltage constant of the motor, measured in rpm/volt, not
            rads/sec/volt! [rpm/volt] (Only used by the "astroflight" method.)

        voltage (float): Operating voltage of the motor [V]
            (Only used by the "astroflight" method.)

        method (str): method to use. "burton", "hobbyking", or "astroflight" (increasing level of detail).

            * Burton source: https://dspace.mit.edu/handle/1721.1/112414

            * Hobbyking source: C:\Projects\GitHub\MotorScraper,

            * Astroflight source: Gates, et. al., "Combined Trajectory, Propulsion, and Battery Mass Optimization for
              Solar-Regen..." https://scholarsarchive.byu.edu/cgi/viewcontent.cgi?article=3932&context=facpub
              Validity claimed from 1.5 kW to 15 kW, kv from 32 to 1355.

    Returns:
        Estimated motor mass [kg]

    Raises:
        ValueError: if `method` is not one of the recognized model names.
    """
    if method == "burton":
        # Less sophisticated model. 95% CI (3992, 4263), R^2 = 0.866
        return max_power / 4128
    elif method == "hobbyking":
        # More sophisticated model
        return 10 ** (0.8205 * np.log10(max_power) - 3.155)
    elif method == "astroflight":
        # Even more sophisticated model
        max_current = max_power / voltage
        return 2.464 * max_current / kv_rpm_volt + 0.368
    else:
        # BUGFIX: previously, an unrecognized method fell through and silently
        # returned None. Raise instead, consistent with mass_wires()'s handling
        # of a bad 'material' argument.
        raise ValueError("Bad value of 'method'!")
def mass_wires(
        wire_length,
        max_current,
        allowable_voltage_drop,
        material="aluminum",
        insulated=True,
        max_voltage=600,
        wire_packing_factor=1,
        insulator_density=1700,
        insulator_dielectric_strength=12e6,
        insulator_min_thickness=0.2e-3,  # silicone wire
        return_dict: bool = False
):
    """
    Estimates the mass of wires used for power transmission.

    The conductor cross-section is sized so that its resistance yields exactly the
    allowable voltage drop at the maximum current; the insulator (if any) is sized by
    the dielectric strength needed to stand off `max_voltage`.

    Materials data from: https://en.wikipedia.org/wiki/Electrical_resistivity_and_conductivity#Resistivity-density_product
    All data measured at STP; beware, as this data (especially resistivity) can be a
    strong function of temperature.

    Args:
        wire_length (float): Length of the wire [m]

        max_current (float): Max current of the wire [Amps]

        allowable_voltage_drop (float): How much is the voltage allowed to drop along the wire?

        material (str): Conductive material of the wire ("aluminum"). Determines density
            and resistivity. One of: "sodium", "lithium", "calcium", "potassium",
            "beryllium", "aluminum", "magnesium", "copper", "silver", "gold", "iron".

        insulated (bool): Should we add the mass of the wire's insulator coating?
            Usually you'll want to leave this True.

        max_voltage (float): Maximum allowable voltage (used for sizing insulator).
            600 is a common off-the-shelf rating.

        wire_packing_factor (float): What fraction of the enclosed cross section is
            conductor? This is 1 for solid wire, and less for stranded wire.

        insulator_density (float): Density of the insulator [kg/m^3]

        insulator_dielectric_strength (float): Dielectric strength of the insulator
            [V/m]. The default value of 12e6 corresponds to rubber.

        insulator_min_thickness (float): Minimum thickness of the insulator [m]. This is
            essentially a gauge limit. The default value is 0.2 mm.

        return_dict (bool): If True, returns a dictionary of all local variables. If
            False, just returns the wire mass as a float. Defaults to False.

    Returns: If `return_dict` is False (default), returns the wire mass as a single
        number. If `return_dict` is True, returns a dictionary of all local variables.

    Raises:
        ValueError: if `material` is not one of the recognized names.
    """
    ### Conductor material properties: density [kg/m^3] and resistivity [ohm-m], at STP.
    if material == "sodium":  # highly reactive with water & oxygen, low physical strength
        density, resistivity = 970, 47.7e-9
    elif material == "lithium":  # highly reactive with water & oxygen, low physical strength
        density, resistivity = 530, 92.8e-9
    elif material == "calcium":  # highly reactive with water & oxygen, low physical strength
        density, resistivity = 1550, 33.6e-9
    elif material == "potassium":  # highly reactive with water & oxygen, low physical strength
        density, resistivity = 890, 72.0e-9
    elif material == "beryllium":  # toxic, brittle
        density, resistivity = 1850, 35.6e-9
    elif material == "aluminum":
        density, resistivity = 2700, 26.50e-9
    elif material == "magnesium":  # worse specific conductivity than aluminum
        density, resistivity = 1740, 43.90e-9
    elif material == "copper":  # worse specific conductivity than aluminum, moderately expensive
        density, resistivity = 8960, 16.78e-9
    elif material == "silver":  # worse specific conductivity than aluminum, expensive
        density, resistivity = 10490, 15.87e-9
    elif material == "gold":  # worse specific conductivity than aluminum, very expensive
        density, resistivity = 19300, 22.14e-9
    elif material == "iron":  # worse specific conductivity than aluminum
        density, resistivity = 7874, 96.1e-9
    else:
        raise ValueError("Bad value of 'material'!")

    ### Conductor mass: size the cross-section to hit the allowable voltage drop.
    resistance = allowable_voltage_drop / max_current
    area_conductor = resistivity * wire_length / resistance
    volume_conductor = area_conductor * wire_length
    mass_conductor = volume_conductor * density

    ### Insulator mass: thickness set by dielectric standoff, with a soft gauge floor.
    if insulated:
        insulator_thickness = np.softmax(
            4.0 * max_voltage / insulator_dielectric_strength,
            insulator_min_thickness,
            softness=0.005 * u.inch,
        )
        radius_conductor = (area_conductor / wire_packing_factor / np.pi) ** 0.5
        radius_insulator = radius_conductor + insulator_thickness
        area_insulator = np.pi * radius_insulator ** 2 - area_conductor
        volume_insulator = area_insulator * wire_length
        mass_insulator = insulator_density * volume_insulator
    else:
        mass_insulator = 0

    ### Total them up
    mass_total = mass_conductor + mass_insulator

    if return_dict:
        return locals()
    else:
        return mass_total
if __name__ == '__main__':
    # Self-test / demo of the models above.

    # Motor model: solve from (rpm, current) and from (rpm, torque).
    print(motor_electric_performance(
        rpm=100,
        current=3
    ))
    print(motor_electric_performance(
        rpm=4700,
        torque=0.02482817
    ))

    # Battery pack mass for a 100 Wh pack at default assumptions.
    print(
        mass_battery_pack(100)
    )

    # Compare the three motor-mass models across a sweep of motor powers.
    pows = np.logspace(2, 5, 300)
    mass_mot_burton = mass_motor_electric(pows, method="burton")
    mass_mot_hobbyking = mass_motor_electric(pows, method="hobbyking")
    mass_mot_astroflight = mass_motor_electric(pows, method="astroflight")

    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p

    fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
    plt.loglog(pows, np.array(mass_mot_burton), "-", label="Burton Model")
    plt.plot(pows, np.array(mass_mot_hobbyking), "--", label="Hobbyking Model")
    plt.plot(pows, np.array(mass_mot_astroflight), "-.", label="Astroflight Model")
    p.show_plot(
        "Small Electric Motor Mass Models\n(500 kv, 100 V)",
        "Motor Power [W]",
        "Motor Mass [kg]"
    )

    # Wire mass sanity check: 1 m of aluminum wire, 100 A, 1 V allowable drop.
    print(mass_wires(
        wire_length=1,
        max_current=100,
        allowable_voltage_drop=1,
        material="aluminum"
    ))
import aerosandbox.numpy as np
def overall_pressure_ratio_turboshaft_technology_limit(
        mass_turboshaft: float
) -> float:
    """
    Estimates the maximum practically-achievable overall pressure ratio (OPR) of a
    turboshaft engine as a function of its mass — a regression to an envelope of
    historical civilian and military turboshaft data (including RC-scale turboshafts),
    available in `aerosandbox/library/datasets/turbine_engines/data.xlsx`.

    See study in `/AeroSandbox/studies/TurboshaftStudies/make_fit_overall_pressure_ratio.py` for model details.

    Args:
        mass_turboshaft: The mass of the turboshaft engine. [kg]

    Returns:
        The maximum-practically-achievable overall pressure ratio (OPR) of the turboshaft engine. [-]
    """
    # Fitted sigmoid-blend parameters: log-mass scale, log-mass center, asymptotic OPR.
    log_mass_scale = 1.0222956615376533
    log_mass_center = 1.6535195257959798
    opr_asymptote = 23.957335474997656

    # Blend from OPR = 1 (tiny engines) up to the asymptote (large engines) in log-mass.
    return np.blend(
        np.log10(mass_turboshaft) / log_mass_scale - log_mass_center,
        value_switch_high=opr_asymptote,
        value_switch_low=1,
    )
def power_turboshaft(
        mass_turboshaft: float,
        overall_pressure_ratio: float = None,
) -> float:
    """
    Estimates the maximum rated (takeoff) power of a turboshaft engine from its mass —
    a regression to historical civilian and military turboshaft data, available in
    `aerosandbox/library/datasets/turbine_engines/data.xlsx`.

    See studies in `/AeroSandbox/studies/TurboshaftStudies/make_fit_power.py` for model details.

    Args:
        mass_turboshaft: The mass of the turboshaft engine. [kg]

        overall_pressure_ratio: The overall pressure ratio of the turboshaft engine. [-]
            If unspecified, defaults to 70% of the technology-envelope OPR for an
            engine of this mass.

    Returns:
        The maximum (rated takeoff) power of the turboshaft engine. [W]
    """
    if overall_pressure_ratio is None:
        # Sensible default: technology limit with a 0.7x knockdown.
        overall_pressure_ratio = 0.7 * overall_pressure_ratio_turboshaft_technology_limit(
            mass_turboshaft
        )

    # Power-law fit: P = a * mass^b_mass * OPR^b_opr
    coeff = 1674.9411795202134
    exponent_opr = 0.5090953411025091
    exponent_mass = 0.9418482117552568

    return coeff * mass_turboshaft ** exponent_mass * overall_pressure_ratio ** exponent_opr
def thermal_efficiency_turboshaft(
        mass_turboshaft: float,
        overall_pressure_ratio: float = None,
        throttle_setting: float = 1,
) -> float:
    """
    Estimates the thermal efficiency of a turboshaft engine — a regression to
    historical civilian and military turboshaft data, available in
    `aerosandbox/library/datasets/turbine_engines/data.xlsx`.

    See studies in `/AeroSandbox/studies/TurboshaftStudies/make_turboshaft_fits.py` for model details.

    The partial-power efficiency knockdown follows: Ingmar Geiß, "Sizing of the Series
    Hybrid-electric Propulsion System of General Aviation Aircraft", 2020. PhD Thesis,
    University of Stuttgart. Page 18, Figure 3.2.

    Args:
        mass_turboshaft: The mass of the turboshaft engine. [kg]

        overall_pressure_ratio: The overall pressure ratio of the turboshaft engine. [-]
            If unspecified, defaults to 70% of the technology-envelope OPR for an
            engine of this mass.

        throttle_setting: The throttle setting of the turboshaft engine. [-]
            1 is full throttle, 0 is no throttle.

    Returns:
        The thermal efficiency of the turboshaft engine. [-]
    """
    if overall_pressure_ratio is None:
        overall_pressure_ratio = 0.7 * overall_pressure_ratio_turboshaft_technology_limit(
            mass_turboshaft
        )

    # Ideal Brayton-cycle efficiency for this OPR (gamma = 1.4).
    ideal_efficiency = 1 - (1 / overall_pressure_ratio) ** ((1.4 - 1) / 1.4)

    # Fraction of the ideal efficiency actually achieved, blended in log-mass.
    offset = 0.12721246565294902
    log_mass_center = 2.679474077211383
    log_mass_scale = 4.10824884208311
    efficiency_at_full_power = np.blend(
        offset + (np.log10(mass_turboshaft) - log_mass_center) / log_mass_scale,
        value_switch_high=ideal_efficiency,
        value_switch_low=0,
    )

    # Cubic partial-power knockdown (Geiß). B0 modified from the thesis value so the
    # coefficients sum to 1 by construction. Orig: 0.05658
    b0 = 0.0592
    b1 = 2.567
    b2 = -2.612
    b3 = 0.9858
    partial_power_factor = (
            b0
            + b1 * throttle_setting
            + b2 * throttle_setting ** 2
            + b3 * throttle_setting ** 3
    )

    return efficiency_at_full_power * partial_power_factor
if __name__ == '__main__':
    # Demo plots for the turboshaft models above.
    import matplotlib.pyplot as plt
    import aerosandbox.tools.pretty_plots as p

    # Partial-power efficiency knockdown, normalized to the full-throttle design point.
    fig, ax = plt.subplots()
    x = np.linspace(0, 1)
    plt.plot(
        x,
        thermal_efficiency_turboshaft(1000, throttle_setting=x) / thermal_efficiency_turboshaft(1000),
    )
    ax.xaxis.set_major_formatter(p.ticker.PercentFormatter(1))
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    p.set_ticks(0.1, 0.05, 0.1, 0.05)
    p.show_plot(
        "Turboshaft: Thermal Efficiency at Partial Power",
        "Throttle Setting [-]",
        "Thermal Efficiency Knockdown\nrelative to Design Point [-]\n$\eta / \eta_\mathrm{max}$"
    )

    ##### Do Weight/OPR Efficiency Plot #####
    fig, ax = plt.subplots()
    mass = np.geomspace(1e0, 1e4, 300)
    opr = np.geomspace(1, 100, 500)
    Mass, Opr = np.meshgrid(mass, opr)
    # Mask out OPRs above the technology envelope for each mass.
    Mask = overall_pressure_ratio_turboshaft_technology_limit(Mass) > Opr
    cont, contf, cbar = p.contour(
        Mass,
        Opr,
        thermal_efficiency_turboshaft(Mass, Opr),
        mask=Mask,
        linelabels_format=lambda x: f"{x:.0%}",
        x_log_scale=True,
        colorbar_label="Thermal Efficiency [%]",
        cmap="turbo_r",
    )
    cbar.ax.yaxis.set_major_formatter(p.ticker.PercentFormatter(1, decimals=0))
    p.set_ticks(None, None, 5, 1)
    p.show_plot(
        "Turboshaft Model: Thermal Efficiency vs. Weight and OPR",
        "Engine Weight [kg]",
        "Overall Pressure Ratio [-]",
        dpi=300
    )
import aerosandbox as asb
import aerosandbox.numpy as np
import aerosandbox.tools.units as u
from typing import Dict
def field_length_analysis_torenbeek(
        design_mass_TOGW: float,
        thrust_at_liftoff: float,
        lift_over_drag_climb: float,
        CL_max: float,
        s_ref: float,
        n_engines: int,
        atmosphere: asb.Atmosphere = None,
        CD_zero_lift: float = 0.03,
        obstacle_height: float = 35 * u.foot,
        friction_coefficient: float = 0.02,
        V_obstacle_over_V_stall: float = 1.3,
        minimum_V_liftoff_over_V_stall: float = 1.2,
        V_approach_over_V_stall: float = 1.3,
        maximum_braking_deceleration_g: float = 0.37,
        inertia_time: float = 4.5,
        approach_angle_deg: float = 3,
) -> Dict[str, float]:
    """
    Performs a field length analysis on an aircraft, returning a dictionary of field length parameters.

    Citations:

        * "Torenbeek": Egbert Torenbeek, "Synthesis of Subsonic Airplane Design", 1976. (Generally,
          section 5.4.5: Takeoff)

    Args:
        design_mass_TOGW: The takeoff gross weight of the entire aircraft [kg].

        thrust_at_liftoff: The thrust of the aircraft at the moment of liftoff [N].

        lift_over_drag_climb: The lift-to-drag ratio of the aircraft during the climb phase of takeoff
            [dimensionless].

        CL_max: The maximum lift coefficient of the aircraft [dimensionless]. Assumes any
            lift-augmentation devices (e.g., slats, flaps) are deployed.

        s_ref: The reference area of the aircraft [m^2].

        atmosphere: The atmosphere object to use for the analysis. Defaults to sea level.

        n_engines: The number of engines on the aircraft. Used during balanced field length
            calculation, which involves a single-engine-failure assumption.

        CD_zero_lift: The zero-lift drag coefficient of the aircraft [dimensionless].

        obstacle_height: The height of the obstacle clearance [m].

            * FAR 23 requires a 50 foot obstacle clearance height.
            * FAR 25 requires a 35 foot obstacle clearance height.

        friction_coefficient: The coefficient of friction between the wheels and the runway.

            * 0.02 is a good value for a dry concrete runway.
            * 0.045 is a good value for short grass.

        V_obstacle_over_V_stall: The ratio of the airspeed while flying over the obstacle to the
            stall airspeed.

        minimum_V_liftoff_over_V_stall: The minimum-allowable ratio of the liftoff airspeed to the
            stall airspeed.

        V_approach_over_V_stall: The ratio of the approach airspeed to the stall airspeed.

        maximum_braking_deceleration_g: The maximum deceleration of the aircraft during braking [G].
            This is used when calculating the "brake" portion of the "accelerate-brake" balanced
            field length, as well as the braking during normal landing.

            * Standard brakes are around 0.37 G on dry concrete.
            * Advanced brakes with optimum brake pressure control, lift dumpers, and nosewheel
              braking can be as high as 0.5 G on dry concrete.

        inertia_time: The time it takes for the pilot and aircraft to collectively react to an
            engine failure during takeoff [seconds]. This is collectively the sum of:

            * The pilot's reaction time

            * The time it takes the other engines to spool down, in the event of a rejected takeoff
              and deceleration on the ground.

        approach_angle_deg: The glide-path angle flown on final approach during a normal landing
            [degrees].

    Returns:
        A dictionary of field length parameters, including:

        * "takeoff_ground_roll_distance": The distance the aircraft will roll on the ground during a
          normal takeoff before liftoff [meters].
        * "takeoff_airborne_distance": The distance the aircraft will travel in the air during a
          normal takeoff [meters]. This is after liftoff, but before the aircraft has reached the
          obstacle clearance height.
        * "takeoff_total_distance": The total field length required during a normal takeoff
          [meters]. This includes both the ground roll itself, as well as the airborne distance
          before the obstacle clearance height is reached.
        * "balanced_field_length": The field length required for takeoff and obstacle clearance when
          one engine fails at "decision speed" [meters]. Decision speed is the speed during the
          ground roll at which, if an engine fails, the aircraft can either continue the takeoff or
          brake to a complete stop in the same total distance.
        * "landing_airborne_distance": The distance the aircraft will travel in the air during a
          normal landing before touchdown [meters]. Note that a normal landing involves passing the
          runway threshold at the specified obstacle clearance height.
        * "landing_ground_roll_distance": The distance the aircraft will roll on the ground after
          touchdown during a normal landing [meters].
        * "landing_total_distance": The total field length required during a normal landing
          [meters]. This includes both the airborne distance beyond the threshold that is required
          for obstacle clearance, as well as the ground roll distance after touchdown.
        * "V_stall": The stall speed of the aircraft at its takeoff gross weight [m/s].
        * "V_liftoff": The airspeed at the moment of liftoff during a normal takeoff [m/s].
        * "V_obstacle": The airspeed when the aircraft reaches the obstacle clearance height during
          a normal takeoff [m/s].
        * "V_approach": The airspeed when the aircraft reaches the runway threshold during a normal
          landing.
        * "V_touchdown": The airspeed when the aircraft touches down during a normal landing.
        * "flight_path_angle_climb": The flight path angle during a normal takeoff at the point when
          the airplane reaches the obstacle clearance height [radians].
        * "flight_path_angle_climb_one_engine_out": The flight path angle during a
          critical-engine-out takeoff at the point when the airplane reaches the obstacle clearance
          height [radians]. If this number is negative, engine failure results in inability to climb.
    """
    ### Set defaults
    if atmosphere is None:
        atmosphere = asb.Atmosphere(altitude=0)

    ### Constants
    g = 9.81  # m/s^2, gravitational acceleration

    ##### Normal takeoff analysis #####
    ### Compute TWR and climb physics
    thrust_over_weight_takeoff = thrust_at_liftoff / (design_mass_TOGW * g)
    # Small-angle climb gradient: excess thrust-to-weight after paying the drag bill.
    flight_path_angle_climb = (
        thrust_over_weight_takeoff
        - 1 / lift_over_drag_climb
    )

    ### V_stall is the stall speed of the airplane.
    V_stall = np.sqrt(
        2 * design_mass_TOGW * g / (atmosphere.density() * s_ref * CL_max)
    )

    ### V_obstacle is the airspeed at the obstacle clearance height
    V_obstacle = V_obstacle_over_V_stall * V_stall

    ### V_liftoff is the airspeed at the moment of liftoff
    # The softmax is a smooth maximum: it keeps V_liftoff / V_stall at or above the
    # minimum-allowable ratio while remaining differentiable for optimization.
    V_liftoff = V_obstacle * np.softmax(
        (1 + flight_path_angle_climb * 2 ** 0.5) ** -0.5,  # From Torenbeek
        minimum_V_liftoff_over_V_stall / V_obstacle_over_V_stall,
        hardness=1 / 0.01
    )

    takeoff_effective_friction_coefficient = (  # From Torenbeek, Eq. 5-76; an approximation
        friction_coefficient +
        0.72 * (CD_zero_lift / CL_max)
    )

    # From Torenbeek, Eq. 5-74
    takeoff_acceleration_g = thrust_over_weight_takeoff - takeoff_effective_friction_coefficient

    # Constant-acceleration kinematics: distance = V^2 / (2 * a).
    takeoff_ground_roll_distance = V_liftoff ** 2 / (
        2 * g * takeoff_acceleration_g
    )

    ### Compute the airborne distance required to clear the obstacle
    # From Torenbeek. Assumes an air maneuver after liftoff with CL=CL_liftoff and constant (thrust - drag).
    takeoff_airborne_distance = (
        (
            V_liftoff ** 2 / (g * 2 ** 0.5)
        ) + (
            obstacle_height / flight_path_angle_climb
        )
    )

    ### Compute the total distance required for normal takeoff, including obstacle clearance
    takeoff_total_distance = takeoff_ground_roll_distance + takeoff_airborne_distance

    ##### Balanced field length analysis #####
    if n_engines == 1:
        # If there is only one engine, the worst time *during the ground roll* for the engine to fail is right at liftoff.
        balanced_field_length = takeoff_ground_roll_distance + (
            V_liftoff ** 2 / (2 * g * maximum_braking_deceleration_g)
        )
        # With the only engine out, the aircraft glides at -1 / (L/D).
        flight_path_angle_climb_one_engine_out = -1 / lift_over_drag_climb

    else:
        ### The flight path angle during a climb with one engine inoperative.
        flight_path_angle_climb_one_engine_out = (
            thrust_over_weight_takeoff * (n_engines - 1) / n_engines
            - 1 / lift_over_drag_climb
        )

        # Minimum climb-gradient requirements as a function of engine count.
        if n_engines == 2:
            minimum_allowable_flight_path_angle = 0.024
        elif n_engines == 3:
            minimum_allowable_flight_path_angle = 0.027
        elif n_engines >= 4:
            minimum_allowable_flight_path_angle = 0.030
        else:
            raise ValueError("`n_engines` must be an integer >= 1")

        # This is an approximation made by Torenbeek (Eq. 5-90, see citation in docstring)
        gamma_bar_takeoff = 0.06 + (flight_path_angle_climb_one_engine_out - minimum_allowable_flight_path_angle)

        # NOTE(review): computed but unused below — presumably intended for a
        # density correction to the balanced field length; confirm against Torenbeek Eq. 5-89.
        air_density_ratio = atmosphere.density() / asb.Atmosphere(altitude=0).density()

        balanced_field_length = (  # From Torenbeek, Eq. 5-89, modified to have inertia distance scale with V_liftoff
            (V_liftoff ** 2 / (2 * g * (1 + gamma_bar_takeoff / maximum_braking_deceleration_g))) *
            (1 / takeoff_acceleration_g + 1 / maximum_braking_deceleration_g) *
            (1 + (2 * g * obstacle_height) / V_liftoff ** 2) +
            inertia_time * V_liftoff
        )

    # Do a softmax to make sure that the BFL is never shorter than the normal takeoff distance.
    balanced_field_length = np.softmax(
        balanced_field_length,
        takeoff_total_distance,
        softness=takeoff_total_distance / 100,
    )

    ##### Landing analysis #####
    # The factor of 2 is an approximation factor from Torenbeek, Section 5.4.6
    gamma_bar_landing = 2 * np.tand(approach_angle_deg)

    ### Compute the landing distance
    V_approach = V_approach_over_V_stall * V_stall
    # NOTE(review): assumes the touchdown speed equals the takeoff liftoff speed — confirm.
    V_touchdown = V_liftoff

    landing_airborne_distance = (  # From Torenbeek
        (V_approach ** 2 - V_touchdown ** 2) / (2 * g) + obstacle_height
    ) / gamma_bar_landing

    # Reaction/free-roll distance plus constant-deceleration braking distance.
    landing_ground_roll_distance = (
        inertia_time * V_touchdown +
        V_touchdown ** 2 / (2 * g * maximum_braking_deceleration_g)
    )

    landing_total_distance = (
        landing_airborne_distance +
        landing_ground_roll_distance
    )

    return {
        "takeoff_ground_roll_distance"          : takeoff_ground_roll_distance,
        "takeoff_airborne_distance"             : takeoff_airborne_distance,
        "takeoff_total_distance"                : takeoff_total_distance,
        "balanced_field_length"                 : balanced_field_length,
        "landing_airborne_distance"             : landing_airborne_distance,
        "landing_ground_roll_distance"          : landing_ground_roll_distance,
        "landing_total_distance"                : landing_total_distance,
        "V_stall"                               : V_stall,
        "V_liftoff"                             : V_liftoff,
        "V_obstacle"                            : V_obstacle,
        "V_approach"                            : V_approach,
        "V_touchdown"                           : V_touchdown,
        "flight_path_angle_climb"               : flight_path_angle_climb,
        "flight_path_angle_climb_one_engine_out": flight_path_angle_climb_one_engine_out,
    }
def field_length_analysis(
        design_mass_TOGW: float,
        thrust_at_liftoff: float,
        lift_over_drag_climb: float,
        CL_max: float,
        s_ref: float,
        n_engines: int,
        V_engine_failure_balanced_field_length: float,
        atmosphere: asb.Atmosphere = None,
        CD_zero_lift: float = 0.03,
        obstacle_height: float = 35 * u.foot,
        friction_coefficient: float = 0.02,
        minimum_V_liftoff_over_V_stall: float = 1.2,
        maximum_braking_deceleration_g: float = 0.37,
        inertia_time: float = 2,
        approach_angle_deg: float = 3,
) -> Dict[str, float]:
    """
    Performs a field length analysis on an aircraft, returning a dictionary of field length
    parameters.

    Unlike `field_length_analysis_torenbeek()`, this variant builds each segment from
    constant-acceleration kinematics, and takes the engine-failure speed as an explicit input
    (`V_engine_failure_balanced_field_length`) rather than assuming failure at the balanced
    decision speed. Correspondingly, it reports the "accept" (continue takeoff) and "reject"
    (brake to a stop) field lengths separately.

    Args:
        design_mass_TOGW: The takeoff gross weight of the entire aircraft [kg].
        thrust_at_liftoff: The thrust of the aircraft at the moment of liftoff [N].
        lift_over_drag_climb: The lift-to-drag ratio during the climb phase of takeoff
            [dimensionless].
        CL_max: The maximum lift coefficient, with any lift-augmentation devices deployed
            [dimensionless].
        s_ref: The reference area of the aircraft [m^2].
        n_engines: The number of engines. Used for the single-engine-failure analysis.
        V_engine_failure_balanced_field_length: The airspeed at which the engine failure is
            assumed to occur during the balanced-field-length analysis [m/s].
        atmosphere: The atmosphere object to use for the analysis. Defaults to sea level.
        CD_zero_lift: The zero-lift drag coefficient of the aircraft [dimensionless].
        obstacle_height: The height of the obstacle clearance [m].
        friction_coefficient: The rolling-friction coefficient between wheels and runway.
        minimum_V_liftoff_over_V_stall: The ratio of liftoff airspeed to stall airspeed.
        maximum_braking_deceleration_g: The maximum deceleration during braking [G].
        inertia_time: The pilot-plus-aircraft reaction time after an engine failure [seconds].
        approach_angle_deg: The glide-path angle flown on final approach [degrees].

    Returns:
        A dictionary of field length parameters; see the keys of the return statement. Keys
        largely mirror `field_length_analysis_torenbeek()`, except the balanced field length is
        split into "balanced_field_length_accept" and "balanced_field_length_reject".
    """
    ### Set defaults
    if atmosphere is None:
        atmosphere = asb.Atmosphere(altitude=0)

    ### Constants
    g = 9.81  # m/s^2, gravitational acceleration

    ### Compute TWR and climb physics
    thrust_over_weight_takeoff = thrust_at_liftoff / (design_mass_TOGW * g)

    ##### Compute various accelerations
    acceleration_friction_and_drag = -g * (  # Based on Torenbeek, Eq. 5-76; an approximation
        friction_coefficient +
        0.72 * (CD_zero_lift / CL_max)
    )
    acceleration_braking = -g * maximum_braking_deceleration_g
    acceleration_engines = thrust_at_liftoff / design_mass_TOGW

    acceleration_takeoff = acceleration_engines + acceleration_friction_and_drag
    acceleration_coasting = acceleration_friction_and_drag
    acceleration_landing = acceleration_braking

    ##### Normal takeoff analysis #####
    ### V_stall is the stall speed of the airplane.
    V_stall = np.sqrt(
        2 * design_mass_TOGW * g / (atmosphere.density() * s_ref * CL_max)
    )

    ### V_liftoff is the airspeed at the moment of liftoff
    V_liftoff = minimum_V_liftoff_over_V_stall * V_stall

    # Constant-acceleration kinematics: distance = V^2 / (2 * a).
    takeoff_ground_roll_distance = V_liftoff ** 2 / (2 * acceleration_takeoff)

    ### Compute the airborne distance required to clear the obstacle
    flight_path_angle_climb = (  # radians, small angle approximation
        thrust_over_weight_takeoff
        - 1 / lift_over_drag_climb
    )
    # Smoothly clip the climb gradient to be non-negative, keeping the model differentiable.
    flight_path_angle_climb = np.softmax(flight_path_angle_climb, 0, softness=0.001)

    takeoff_airborne_distance = obstacle_height / flight_path_angle_climb

    ### Compute the total distance required for normal takeoff, including obstacle clearance
    takeoff_total_distance = takeoff_ground_roll_distance + takeoff_airborne_distance

    ##### Normal landing analysis #####
    landing_airborne_distance = obstacle_height / np.tand(approach_angle_deg)
    V_touchdown = V_liftoff

    # Reaction/free-roll distance plus constant-deceleration braking distance.
    landing_ground_roll_distance = (
        inertia_time * V_touchdown +
        V_touchdown ** 2 / (2 * -acceleration_landing)
    )

    landing_total_distance = landing_airborne_distance + landing_ground_roll_distance

    ##### Balanced field length analysis #####
    if n_engines == 1:
        # If there is only one engine, the worst time *during the ground roll* for the engine to fail is right at liftoff.
        balanced_field_length = takeoff_ground_roll_distance + (
            V_liftoff ** 2 / (2 * -acceleration_landing)
        )
        balanced_field_length_accept = balanced_field_length
        balanced_field_length_reject = balanced_field_length

        # Bug fix: previously left undefined on this branch, which made the
        # `return` below raise NameError for n_engines == 1. Matches the
        # convention in `field_length_analysis_torenbeek()`: with the only
        # engine out, the aircraft glides at -1 / (L/D).
        flight_path_angle_climb_one_engine_out = -1 / lift_over_drag_climb

    else:
        acceleration_takeoff_one_engine_out = acceleration_engines * (
            n_engines - 1) / n_engines + acceleration_friction_and_drag

        ### The flight path angle during a climb with one engine inoperative.
        flight_path_angle_climb_one_engine_out = (
            thrust_over_weight_takeoff * (n_engines - 1) / n_engines
            - 1 / lift_over_drag_climb
        )
        flight_path_angle_climb_one_engine_out = np.softmax(flight_path_angle_climb_one_engine_out, 0, softness=0.001)

        # "Accept" case: accelerate on all engines to the failure speed, continue
        # to liftoff speed on the remaining engines, then climb over the obstacle.
        balanced_field_length_accept = (
            (V_engine_failure_balanced_field_length ** 2 / (2 * acceleration_takeoff)) +  # Both engines working
            ((V_liftoff ** 2 - V_engine_failure_balanced_field_length ** 2) / (
                2 * acceleration_takeoff_one_engine_out)) +
            (obstacle_height / flight_path_angle_climb_one_engine_out)
        )

        # "Reject" case: accelerate to the failure speed, coast for the reaction
        # time, then brake to a complete stop.
        balanced_field_length_reject = (
            (V_engine_failure_balanced_field_length ** 2 / (2 * acceleration_takeoff)) +  # Both engines working
            (inertia_time * V_engine_failure_balanced_field_length) +  # Reaction time for pilot / engines
            (V_engine_failure_balanced_field_length ** 2 / (2 * -acceleration_landing))  # Braking time
        )

    return {
        "takeoff_ground_roll_distance"          : takeoff_ground_roll_distance,
        "takeoff_airborne_distance"             : takeoff_airborne_distance,
        "takeoff_total_distance"                : takeoff_total_distance,
        "balanced_field_length_accept"          : balanced_field_length_accept,
        "balanced_field_length_reject"          : balanced_field_length_reject,
        "landing_airborne_distance"             : landing_airborne_distance,
        "landing_ground_roll_distance"          : landing_ground_roll_distance,
        "landing_total_distance"                : landing_total_distance,
        "V_stall"                               : V_stall,
        "V_liftoff"                             : V_liftoff,
        "V_touchdown"                           : V_touchdown,
        "flight_path_angle_climb"               : flight_path_angle_climb,
        "flight_path_angle_climb_one_engine_out": flight_path_angle_climb_one_engine_out,
    }
if __name__ == '__main__':
    from aerosandbox.tools import units as u
    from pprint import pprint

    results = field_length_analysis(
        design_mass_TOGW=19000 * u.lbm,
        thrust_at_liftoff=19000 * u.lbf * 0.3,
        lift_over_drag_climb=20,
        CL_max=1.9,
        s_ref=24,
        n_engines=2,
        V_engine_failure_balanced_field_length=70,
        atmosphere=asb.Atmosphere(altitude=0),
    )
    results_torenbeek = field_length_analysis_torenbeek(
        design_mass_TOGW=19000 * u.lbm,
        thrust_at_liftoff=19000 * u.lbf * 0.3,
        lift_over_drag_climb=20,
        CL_max=1.9,
        s_ref=24,
        n_engines=2,
        atmosphere=asb.Atmosphere(altitude=0),
    )

    print("field_length_analysis():")
    pprint(results)
    # Bug fix: `results_torenbeek` was previously computed but never displayed;
    # print it too so the two models can be compared side-by-side.
    print("field_length_analysis_torenbeek():")
    pprint(results_torenbeek)
import matplotlib.pyplot as plt
import aerosandbox.numpy as np
from aerosandbox.common import *
from aerosandbox.library.aerodynamics.unsteady import *
class TransverseGustPitchControl(ImplicitAnalysis):
    """
    An implicit analysis that calculates the optimal pitching maneuver
    through a specified transverse gust, with the goal of minimizing the
    deviation from a specified lift coefficient. It utilizes differentiable
    duhamel superposition integrals for Kussner's gust model and Wagner's
    pitching model, as well as any additional lift from the added mass.

    Args:
        reduced_time (np.ndarray) : Reduced time, equal to the number of semichords travelled. See function reduced_time in the unsteady aero library
        gust_profile (np.ndarray) : An array that specifies the gust velocity at each reduced time
        velocity (float) : The velocity of the aircraft
    """

    @ImplicitAnalysis.initialize
    def __init__(self,
                 reduced_time: np.ndarray,
                 gust_profile: np.ndarray,
                 velocity: float
                 ):
        self.reduced_time = reduced_time
        self.gust_profile = gust_profile
        self.timesteps = len(reduced_time)
        self.velocity = velocity

        self._setup_unknowns()
        self._enforce_governing_equations()

    def _setup_unknowns(self):
        """Declare the optimization variables: the pitch schedule and the lift history."""
        # One angle of attack per timestep; lift is defined on intervals, hence one fewer.
        self.angles_of_attack = self.opti.variable(init_guess=1, n_vars=self.timesteps)
        self.lift_coefficients = self.opti.variable(init_guess=1, n_vars=self.timesteps - 1)

    def _enforce_governing_equations(self):
        """Build the lift model (pitching + gust + added mass) and the objective/constraints."""
        # Calculate unsteady lift due to pitching: Wagner's indicial response,
        # superposed via a discretized Duhamel integral (forward differences).
        wagner = wagners_function(self.reduced_time)
        ds = self.reduced_time[1:] - self.reduced_time[:-1]
        da_ds = (self.angles_of_attack[1:] - self.angles_of_attack[:-1]) / ds
        init_term = self.angles_of_attack[0] * wagner[:-1]
        for i in range(self.timesteps - 1):
            # NOTE(review): a generator is passed to np.sum here; plain NumPy
            # deprecates this (falling back to the builtin sum) — confirm that
            # aerosandbox.numpy.sum handles generators as intended.
            integral_term = np.sum(da_ds[j] * wagner[i - j] * ds[j] for j in range(i))
            # Item assignment into an optimization variable; relies on the
            # backing symbolic type supporting indexed assignment.
            self.lift_coefficients[i] = 2 * np.pi * (integral_term + init_term[i])

        # Calculate unsteady lift due to transverse gust: Kussner's indicial response.
        kussner = kussners_function(self.reduced_time)
        dw_ds = (self.gust_profile[1:] - self.gust_profile[:-1]) / ds
        init_term = self.gust_profile[0] * kussner
        for i in range(self.timesteps - 1):
            integral_term = 0
            for j in range(i):
                integral_term += dw_ds[j] * kussner[i - j] * ds[j]
            self.lift_coefficients[i] += 2 * np.pi / self.velocity * (init_term[i] + integral_term)

        # Calculate unsteady lift due to added mass
        self.lift_coefficients += np.pi / 2 * np.cos(self.angles_of_attack[:-1]) ** 2 * da_ds

        # Integral of lift to be minimized (sum of squared lift coefficients)
        lift_squared_integral = np.sum(self.lift_coefficients ** 2)

        # Constraints and objective to minimize
        self.opti.subject_to(self.angles_of_attack[0] == 0)  # Start at zero angle of attack
        self.opti.minimize(lift_squared_integral)

    def calculate_transients(self):
        """
        Extract the solved pitch and lift histories from the optimizer, and recompute
        the individual lift contributions (pitching, gust, added mass) from the optimal
        pitching profile for inspection/plotting.
        """
        self.optimal_pitching_profile_rad = self.opti.value(self.angles_of_attack)
        self.optimal_pitching_profile_deg = np.rad2deg(self.optimal_pitching_profile_rad)
        self.optimal_lift_history = self.opti.value(self.lift_coefficients)

        self.pitching_lift = np.zeros(self.timesteps - 1)
        # Calculate unsteady lift due to pitching (same discretization as in
        # _enforce_governing_equations, but on the solved numeric profile).
        wagner = wagners_function(self.reduced_time)
        ds = self.reduced_time[1:] - self.reduced_time[:-1]
        da_ds = (self.optimal_pitching_profile_rad[1:] - self.optimal_pitching_profile_rad[:-1]) / ds
        init_term = self.optimal_pitching_profile_rad[0] * wagner[:-1]
        for i in range(self.timesteps - 1):
            integral_term = np.sum(da_ds[j] * wagner[i - j] * ds[j] for j in range(i))
            self.pitching_lift[i] = 2 * np.pi * (integral_term + init_term[i])

        self.gust_lift = np.zeros(self.timesteps - 1)
        # Calculate unsteady lift due to transverse gust
        kussner = kussners_function(self.reduced_time)
        dw_ds = (self.gust_profile[1:] - self.gust_profile[:-1]) / ds
        init_term = self.gust_profile[0] * kussner
        for i in range(self.timesteps - 1):
            integral_term = 0
            for j in range(i):
                integral_term += dw_ds[j] * kussner[i - j] * ds[j]
            self.gust_lift[i] += 2 * np.pi / self.velocity * (init_term[i] + integral_term)

        # Calculate unsteady lift due to added mass
        self.added_mass_lift = np.pi / 2 * np.cos(self.optimal_pitching_profile_rad[:-1]) ** 2 * da_ds
if __name__ == "__main__":
N = 100 # Number of discrete spatial points
time = np.linspace(0, 10, N) # Time in seconds
wing_velocity = 2 # Velocity of wing/aircraft in m/s
chord = 2
reduced_time = calculate_reduced_time(time, wing_velocity, chord)
profile = np.array([top_hat_gust(s) for s in reduced_time])
optimal = TransverseGustPitchControl(reduced_time, profile, wing_velocity)
print("Calculating Transients...")
optimal.calculate_transients()
fig, ax1 = plt.subplots(dpi=300)
ax2 = ax1.twinx()
ax1.plot(reduced_time[:-1], optimal.optimal_lift_history, label="Total Lift", lw=2, c="k")
ax1.plot(reduced_time[:-1], optimal.gust_lift, label="Gust Lift", lw=2)
ax1.plot(reduced_time[:-1], optimal.pitching_lift, label="Pitching Lift", lw=2)
ax1.plot(reduced_time[:-1], optimal.added_mass_lift, label="Added Mass Lift", lw=2)
ax2.plot(reduced_time, optimal.optimal_pitching_profile_deg, label="Angle of attack", lw=2, ls="--")
ax2.set_ylim([-40, 40])
ax1.legend(loc="lower left")
ax2.legend(loc="lower right")
ax1.set_xlabel("Reduced time")
ax1.set_ylabel("$C_\ell$")
ax2.set_ylabel("Angle of attack, degrees")
plt.title("Optimal Pitch Maneuver Through Top-Hat Gust")
plt.show() | AeroSandbox | /AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/gust_pitch_control.py | gust_pitch_control.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.