body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified |
---|---|---|---|---|---|---|---|---|---|
07abdaaac9bc820128716e92b7d86418c967fbc247feb2d85e9ee2e50334af53 | @staticmethod
def count_orders(orders):
'Return count of orders.'
return len(orders) | Return count of orders. | shopify_alexa.py | count_orders | johntelforduk/shopify-alexa-skill | 0 | python | @staticmethod
def count_orders(orders):
return len(orders) | @staticmethod
def count_orders(orders):
return len(orders)<|docstring|>Return count of orders.<|endoftext|> |
04477885aa8e59e6ca9ebec63e40f911181a46943768be28f99db4b2c9838322 | def gross_sales(self, target_date: str) -> float:
'Return the gross amount of sales in the shop currency on parm day.\n Sales are returned as both an integer and a formatted string.'
total = 0.0
orders = self.orders_on_date(target_date)
for each_order in orders:
total += float(each_order['total_price'])
return total | Return the gross amount of sales in the shop currency on parm day.
Sales are returned as both an integer and a formatted string. | shopify_alexa.py | gross_sales | johntelforduk/shopify-alexa-skill | 0 | python | def gross_sales(self, target_date: str) -> float:
'Return the gross amount of sales in the shop currency on parm day.\n Sales are returned as both an integer and a formatted string.'
total = 0.0
orders = self.orders_on_date(target_date)
for each_order in orders:
total += float(each_order['total_price'])
return total | def gross_sales(self, target_date: str) -> float:
'Return the gross amount of sales in the shop currency on parm day.\n Sales are returned as both an integer and a formatted string.'
total = 0.0
orders = self.orders_on_date(target_date)
for each_order in orders:
total += float(each_order['total_price'])
return total<|docstring|>Return the gross amount of sales in the shop currency on parm day.
Sales are returned as both an integer and a formatted string.<|endoftext|> |
30c9ed48353a84b8a96c89861118cc550864f31c94263caa46df3d700d5ba366 | def date_as_str(self, delta_days: int) -> str:
'Return a date relative to today as a string in yyyy-mm-dd format.'
utc_now = datetime.now(pytz.utc)
local_now = utc_now.astimezone(pytz.timezone(self.server_timezone))
debug('Skill.date_as_str : utc_now = {} local_now = {}'.format(utc_now, local_now))
required_date = (local_now + timedelta(days=delta_days))
return required_date.strftime('%Y-%m-%d') | Return a date relative to today as a string in yyyy-mm-dd format. | shopify_alexa.py | date_as_str | johntelforduk/shopify-alexa-skill | 0 | python | def date_as_str(self, delta_days: int) -> str:
utc_now = datetime.now(pytz.utc)
local_now = utc_now.astimezone(pytz.timezone(self.server_timezone))
debug('Skill.date_as_str : utc_now = {} local_now = {}'.format(utc_now, local_now))
required_date = (local_now + timedelta(days=delta_days))
return required_date.strftime('%Y-%m-%d') | def date_as_str(self, delta_days: int) -> str:
utc_now = datetime.now(pytz.utc)
local_now = utc_now.astimezone(pytz.timezone(self.server_timezone))
debug('Skill.date_as_str : utc_now = {} local_now = {}'.format(utc_now, local_now))
required_date = (local_now + timedelta(days=delta_days))
return required_date.strftime('%Y-%m-%d')<|docstring|>Return a date relative to today as a string in yyyy-mm-dd format.<|endoftext|> |
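The `date_as_str` row above shows the timezone-aware relative-date idiom: convert UTC "now" into the shop's local zone before applying the day offset, so dates roll over at local midnight. A minimal standalone sketch, assuming `pytz` is installed; the `'Europe/London'` zone and the free-function form are illustrative, not the repository's code:

```python
from datetime import datetime, timedelta

import pytz


def date_as_str(server_timezone: str, delta_days: int) -> str:
    # Convert UTC "now" into the local timezone first, so the day offset
    # is applied relative to local midnight rather than UTC midnight.
    utc_now = datetime.now(pytz.utc)
    local_now = utc_now.astimezone(pytz.timezone(server_timezone))
    return (local_now + timedelta(days=delta_days)).strftime('%Y-%m-%d')


print(date_as_str('Europe/London', 0))    # today
print(date_as_str('Europe/London', -1))   # yesterday
```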
23fbd499ce0e5613a562f55358a8dcbe4d61edf5572b434707e1ab2274091df6 | def today_str(self) -> str:
"Return today's date as a string in format yyyy-mm-dd."
return self.date_as_str(delta_days=0) | Return today's date as a string in format yyyy-mm-dd. | shopify_alexa.py | today_str | johntelforduk/shopify-alexa-skill | 0 | python | def today_str(self) -> str:
return self.date_as_str(delta_days=0) | def today_str(self) -> str:
return self.date_as_str(delta_days=0)<|docstring|>Return today's date as a string in format yyyy-mm-dd.<|endoftext|> |
c069993b1c2c476e8b61dcbdde0397b0b3cafdb3164ccb7a4f50f27dafe88921 | def yesterday_str(self) -> str:
"Return yesterday's date as a string in format yyyy-mm-dd."
return self.date_as_str(delta_days=(- 1)) | Return yesterday's date as a string in format yyyy-mm-dd. | shopify_alexa.py | yesterday_str | johntelforduk/shopify-alexa-skill | 0 | python | def yesterday_str(self) -> str:
return self.date_as_str(delta_days=(- 1)) | def yesterday_str(self) -> str:
return self.date_as_str(delta_days=(- 1))<|docstring|>Return yesterday's date as a string in format yyyy-mm-dd.<|endoftext|> |
fcda8c925cf74a2c874ec61de646be0660f0a0e791576e82ccc9a5484be9c92d | def formatted_money(self, money: float) -> (int, str):
'Return parm real as a tuple (integer amount of the money, string of money).\n The string includes the currency symbol.'
total_as_str = '{:,.2f}'.format(round(money, 2))
sales_str = self.shop.money_format.replace('{{amount}}', total_as_str)
return (int(money), sales_str) | Return parm real as a tuple (integer amount of the money, string of money).
The string includes the currency symbol. | shopify_alexa.py | formatted_money | johntelforduk/shopify-alexa-skill | 0 | python | def formatted_money(self, money: float) -> (int, str):
'Return parm real as a tuple (integer amount of the money, string of money).\n The string includes the currency symbol.'
total_as_str = '{:,.2f}'.format(round(money, 2))
sales_str = self.shop.money_format.replace('{{amount}}', total_as_str)
return (int(money), sales_str) | def formatted_money(self, money: float) -> (int, str):
'Return parm real as a tuple (integer amount of the money, string of money).\n The string includes the currency symbol.'
total_as_str = '{:,.2f}'.format(round(money, 2))
sales_str = self.shop.money_format.replace('{{amount}}', total_as_str)
return (int(money), sales_str)<|docstring|>Return parm real as a tuple (integer amount of the money, string of money).
The string includes the currency symbol.<|endoftext|> |
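`formatted_money` relies on Shopify's `money_format` shop setting, which embeds a `{{amount}}` placeholder. A self-contained sketch of the substitution, where the `'£{{amount}}'` template is an invented example value:

```python
def formatted_money(money_format: str, money: float) -> (int, str):
    # '{:,.2f}' adds thousands separators and two decimals: 1234.5 -> '1,234.50'
    total_as_str = '{:,.2f}'.format(round(money, 2))
    return int(money), money_format.replace('{{amount}}', total_as_str)


print(formatted_money('£{{amount}}', 1234.5))   # (1234, '£1,234.50')
```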
28694aaa3e3f98e6b00b06597cb22af42bdc09edbb430df59b11dd1703998029 | def number_orders_today(self) -> str:
'Return a string saying how many orders there have been today so far.'
orders = self.shop.orders_on_date(self.today_str())
num_orders = self.shop.count_orders(orders)
if (num_orders == 0):
return 'You have had no orders yet today.'
elif (num_orders == 1):
return 'You have had 1 order so far today.'
else:
return 'You have had {} orders so far today.'.format(num_orders) | Return a string saying how many orders there have been today so far. | shopify_alexa.py | number_orders_today | johntelforduk/shopify-alexa-skill | 0 | python | def number_orders_today(self) -> str:
orders = self.shop.orders_on_date(self.today_str())
num_orders = self.shop.count_orders(orders)
if (num_orders == 0):
return 'You have had no orders yet today.'
elif (num_orders == 1):
return 'You have had 1 order so far today.'
else:
return 'You have had {} orders so far today.'.format(num_orders) | def number_orders_today(self) -> str:
orders = self.shop.orders_on_date(self.today_str())
num_orders = self.shop.count_orders(orders)
if (num_orders == 0):
return 'You have had no orders yet today.'
elif (num_orders == 1):
return 'You have had 1 order so far today.'
else:
return 'You have had {} orders so far today.'.format(num_orders)<|docstring|>Return a string saying how many orders there have been today so far.<|endoftext|> |
ee7aed67735405947ada704f3198425f0c7215c321fb9d5e7ccfb246c9b89fbf | def number_orders_yesterday(self) -> str:
'Return a string saying how many orders there were yesterday.'
orders = self.shop.orders_on_date(self.yesterday_str())
num_orders = self.shop.count_orders(orders)
if (num_orders == 0):
return 'You had no orders yesterday.'
elif (num_orders == 1):
return 'You had 1 order yesterday.'
else:
return 'You had {} orders yesterday.'.format(num_orders) | Return a string saying how many orders there were yesterday. | shopify_alexa.py | number_orders_yesterday | johntelforduk/shopify-alexa-skill | 0 | python | def number_orders_yesterday(self) -> str:
orders = self.shop.orders_on_date(self.yesterday_str())
num_orders = self.shop.count_orders(orders)
if (num_orders == 0):
return 'You had no orders yesterday.'
elif (num_orders == 1):
return 'You had 1 order yesterday.'
else:
return 'You had {} orders yesterday.'.format(num_orders) | def number_orders_yesterday(self) -> str:
orders = self.shop.orders_on_date(self.yesterday_str())
num_orders = self.shop.count_orders(orders)
if (num_orders == 0):
return 'You had no orders yesterday.'
elif (num_orders == 1):
return 'You had 1 order yesterday.'
else:
return 'You had {} orders yesterday.'.format(num_orders)<|docstring|>Return a string saying how many orders there were yesterday.<|endoftext|> |
74fe2af2023842d139b656fbbffd4d88999dae36cac9e67637db3a4ea35631ad | def gross_sales_today(self) -> str:
'Return a string saying what the gross sales are today so far.'
sales = self.shop.gross_sales(self.today_str())
(sales_int, sales_str) = self.formatted_money(sales)
if (sales_int == 0):
return 'No sales yet today.'
else:
return 'Gross sales so far today are {}'.format(sales_str) | Return a string saying what the gross sales are today so far. | shopify_alexa.py | gross_sales_today | johntelforduk/shopify-alexa-skill | 0 | python | def gross_sales_today(self) -> str:
sales = self.shop.gross_sales(self.today_str())
(sales_int, sales_str) = self.formatted_money(sales)
if (sales_int == 0):
return 'No sales yet today.'
else:
return 'Gross sales so far today are {}'.format(sales_str) | def gross_sales_today(self) -> str:
sales = self.shop.gross_sales(self.today_str())
(sales_int, sales_str) = self.formatted_money(sales)
if (sales_int == 0):
return 'No sales yet today.'
else:
return 'Gross sales so far today are {}'.format(sales_str)<|docstring|>Return a string saying what the gross sales are today so far.<|endoftext|> |
bc3186550cc23c494f8f9933c70736fc89f811212db718dc79a353ec0a2dc288 | def gross_sales_yesterday(self) -> str:
'Return a string saying what the gross sales were yesterday.'
sales = self.shop.gross_sales(self.yesterday_str())
(sales_int, sales_str) = self.formatted_money(sales)
if (sales_int == 0):
return 'No sales yesterday.'
else:
return 'Gross sales yesterday were {}'.format(sales_str) | Return a string saying what the gross sales were yesterday. | shopify_alexa.py | gross_sales_yesterday | johntelforduk/shopify-alexa-skill | 0 | python | def gross_sales_yesterday(self) -> str:
sales = self.shop.gross_sales(self.yesterday_str())
(sales_int, sales_str) = self.formatted_money(sales)
if (sales_int == 0):
return 'No sales yesterday.'
else:
return 'Gross sales yesterday were {}'.format(sales_str) | def gross_sales_yesterday(self) -> str:
sales = self.shop.gross_sales(self.yesterday_str())
(sales_int, sales_str) = self.formatted_money(sales)
if (sales_int == 0):
return 'No sales yesterday.'
else:
return 'Gross sales yesterday were {}'.format(sales_str)<|docstring|>Return a string saying what the gross sales were yesterday.<|endoftext|> |
a615c70d38e63e8e4b458967582898b9953c4ff792256b64461963642553fb42 | def most_recent_order(self) -> str:
'Return a string with details of the most recent order.'
if (len(self.shop.orders) == 0):
return 'There are no recent orders.'
else:
order = self.shop.orders[0]
money = float(order['total_price'])
(_, money_str) = self.formatted_money(money)
datetime_format1 = '%Y-%m-%d %H:%M:%S'
datetime_format2 = '%Y-%m-%dT%H:%M:%S'
utc_now = datetime.now(pytz.utc)
local_now = utc_now.astimezone(pytz.timezone(self.server_timezone))
local_now_str = str(local_now)
now_dt = datetime.strptime(local_now_str[0:19], datetime_format1)
debug('Skill.most_recent_order : utc_now = {} local_now = {} local_now_str = {} now_dt = {}'.format(utc_now, local_now, local_now_str, now_dt))
order_time_str = order['created_at']
order_dt = datetime.strptime(order_time_str[0:19], datetime_format2)
debug('Skill.most_recent_order : order_time_str = {} order_dt = {}'.format(order_time_str, order_dt))
diff = (now_dt - order_dt)
diff_mins = int((diff.seconds / 60))
hours = (diff_mins // 60)
mins = (diff_mins % 60)
debug('Skill.most_recent_order : diff = {} diff_mins = {} hours = {} mins = {}'.format(diff, diff_mins, hours, mins))
if (diff_mins <= 2):
return ('The most recent order was just now for ' + money_str)
elif (hours == 0):
return ((('The most recent order was ' + str(diff_mins)) + ' minutes ago for ') + money_str)
elif (hours == 1):
if (mins == 0):
return ('The most recent order was exactly 1 hour ago for ' + money_str)
if (mins == 1):
return ('The most recent order was 1 hour and 1 minute ago for ' + money_str)
else:
return ((('The most recent order was 1 hour and ' + str(mins)) + ' minutes ago for ') + money_str)
elif (mins == 0):
return ((('The most recent order was exactly ' + str(hours)) + ' hours ago for ') + money_str)
elif (mins == 1):
return ((('The most recent order was ' + str(hours)) + ' hours and 1 minute ago for ') + money_str)
else:
return ((((('The most recent order was ' + str(hours)) + ' hours and ') + str(mins)) + ' minutes ago for ') + money_str) | Return a string with details of the most recent order. | shopify_alexa.py | most_recent_order | johntelforduk/shopify-alexa-skill | 0 | python | def most_recent_order(self) -> str:
if (len(self.shop.orders) == 0):
return 'There are no recent orders.'
else:
order = self.shop.orders[0]
money = float(order['total_price'])
(_, money_str) = self.formatted_money(money)
datetime_format1 = '%Y-%m-%d %H:%M:%S'
datetime_format2 = '%Y-%m-%dT%H:%M:%S'
utc_now = datetime.now(pytz.utc)
local_now = utc_now.astimezone(pytz.timezone(self.server_timezone))
local_now_str = str(local_now)
now_dt = datetime.strptime(local_now_str[0:19], datetime_format1)
debug('Skill.most_recent_order : utc_now = {} local_now = {} local_now_str = {} now_dt = {}'.format(utc_now, local_now, local_now_str, now_dt))
order_time_str = order['created_at']
order_dt = datetime.strptime(order_time_str[0:19], datetime_format2)
debug('Skill.most_recent_order : order_time_str = {} order_dt = {}'.format(order_time_str, order_dt))
diff = (now_dt - order_dt)
diff_mins = int((diff.seconds / 60))
hours = (diff_mins // 60)
mins = (diff_mins % 60)
debug('Skill.most_recent_order : diff = {} diff_mins = {} hours = {} mins = {}'.format(diff, diff_mins, hours, mins))
if (diff_mins <= 2):
return ('The most recent order was just now for ' + money_str)
elif (hours == 0):
return ((('The most recent order was ' + str(diff_mins)) + ' minutes ago for ') + money_str)
elif (hours == 1):
if (mins == 0):
return ('The most recent order was exactly 1 hour ago for ' + money_str)
if (mins == 1):
return ('The most recent order was 1 hour and 1 minute ago for ' + money_str)
else:
return ((('The most recent order was 1 hour and ' + str(mins)) + ' minutes ago for ') + money_str)
elif (mins == 0):
return ((('The most recent order was exactly ' + str(hours)) + ' hours ago for ') + money_str)
elif (mins == 1):
return ((('The most recent order was ' + str(hours)) + ' hours and 1 minute ago for ') + money_str)
else:
return ((((('The most recent order was ' + str(hours)) + ' hours and ') + str(mins)) + ' minutes ago for ') + money_str) | def most_recent_order(self) -> str:
if (len(self.shop.orders) == 0):
return 'There are no recent orders.'
else:
order = self.shop.orders[0]
money = float(order['total_price'])
(_, money_str) = self.formatted_money(money)
datetime_format1 = '%Y-%m-%d %H:%M:%S'
datetime_format2 = '%Y-%m-%dT%H:%M:%S'
utc_now = datetime.now(pytz.utc)
local_now = utc_now.astimezone(pytz.timezone(self.server_timezone))
local_now_str = str(local_now)
now_dt = datetime.strptime(local_now_str[0:19], datetime_format1)
debug('Skill.most_recent_order : utc_now = {} local_now = {} local_now_str = {} now_dt = {}'.format(utc_now, local_now, local_now_str, now_dt))
order_time_str = order['created_at']
order_dt = datetime.strptime(order_time_str[0:19], datetime_format2)
debug('Skill.most_recent_order : order_time_str = {} order_dt = {}'.format(order_time_str, order_dt))
diff = (now_dt - order_dt)
diff_mins = int((diff.seconds / 60))
hours = (diff_mins // 60)
mins = (diff_mins % 60)
debug('Skill.most_recent_order : diff = {} diff_mins = {} hours = {} mins = {}'.format(diff, diff_mins, hours, mins))
if (diff_mins <= 2):
return ('The most recent order was just now for ' + money_str)
elif (hours == 0):
return ((('The most recent order was ' + str(diff_mins)) + ' minutes ago for ') + money_str)
elif (hours == 1):
if (mins == 0):
return ('The most recent order was exactly 1 hour ago for ' + money_str)
if (mins == 1):
return ('The most recent order was 1 hour and 1 minute ago for ' + money_str)
else:
return ((('The most recent order was 1 hour and ' + str(mins)) + ' minutes ago for ') + money_str)
elif (mins == 0):
return ((('The most recent order was exactly ' + str(hours)) + ' hours ago for ') + money_str)
elif (mins == 1):
return ((('The most recent order was ' + str(hours)) + ' hours and 1 minute ago for ') + money_str)
else:
return ((((('The most recent order was ' + str(hours)) + ' hours and ') + str(mins)) + ' minutes ago for ') + money_str)<|docstring|>Return a string with details of the most recent order.<|endoftext|> |
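One caveat in the elapsed-time arithmetic above: `timedelta.seconds` holds only the sub-day remainder, so an order more than 24 hours old would report a misleadingly small age. A sketch of the safer `total_seconds()`/`divmod` variant (a hypothetical helper, not the repository's code):

```python
from datetime import datetime


def age_hours_minutes(now_dt: datetime, order_dt: datetime) -> (int, int):
    # total_seconds() includes whole days, unlike timedelta.seconds.
    diff_mins = int((now_dt - order_dt).total_seconds() // 60)
    return divmod(diff_mins, 60)   # (hours, minutes)


print(age_hours_minutes(datetime(2021, 1, 2, 10, 30),
                        datetime(2021, 1, 1, 9, 0)))   # (25, 30)
```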
3f22e68a306e5f841d27c333c6e891775838e763d0cc834c3b8529031a53b847 | def taxstring(tid, verbose=False):
'\n\n :param tid: taxonomy ID\n :param verbose: more output\n :return: an array of the taxonomy from kingdom -> species\n '
global taxa
if (tid in taxa):
return taxa[tid]
want = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
thistaxa = ['', '', '', '', '', '', '']
c = get_taxonomy_db()
try:
(m, n) = get_taxonomy(tid, c)
except EntryNotInDatabaseError:
sys.stderr.write(f'''{bcolors.RED}{tid} not in database.Skipped line{bcolors.ENDC}
''')
taxa[tid] = thistaxa
return taxa[tid]
thisname = choosename(n, verbose)
if thisname:
if (m.rank in want):
thistaxa[want.index(m.rank)] = (thisname[0].upper() + thisname[1:])
for p in taxonomy_hierarchy(tid, verbose=False):
(m, n) = get_taxonomy(p, c)
thisname = choosename(n, verbose)
if (not thisname):
sys.stderr.write(f'''{bcolors.RED}ERROR: No name for {tid}{bcolors.ENDC}
''')
return
if (m.rank in want):
thistaxa[want.index(m.rank)] = (thisname[0].upper() + thisname[1:])
taxa[tid] = thistaxa
return taxa[tid] | :param tid: taxonomy ID
:param verbose: more output
:return: an array of the taxonomy from kingdom -> species | ncbi/blast2taxonomy_col.py | taxstring | johned0/EdwardsLab | 30 | python | def taxstring(tid, verbose=False):
'\n\n :param tid: taxonomy ID\n :param verbose: more output\n :return: an array of the taxonomy from kingdom -> species\n '
global taxa
if (tid in taxa):
return taxa[tid]
want = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
thistaxa = ['', '', '', '', '', '', '']
c = get_taxonomy_db()
try:
(m, n) = get_taxonomy(tid, c)
except EntryNotInDatabaseError:
sys.stderr.write(f'''{bcolors.RED}{tid} not in database.Skipped line{bcolors.ENDC}
''')
taxa[tid] = thistaxa
return taxa[tid]
thisname = choosename(n, verbose)
if thisname:
if (m.rank in want):
thistaxa[want.index(m.rank)] = (thisname[0].upper() + thisname[1:])
for p in taxonomy_hierarchy(tid, verbose=False):
(m, n) = get_taxonomy(p, c)
thisname = choosename(n, verbose)
if (not thisname):
sys.stderr.write(f'''{bcolors.RED}ERROR: No name for {tid}{bcolors.ENDC}
''')
return
if (m.rank in want):
thistaxa[want.index(m.rank)] = (thisname[0].upper() + thisname[1:])
taxa[tid] = thistaxa
return taxa[tid] | def taxstring(tid, verbose=False):
'\n\n :param tid: taxonomy ID\n :param verbose: more output\n :return: an array of the taxonomy from kingdom -> species\n '
global taxa
if (tid in taxa):
return taxa[tid]
want = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
thistaxa = ['', '', '', '', '', '', '']
c = get_taxonomy_db()
try:
(m, n) = get_taxonomy(tid, c)
except EntryNotInDatabaseError:
sys.stderr.write(f'''{bcolors.RED}{tid} not in database.Skipped line{bcolors.ENDC}
''')
taxa[tid] = thistaxa
return taxa[tid]
thisname = choosename(n, verbose)
if thisname:
if (m.rank in want):
thistaxa[want.index(m.rank)] = (thisname[0].upper() + thisname[1:])
for p in taxonomy_hierarchy(tid, verbose=False):
(m, n) = get_taxonomy(p, c)
thisname = choosename(n, verbose)
if (not thisname):
sys.stderr.write(f'''{bcolors.RED}ERROR: No name for {tid}{bcolors.ENDC}
''')
return
if (m.rank in want):
thistaxa[want.index(m.rank)] = (thisname[0].upper() + thisname[1:])
taxa[tid] = thistaxa
return taxa[tid]<|docstring|>:param tid: taxonomy ID
:param verbose: more output
:return: an array of the taxonomy from kingdom -> species<|endoftext|> |
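`taxstring` fills a fixed seven-slot rank list and memoises results in the module-level `taxa` dict. The rank-slotting pattern in isolation, with an invented lineage standing in for the SQLite taxonomy lookup:

```python
WANT = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']


def slot_lineage(lineage):
    # lineage: iterable of (rank, name) pairs; ranks outside WANT are dropped.
    taxa = [''] * len(WANT)
    for rank, name in lineage:
        if rank in WANT:
            taxa[WANT.index(rank)] = name[0].upper() + name[1:]  # capitalise, as taxstring does
    return taxa


print(slot_lineage([('superkingdom', 'bacteria'),        # not in WANT -> ignored
                    ('phylum', 'proteobacteria'),
                    ('species', 'escherichia coli')]))
# ['', 'Proteobacteria', '', '', '', '', 'Escherichia coli']
```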
b1833bdae9d68340dcd2da66430887adeb2309224a09e4d06fc913a1620729c6 | def parse_blast(bf, taxcol, verbose=False):
'\n\n :param bf: the blast output file\n :param taxcol: the column that contains the taxonomy ID\n :param verbose: more output\n :return:\n '
lastcol = (- 1)
if bf.endswith('.gz'):
f = gzip.open(bf, 'rt')
else:
f = open(bf, 'r')
for l in f:
p = l.strip().split('\t')
if (lastcol == (- 1)):
lastcol = len(p)
if (len(p) != lastcol):
sys.stderr.write(f'''{bcolors.RED}FATAL: Uneven number of columns. We had {lastcol} but now {len(p)}
''')
sys.exit((- 1))
t = taxstring(p[taxcol], verbose)
print('\t'.join((p + t)))
f.close() | :param bf: the blast output file
:param taxcol: the column that contains the taxonomy ID
:param verbose: more output
:return: | ncbi/blast2taxonomy_col.py | parse_blast | johned0/EdwardsLab | 30 | python | def parse_blast(bf, taxcol, verbose=False):
'\n\n :param bf: the blast output file\n :param taxcol: the column that contains the taxonomy ID\n :param verbose: more output\n :return:\n '
lastcol = (- 1)
if bf.endswith('.gz'):
f = gzip.open(bf, 'rt')
else:
f = open(bf, 'r')
for l in f:
p = l.strip().split('\t')
if (lastcol == (- 1)):
lastcol = len(p)
if (len(p) != lastcol):
sys.stderr.write(f'''{bcolors.RED}FATAL: Uneven number of columns. We had {lastcol} but now {len(p)}
''')
sys.exit((- 1))
t = taxstring(p[taxcol], verbose)
print('\t'.join((p + t)))
f.close() | def parse_blast(bf, taxcol, verbose=False):
'\n\n :param bf: the blast output file\n :param taxcol: the column that contains the taxonomy ID\n :param verbose: more output\n :return:\n '
lastcol = (- 1)
if bf.endswith('.gz'):
f = gzip.open(bf, 'rt')
else:
f = open(bf, 'r')
for l in f:
p = l.strip().split('\t')
if (lastcol == (- 1)):
lastcol = len(p)
if (len(p) != lastcol):
sys.stderr.write(f'''{bcolors.RED}FATAL: Uneven number of columns. We had {lastcol} but now {len(p)}
''')
sys.exit((- 1))
t = taxstring(p[taxcol], verbose)
print('\t'.join((p + t)))
f.close()<|docstring|>:param bf: the blast output file
:param taxcol: the column that contains the taxonomy ID
:param verbose: more output
:return:<|endoftext|> |
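`parse_blast` opens its input transparently depending on the `.gz` suffix. That idiom is worth isolating; a small sketch (not the repository's code):

```python
import gzip


def open_maybe_gzip(path):
    # gzip.open in text mode ('rt') yields str lines, matching plain open(path, 'r').
    return gzip.open(path, 'rt') if path.endswith('.gz') else open(path, 'r')
```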
3a55da6f6b0b559e2f7870b4fe086a8d3c78efd78369cc035a65d9d363de93db | @weak_script
def ssim_loss(input, target, max_val, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, size_average=None, reduce=None, reduction='mean'):
"ssim_loss(input, target, max_val, filter_size, k1, k2,\n sigma, size_average=None, reduce=None, reduction='mean') -> Tensor\n Measures the structural similarity index (SSIM) error.\n See :class:`~torch.nn.SSIMLoss` for details.\n "
if (input.size() != target.size()):
raise ValueError('Expected input size ({}) to match target size ({}).'.format(input.size(0), target.size(0)))
if (input.device != target.device):
raise RuntimeError(f'The input device {input.device} and target device {target.device} do not match.')
dim = input.dim()
if (dim == 2):
input = input.expand(1, 1, input.size((- 2)), input.size((- 1)))
target = target.expand(1, 1, target.size((- 2)), target.size((- 1)))
elif (dim == 3):
input = input.expand(1, input.size((- 3)), input.size((- 2)), input.size((- 1)))
target = target.expand(1, target.size((- 3)), target.size((- 2)), target.size((- 1)))
elif (dim != 4):
raise ValueError('Expected 2, 3, or 4 dimensions (got {})'.format(dim))
if ((size_average is not None) or (reduce is not None)):
reduction = _Reduction.legacy_get_string(size_average, reduce)
channel = input.size(1)
kernel = _fspecial_gaussian(filter_size, channel, sigma, input.device)
(ret, _) = _ssim(input, target, max_val, k1, k2, channel, kernel)
if (reduction != 'none'):
ret = (torch.mean(ret) if (reduction == 'mean') else torch.sum(ret))
return ret | ssim_loss(input, target, max_val, filter_size, k1, k2,
sigma, size_average=None, reduce=None, reduction='mean') -> Tensor
Measures the structural similarity index (SSIM) error.
See :class:`~torch.nn.SSIMLoss` for details. | metrics/my_ssim.py | ssim_loss | veritas9872/fastMRI-kspace | 18 | python | @weak_script
def ssim_loss(input, target, max_val, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, size_average=None, reduce=None, reduction='mean'):
"ssim_loss(input, target, max_val, filter_size, k1, k2,\n sigma, size_average=None, reduce=None, reduction='mean') -> Tensor\n Measures the structural similarity index (SSIM) error.\n See :class:`~torch.nn.SSIMLoss` for details.\n "
if (input.size() != target.size()):
raise ValueError('Expected input size ({}) to match target size ({}).'.format(input.size(0), target.size(0)))
if (input.device != target.device):
raise RuntimeError(f'The input device {input.device} and target device {target.device} do not match.')
dim = input.dim()
if (dim == 2):
input = input.expand(1, 1, input.size((- 2)), input.size((- 1)))
target = target.expand(1, 1, target.size((- 2)), target.size((- 1)))
elif (dim == 3):
input = input.expand(1, input.size((- 3)), input.size((- 2)), input.size((- 1)))
target = target.expand(1, target.size((- 3)), target.size((- 2)), target.size((- 1)))
elif (dim != 4):
raise ValueError('Expected 2, 3, or 4 dimensions (got {})'.format(dim))
if ((size_average is not None) or (reduce is not None)):
reduction = _Reduction.legacy_get_string(size_average, reduce)
channel = input.size(1)
kernel = _fspecial_gaussian(filter_size, channel, sigma, input.device)
(ret, _) = _ssim(input, target, max_val, k1, k2, channel, kernel)
if (reduction != 'none'):
ret = (torch.mean(ret) if (reduction == 'mean') else torch.sum(ret))
return ret | @weak_script
def ssim_loss(input, target, max_val, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, size_average=None, reduce=None, reduction='mean'):
"ssim_loss(input, target, max_val, filter_size, k1, k2,\n sigma, size_average=None, reduce=None, reduction='mean') -> Tensor\n Measures the structural similarity index (SSIM) error.\n See :class:`~torch.nn.SSIMLoss` for details.\n "
if (input.size() != target.size()):
raise ValueError('Expected input size ({}) to match target size ({}).'.format(input.size(0), target.size(0)))
if (input.device != target.device):
raise RuntimeError(f'The input device {input.device} and target device {target.device} do not match.')
dim = input.dim()
if (dim == 2):
input = input.expand(1, 1, input.size((- 2)), input.size((- 1)))
target = target.expand(1, 1, target.size((- 2)), target.size((- 1)))
elif (dim == 3):
input = input.expand(1, input.size((- 3)), input.size((- 2)), input.size((- 1)))
target = target.expand(1, target.size((- 3)), target.size((- 2)), target.size((- 1)))
elif (dim != 4):
raise ValueError('Expected 2, 3, or 4 dimensions (got {})'.format(dim))
if ((size_average is not None) or (reduce is not None)):
reduction = _Reduction.legacy_get_string(size_average, reduce)
channel = input.size(1)
kernel = _fspecial_gaussian(filter_size, channel, sigma, input.device)
(ret, _) = _ssim(input, target, max_val, k1, k2, channel, kernel)
if (reduction != 'none'):
ret = (torch.mean(ret) if (reduction == 'mean') else torch.sum(ret))
return ret<|docstring|>ssim_loss(input, target, max_val, filter_size, k1, k2,
sigma, size_average=None, reduce=None, reduction='mean') -> Tensor
Measures the structural similarity index (SSIM) error.
See :class:`~torch.nn.SSIMLoss` for details.<|endoftext|> |
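`ssim_loss` delegates to module-private helpers (`_fspecial_gaussian`, `_ssim`) that this row does not include. Judging by the `filter_size`/`sigma`/`channel` arguments, `_fspecial_gaussian` presumably builds a normalised per-channel Gaussian window; a sketch of that conventional construction (an assumption, not the repository's implementation):

```python
import torch


def gaussian_window(filter_size: int = 11, channel: int = 1, sigma: float = 1.5):
    # 1-D Gaussian centred on the window; the outer product gives the 2-D kernel,
    # normalised so the weights sum to 1.
    coords = torch.arange(filter_size, dtype=torch.float32) - (filter_size - 1) / 2.0
    g = torch.exp(-(coords ** 2) / (2.0 * sigma ** 2))
    kernel = torch.outer(g, g)
    kernel = kernel / kernel.sum()
    # Shape (channel, 1, H, W), suitable for a grouped conv2d: one window per channel.
    return kernel.expand(channel, 1, filter_size, filter_size).contiguous()


print(gaussian_window().shape)   # torch.Size([1, 1, 11, 11])
```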
38bc87e5154cf0773ad0415cabe8ae22e714b2dc8cf5f9802348b3ba91f0592c | def ms_ssim_loss(input, target, max_val, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, size_average=None, reduce=None, reduction='mean'):
"ms_ssim_loss(input, target, max_val, filter_size, k1, k2,\n sigma, size_average=None, reduce=None, reduction='mean') -> Tensor\n Measures the multi-scale structural similarity index (MS-SSIM) error.\n See :class:`~torch.nn.MSSSIMLoss` for details.\n "
if (input.size() != target.size()):
raise ValueError('Expected input size ({}) to match target size ({}).'.format(input.size(0), target.size(0)))
if (input.device != target.device):
raise RuntimeError(f'The input device {input.device} and target device {target.device} do not match.')
dim = input.dim()
if (dim == 2):
input = input.expand(1, 1, input.size((- 2)), input.size((- 1)))
target = target.expand(1, 1, target.size((- 2)), target.size((- 1)))
elif (dim == 3):
input = input.expand(1, input.size((- 3)), input.size((- 2)), input.size((- 1)))
target = target.expand(1, target.size((- 3)), target.size((- 2)), target.size((- 1)))
elif (dim != 4):
raise ValueError('Expected 2, 3, or 4 dimensions (got {})'.format(dim))
if ((size_average is not None) or (reduce is not None)):
reduction = _Reduction.legacy_get_string(size_average, reduce)
channel = input.size(1)
kernel = _fspecial_gaussian(filter_size, channel, sigma, input.device)
weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], device=input.device)
weights = weights.unsqueeze((- 1)).unsqueeze((- 1))
levels = weights.size(0)
mssim = list()
mcs = list()
for _ in range(levels):
(ssim, cs) = _ssim(input, target, max_val, k1, k2, channel, kernel)
ssim = ssim.mean((2, 3))
cs = cs.mean((2, 3))
mssim.append(ssim)
mcs.append(cs)
input = avg_pool2d(input, (2, 2))
target = avg_pool2d(target, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
p1 = (mcs ** weights)
p2 = (mssim ** weights)
ret = (torch.prod(p1[:(- 1)], 0) * p2[(- 1)])
if (reduction != 'none'):
ret = (torch.mean(ret) if (reduction == 'mean') else torch.sum(ret))
return ret | ms_ssim_loss(input, target, max_val, filter_size, k1, k2,
sigma, size_average=None, reduce=None, reduction='mean') -> Tensor
Measures the multi-scale structural similarity index (MS-SSIM) error.
See :class:`~torch.nn.MSSSIMLoss` for details. | metrics/my_ssim.py | ms_ssim_loss | veritas9872/fastMRI-kspace | 18 | python | def ms_ssim_loss(input, target, max_val, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, size_average=None, reduce=None, reduction='mean'):
"ms_ssim_loss(input, target, max_val, filter_size, k1, k2,\n sigma, size_average=None, reduce=None, reduction='mean') -> Tensor\n Measures the multi-scale structural similarity index (MS-SSIM) error.\n See :class:`~torch.nn.MSSSIMLoss` for details.\n "
if (input.size() != target.size()):
raise ValueError('Expected input size ({}) to match target size ({}).'.format(input.size(0), target.size(0)))
if (input.device != target.device):
raise RuntimeError(f'The input device {input.device} and target device {target.device} do not match.')
dim = input.dim()
if (dim == 2):
input = input.expand(1, 1, input.size((- 2)), input.size((- 1)))
target = target.expand(1, 1, target.size((- 2)), target.size((- 1)))
elif (dim == 3):
input = input.expand(1, input.size((- 3)), input.size((- 2)), input.size((- 1)))
target = target.expand(1, target.size((- 3)), target.size((- 2)), target.size((- 1)))
elif (dim != 4):
raise ValueError('Expected 2, 3, or 4 dimensions (got {})'.format(dim))
if ((size_average is not None) or (reduce is not None)):
reduction = _Reduction.legacy_get_string(size_average, reduce)
channel = input.size(1)
kernel = _fspecial_gaussian(filter_size, channel, sigma, input.device)
weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], device=input.device)
weights = weights.unsqueeze((- 1)).unsqueeze((- 1))
levels = weights.size(0)
mssim = list()
mcs = list()
for _ in range(levels):
(ssim, cs) = _ssim(input, target, max_val, k1, k2, channel, kernel)
ssim = ssim.mean((2, 3))
cs = cs.mean((2, 3))
mssim.append(ssim)
mcs.append(cs)
input = avg_pool2d(input, (2, 2))
target = avg_pool2d(target, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
p1 = (mcs ** weights)
p2 = (mssim ** weights)
ret = (torch.prod(p1[:(- 1)], 0) * p2[(- 1)])
if (reduction != 'none'):
ret = (torch.mean(ret) if (reduction == 'mean') else torch.sum(ret))
return ret | def ms_ssim_loss(input, target, max_val, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, size_average=None, reduce=None, reduction='mean'):
"ms_ssim_loss(input, target, max_val, filter_size, k1, k2,\n sigma, size_average=None, reduce=None, reduction='mean') -> Tensor\n Measures the multi-scale structural similarity index (MS-SSIM) error.\n See :class:`~torch.nn.MSSSIMLoss` for details.\n "
if (input.size() != target.size()):
raise ValueError('Expected input size ({}) to match target size ({}).'.format(input.size(0), target.size(0)))
if (input.device != target.device):
raise RuntimeError(f'The input device {input.device} and target device {target.device} do not match.')
dim = input.dim()
if (dim == 2):
input = input.expand(1, 1, input.size((- 2)), input.size((- 1)))
target = target.expand(1, 1, target.size((- 2)), target.size((- 1)))
elif (dim == 3):
input = input.expand(1, input.size((- 3)), input.size((- 2)), input.size((- 1)))
target = target.expand(1, target.size((- 3)), target.size((- 2)), target.size((- 1)))
elif (dim != 4):
raise ValueError('Expected 2, 3, or 4 dimensions (got {})'.format(dim))
if ((size_average is not None) or (reduce is not None)):
reduction = _Reduction.legacy_get_string(size_average, reduce)
channel = input.size(1)
kernel = _fspecial_gaussian(filter_size, channel, sigma, input.device)
weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], device=input.device)
weights = weights.unsqueeze((- 1)).unsqueeze((- 1))
levels = weights.size(0)
mssim = list()
mcs = list()
for _ in range(levels):
(ssim, cs) = _ssim(input, target, max_val, k1, k2, channel, kernel)
ssim = ssim.mean((2, 3))
cs = cs.mean((2, 3))
mssim.append(ssim)
mcs.append(cs)
input = avg_pool2d(input, (2, 2))
target = avg_pool2d(target, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
p1 = (mcs ** weights)
p2 = (mssim ** weights)
ret = (torch.prod(p1[:(- 1)], 0) * p2[(- 1)])
if (reduction != 'none'):
ret = (torch.mean(ret) if (reduction == 'mean') else torch.sum(ret))
return ret<|docstring|>ms_ssim_loss(input, target, max_val, filter_size, k1, k2,
sigma, size_average=None, reduce=None, reduction='mean') -> Tensor
Measures the multi-scale structural similarity index (MS-SSIM) error.
See :class:`~torch.nn.MSSSIMLoss` for details.<|endoftext|> |
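The last few lines of `ms_ssim_loss` combine the per-scale scores: contrast terms from all but the final (coarsest) scale, the SSIM term from the final scale, each raised to its weight. A toy numeric illustration of that combination rule (the per-scale scores are made up):

```python
import torch

weights = torch.tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
mcs = torch.tensor([0.98, 0.97, 0.95, 0.96, 0.99])     # contrast per scale (toy values)
mssim = torch.tensor([0.90, 0.91, 0.93, 0.94, 0.92])   # SSIM per scale (toy values)

# prod(mcs[:-1] ** w[:-1]) * mssim[-1] ** w[-1], as in the function above.
score = torch.prod(mcs[:-1] ** weights[:-1]) * (mssim[-1] ** weights[-1])
print(float(score))   # a single MS-SSIM score in (0, 1]
```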
8de0197d1f089d600596c29f44a4c0bd0fb3772f5e6f2e15ba8f793d7e4f2c8d | def dfs_impl(graph, cur_vertex, path_marked, marked, cycles, cur_path):
'\n plain depth first search implementation function.\n\n :param cur_vertex: currently processed vertex\n :param path_marked: list of booleans that defines whether a vertex is\n a part of path that connects current vertex and vertex dfs algo started\n with\n :param marked: visited vertices\n :param cycles: cycles detected\n :param cur_path: path to particular vertex from starting point\n :rtype cur_vertex: int\n :rtype path_marked: list<int>\n :rtype marked: list<int>\n :rtype cycles: list<list<int> >\n :rtype cur_path: list <int>\n :returns: if cur_vertex is a part of cycle\n :rtype: boolean\n '
result = False
cur_path.append(cur_vertex)
for next_vertex in graph.get_connected(cur_vertex):
if path_marked[next_vertex]:
cycles.append(([next_vertex], []))
path_marked[next_vertex] = False
result = True
if (not marked[next_vertex]):
path_marked[next_vertex] = True
marked[next_vertex] = True
if dfs_impl(graph, next_vertex, path_marked, marked, cycles, cur_path):
cycle = cycles[(- 1)][0]
if (cycle[0] != next_vertex):
path_marked[next_vertex] = False
cycle.append(next_vertex)
result = True
break
else:
cycles[(- 1)][1].extend(cur_path)
path_marked[next_vertex] = False
cur_path.pop()
return result | plain depth first search implementation function.
:param cur_vertex: currently processed vertex
:param path_marked: list of booleans that defines whether a vertex is
a part of path that connects current vertex and vertex dfs algo started
with
:param marked: visited vertices
:param cycles: cycles detected
:param cur_path: path to particular vertex from starting point
:rtype cur_vertex: int
:rtype path_marked: list<int>
:rtype marked: list<int>
:rtype cycles: list<list<int> >
:rtype cur_path: list <int>
:returns: if cur_vertex is a part of cycle
:rtype: boolean | inclusion_analysis/graph.py | dfs_impl | Andrey-Dubas/inclusion_analysis | 0 | python | def dfs_impl(graph, cur_vertex, path_marked, marked, cycles, cur_path):
'\n plain depth first search implementation function.\n\n :param cur_vertex: currently processed vertex\n :param path_marked: list of booleans that defines whether a vertex is\n a part of path that connects current vertex and vertex dfs algo started\n with\n :param marked: visited vertices\n :param cycles: cycles detected\n :param cur_path: path to particular vertex from starting point\n :rtype cur_vertex: int\n :rtype path_marked: list<int>\n :rtype marked: list<int>\n :rtype cycles: list<list<int> >\n :rtype cur_path: list <int>\n :returns: if cur_vertex is a part of cycle\n :rtype: boolean\n '
result = False
cur_path.append(cur_vertex)
for next_vertex in graph.get_connected(cur_vertex):
if path_marked[next_vertex]:
cycles.append(([next_vertex], []))
path_marked[next_vertex] = False
result = True
if (not marked[next_vertex]):
path_marked[next_vertex] = True
marked[next_vertex] = True
if dfs_impl(graph, next_vertex, path_marked, marked, cycles, cur_path):
cycle = cycles[(- 1)][0]
if (cycle[0] != next_vertex):
path_marked[next_vertex] = False
cycle.append(next_vertex)
result = True
break
else:
cycles[(- 1)][1].extend(cur_path)
path_marked[next_vertex] = False
cur_path.pop()
return result | def dfs_impl(graph, cur_vertex, path_marked, marked, cycles, cur_path):
'\n plain depth first search implementation function.\n\n :param cur_vertex: currently processed vertex\n :param path_marked: list of booleans that defines whether a vertex is\n a part of path that connects current vertex and vertex dfs algo started\n with\n :param marked: visited vertices\n :param cycles: cycles detected\n :param cur_path: path to particular vertex from starting point\n :rtype cur_vertex: int\n :rtype path_marked: list<int>\n :rtype marked: list<int>\n :rtype cycles: list<list<int> >\n :rtype cur_path: list <int>\n :returns: if cur_vertex is a part of cycle\n :rtype: boolean\n '
result = False
cur_path.append(cur_vertex)
for next_vertex in graph.get_connected(cur_vertex):
if path_marked[next_vertex]:
cycles.append(([next_vertex], []))
path_marked[next_vertex] = False
result = True
if (not marked[next_vertex]):
path_marked[next_vertex] = True
marked[next_vertex] = True
if dfs_impl(graph, next_vertex, path_marked, marked, cycles, cur_path):
cycle = cycles[(- 1)][0]
if (cycle[0] != next_vertex):
path_marked[next_vertex] = False
cycle.append(next_vertex)
result = True
break
else:
cycles[(- 1)][1].extend(cur_path)
path_marked[next_vertex] = False
cur_path.pop()
return result<|docstring|>plain depth first search implementation function.
:param cur_vertex: currently processed vertex
:param path_marked: list of booleans that defines whether a vertex is
a part of path that connects current vertex and vertex dfs algo started
with
:param marked: visited vertices
:param cycles: cycles detected
:param cur_path: path to particular vertex from starting point
:rtype cur_vertex: int
:rtype path_marked: list<int>
:rtype marked: list<int>
:rtype cycles: list<list<int> >
:rtype cur_path: list <int>
:returns: if cur_vertex is a part of cycle
:rtype: boolean<|endoftext|> |
314bdfccb97ed0ec6a4c819a68699663654f8f51302773a57625835351eeddc4 | def cycle_detect(graph, root_vertex):
'\n cycle detection function\n\n :param graph: processed graph\n :param root_vertex: a vertex to start processing with\n :type graph: graph\n :type root_vertex: root_vertex\n :return: a list of pairs that combine a cycle detected and a\n path to a vertex cycle starts with\n :rtype: list<(list, list)>\n\n '
path_marked = ([False] * len(graph))
marked = ([False] * len(graph))
cycles = []
dfs_impl(graph, root_vertex, path_marked, marked, cycles, [])
return cycles | cycle detection function
:param graph: processed graph
:param root_vertex: a vertex to start processing with
:type graph: graph
:type root_vertex: root_vertex
:return: a list of pairs that combine a cycle detected and a
path to a vertex cycle starts with
:rtype: list<(list, list)> | inclusion_analysis/graph.py | cycle_detect | Andrey-Dubas/inclusion_analysis | 0 | python | def cycle_detect(graph, root_vertex):
'\n cycle detection function\n\n :param graph: processed graph\n :param root_vertex: a vertex to start processing with\n :type graph: graph\n :type root_vertex: root_vertex\n :return: a list of pairs that combine a cycle detected and a\n path to a vertex cycle starts with\n :rtype: list<(list, list)>\n\n '
path_marked = ([False] * len(graph))
marked = ([False] * len(graph))
cycles = []
dfs_impl(graph, root_vertex, path_marked, marked, cycles, [])
return cycles | def cycle_detect(graph, root_vertex):
'\n cycle detection function\n\n :param graph: processed graph\n :param root_vertex: a vertex to start processing with\n :type graph: graph\n :type root_vertex: root_vertex\n :return: a list of pairs that combine a cycle detected and a\n path to a vertex cycle starts with\n :rtype: list<(list, list)>\n\n '
path_marked = ([False] * len(graph))
marked = ([False] * len(graph))
cycles = []
dfs_impl(graph, root_vertex, path_marked, marked, cycles, [])
return cycles<|docstring|>cycle detection function
:param graph: processed graph
:param root_vertex: a vertex to start processing with
:type graph: graph
:type root_vertex: root_vertex
:return: a list of pairs that combine a cycle detected and a
path to a vertex cycle starts with
:rtype: list<(list, list)><|endoftext|> |
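A hypothetical usage sketch of the module-level `cycle_detect` with the `Graph` class whose methods follow below. It assumes `graph.py` is importable as a whole (including a `__len__` that `cycle_detect` relies on but that is not shown here); note the module targets Python 2 (it uses `dict.has_key`):

```python
from graph import Graph, cycle_detect   # hypothetical import of the repository module

g = Graph()
g.connect(0, 1)
g.connect(1, 2)
g.connect(2, 0)   # closes the cycle 0 -> 1 -> 2 -> 0
g.connect(2, 3)   # dead-end branch, introduces no cycle

# Each entry pairs a detected cycle with the path that reached it.
for cycle, path in cycle_detect(g, 0):
    print(cycle, path)
```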
0868527f38fd025905ed8edf316d8c1bf0b38b77aec6fd1853311ed4267b8d84 | def connect(self, from_vertex, to_vertex):
'\n sets a one-direction relation (directed edge) between vertices.\n from_vertex -> to_vertex\n\n :param from_vertex: index of vertex that edge goes from\n :param to_vertex: index of vertex that edge goes to\n :type from_vertex: int\n :type to_vertex: int\n :return: None\n '
if (not self.__vertices.has_key(from_vertex)):
self.add_vertex(from_vertex)
if (not self.__vertices.has_key(to_vertex)):
self.add_vertex(to_vertex)
self.__vertices[from_vertex].append(to_vertex) | sets a one-direction relation (directed edge) between vertices.
from_vertex -> to_vertex
:param from_vertex: index of vertex that edge goes from
:param to_vertex: index of vertex that edge goes to
:type from_vertex: int
:type to_vertex: int
:return: None | inclusion_analysis/graph.py | connect | Andrey-Dubas/inclusion_analysis | 0 | python | def connect(self, from_vertex, to_vertex):
'\n sets a one-direction relation (directed edge) between vertices.\n from_vertex -> to_vertex\n\n :param from_vertex: index of vertex that edge goes from\n :param to_vertex: index of vertex that edge goes to\n :type from_vertex: int\n :type to_vertex: int\n :return: None\n '
if (not self.__vertices.has_key(from_vertex)):
self.add_vertex(from_vertex)
if (not self.__vertices.has_key(to_vertex)):
self.add_vertex(to_vertex)
self.__vertices[from_vertex].append(to_vertex) | def connect(self, from_vertex, to_vertex):
'\n sets a one-direction relation (directed edge) between vertices.\n from_vertex -> to_vertex\n\n :param from_vertex: index of vertex that edge goes from\n :param to_vertex: index of vertex that edge goes to\n :type from_vertex: int\n :type to_vertex: int\n :return: None\n '
if (not self.__vertices.has_key(from_vertex)):
self.add_vertex(from_vertex)
if (not self.__vertices.has_key(to_vertex)):
self.add_vertex(to_vertex)
self.__vertices[from_vertex].append(to_vertex)<|docstring|>sets a one-direction relation (directed edge) between vertices.
from_vertex -> to_vertex
:param from_vertex: index of vertex that edge goes from
:param to_vertex: index of vertex that edge goes to
:type from_vertex: int
:type to_vertex: int
:return: None<|endoftext|> |
1accd8d484f40fb1aada7cbf898eda678cbac3bf89d2b0c7d215647ba8320ace | def is_adjacent(self, from_vertex, to_vertex):
' checks if there is an edge between vertices '
return (to_vertex in self.__vertices[from_vertex]) | checks if there is an edge between vertices | inclusion_analysis/graph.py | is_adjacent | Andrey-Dubas/inclusion_analysis | 0 | python | def is_adjacent(self, from_vertex, to_vertex):
' '
return (to_vertex in self.__vertices[from_vertex]) | def is_adjacent(self, from_vertex, to_vertex):
' '
return (to_vertex in self.__vertices[from_vertex])<|docstring|>checks if there is an edge between vertices<|endoftext|> |
a7e6567d1a9f239fe5a17a72b248c38c4db6ccdf21de896e8e25e8807d0ff2db | def get_connected(self, from_vertex):
'\n get all vertices that are connected directly to the particular one\n\n :param from_vertex: particular vertex\n :rtype: list<int>\n '
if isinstance(from_vertex, int):
return self.__vertices[from_vertex] | get all vertices that are connected directly to the particular one
:param from_vertex: particular vertex
:rtype: list<int> | inclusion_analysis/graph.py | get_connected | Andrey-Dubas/inclusion_analysis | 0 | python | def get_connected(self, from_vertex):
'\n get all vertices that are connected directly to the particular one\n\n :param from_vertex: particular vertex\n :rtype: list<int>\n '
if isinstance(from_vertex, int):
return self.__vertices[from_vertex] | def get_connected(self, from_vertex):
'\n get all vertices that are connected directly to the particular one\n\n :param from_vertex: particular vertex\n :rtype: list<int>\n '
if isinstance(from_vertex, int):
return self.__vertices[from_vertex]<|docstring|>get all vertices that are connected directly to the particular one
:param from_vertex: particular vertex
:rtype: list<int><|endoftext|> |
a34c7628865308fe1aa5608701682e5bb2a38473abf9cc68f533305bf9ebf536 | def add_vertex(self, index):
'\n add an informational vertex to the graph with\n :param data: an information contained by vertex\n :return: None\n '
self.__vertices[index] = [] | add an informational vertex to the graph with
:param data: an information contained by vertex
:return: None | inclusion_analysis/graph.py | add_vertex | Andrey-Dubas/inclusion_analysis | 0 | python | def add_vertex(self, index):
'\n add an informational vertex to the graph with\n :param data: an information contained by vertex\n :return: None\n '
self.__vertices[index] = [] | def add_vertex(self, index):
'\n add an informational vertex to the graph with\n :param data: an information contained by vertex\n :return: None\n '
self.__vertices[index] = []<|docstring|>add an informational vertex to the graph with
:param data: an information contained by vertex
:return: None<|endoftext|> |
b02e32717891099f25e9b600e62520b0b286b9fb11d31588ec972b03640a4c7f | def has_vertex(self, index):
"\n checks if graph contains a vertex with particular information\n :param name: an info we're looking for\n :return: Boolean\n "
return self.__vertices.has_key(index) | checks if graph contains a vertex with particular information
:param name: an info we're looking for
:return: Boolean | inclusion_analysis/graph.py | has_vertex | Andrey-Dubas/inclusion_analysis | 0 | python | def has_vertex(self, index):
"\n checks if graph contains a vertex with particular information\n :param name: an info we're looking for\n :return: Boolean\n "
return self.__vertices.has_key(index) | def has_vertex(self, index):
"\n checks if graph contains a vertex with particular information\n :param name: an info we're looking for\n :return: Boolean\n "
return self.__vertices.has_key(index)<|docstring|>checks if graph contains a vertex with particular information
:param name: an info we're looking for
:return: Boolean<|endoftext|> |
7115927f32fe33fd87aa3d8c658a7d65195e134ba804d68c4d0662c2337305d9 | def __init__(self):
'\n __graph is a graph of indexes, each index represents a file\n __name_to_index is a dict whose key is filename and its value is index\n __index_name is a dict whose key is index and its value is filename\n '
self.__graph = Graph()
self.__name_to_index = {}
self.__index_name = {} | __graph is a graph of indexes, each index represents a file
__name_to_index is a dict whose key is filename and its value is index
__index_name is a dict whose key is index and its value is filename | inclusion_analysis/graph.py | __init__ | Andrey-Dubas/inclusion_analysis | 0 | python | def __init__(self):
'\n __graph is a graph of indexes, each index represents a file\n __name_to_index is a dict whose key is filename and its value is index\n __index_name is a dict whose key is index and its value is filename\n '
self.__graph = Graph()
self.__name_to_index = {}
self.__index_name = {} | def __init__(self):
'\n __graph is a graph of indexes, each index represents a file\n __name_to_index is a dict whose key is filename and its value is index\n __index_name is a dict whose key is index and its value is filename\n '
self.__graph = Graph()
self.__name_to_index = {}
self.__index_name = {}<|docstring|>__graph is a graph of indexes, each index represents a file
__name_to_index is a dict whose key is filename and its value is index
__index_name is a dict whose key is index and its value is filename<|endoftext|> |
dc2f91b923e0b5108e88a3bb5ef8d07d596bb664ff85345af3f056e23cf2ab42 | def get_name_by_index(self, index):
' returns filename by its index '
return self.__index_name[index] | returns filename by its index | inclusion_analysis/graph.py | get_name_by_index | Andrey-Dubas/inclusion_analysis | 0 | python | def get_name_by_index(self, index):
' '
return self.__index_name[index] | def get_name_by_index(self, index):
' '
return self.__index_name[index]<|docstring|>returns filename by its index<|endoftext|> |
3e68c3640786392f9b8d7905dcfc0b0f891d5453345f5c733bd3f0daea3960e0 | def get_index_by_name(self, name):
" returns file's index by its name "
return self.__name_to_index[name] | returns file's index by its name | inclusion_analysis/graph.py | get_index_by_name | Andrey-Dubas/inclusion_analysis | 0 | python | def get_index_by_name(self, name):
" "
return self.__name_to_index[name] | def get_index_by_name(self, name):
" "
return self.__name_to_index[name]<|docstring|>returns file's index by its name<|endoftext|> |
e939fa09a72ce46e72b04bfbbbbb4e1b57fadf352c837b926eb54eefd621dcd8 | def connect(self, from_vertex, to_vertex):
'\n sets a one-direction relation between vertices. from_vertex -> to_vertex\n\n :param from_vertex: filename that contains inclusion\n :param to_vertex: included filename\n :type from_vertex: str\n :type to_vertex: str\n :return: None\n '
if isinstance(from_vertex, str):
if (not self.__name_to_index.has_key(from_vertex)):
self.add_vertex(from_vertex)
from_vertex_index = self.__name_to_index[from_vertex]
else:
raise ValueError('vertices must be names of files')
if isinstance(to_vertex, str):
if (not self.__name_to_index.has_key(to_vertex)):
self.add_vertex(to_vertex)
to_vertex_index = self.__name_to_index[to_vertex]
else:
raise ValueError('vertices must be names of files')
self.__graph.connect(from_vertex_index, to_vertex_index) | sets a one-direction relation between vertices. from_vertex -> to_vertex
:param from_vertex: filename that contains inclusion
:param to_vertex: included filename
:type from_vertex: str
:type to_vertex: str
:return: None | inclusion_analysis/graph.py | connect | Andrey-Dubas/inclusion_analysis | 0 | python | def connect(self, from_vertex, to_vertex):
'\n sets a one-direction relation between vertices. from_vertex -> to_vertex\n\n :param from_vertex: filename that contains inclusion\n :param to_vertex: included filename\n :type from_vertex: str\n :type to_vertex: str\n :return: None\n '
if isinstance(from_vertex, str):
if (not self.__name_to_index.has_key(from_vertex)):
self.add_vertex(from_vertex)
from_vertex_index = self.__name_to_index[from_vertex]
else:
raise ValueError('vertices must be names of files')
if isinstance(to_vertex, str):
if (not self.__name_to_index.has_key(to_vertex)):
self.add_vertex(to_vertex)
to_vertex_index = self.__name_to_index[to_vertex]
else:
raise ValueError('vertices must be names of files')
self.__graph.connect(from_vertex_index, to_vertex_index) | def connect(self, from_vertex, to_vertex):
'\n sets a one-direction relation between vertices. from_vertex -> to_vertex\n\n :param from_vertex: filename that contains inclusion\n :param to_vertex: included filename\n :type from_vertex: str\n :type to_vertex: str\n :return: None\n '
if isinstance(from_vertex, str):
if (not self.__name_to_index.has_key(from_vertex)):
self.add_vertex(from_vertex)
from_vertex_index = self.__name_to_index[from_vertex]
else:
raise ValueError('vertices must be names of files')
if isinstance(to_vertex, str):
if (not self.__name_to_index.has_key(to_vertex)):
self.add_vertex(to_vertex)
to_vertex_index = self.__name_to_index[to_vertex]
else:
raise ValueError('vertices must be names of files')
self.__graph.connect(from_vertex_index, to_vertex_index)<|docstring|>sets a one-direction relation between vertices. from_vertex -> to_vertex
:param from_vertex: filename that contains inclusion
:param to_vertex: included filename
:type from_vertex: str
:type to_vertex: str
:return: None<|endoftext|> |
2a71183de3d9407f54bac87a1d3766aa2c1081c302d325228b121656bcd52a34 | def is_adjacent(self, from_vertex, to_vertex):
' returns whether to_vertex is adjacent to from_vertex '
from_vertex = self.get_index_by_name(from_vertex)
to_vertex = self.get_index_by_name(to_vertex)
return self.__graph.is_adjacent(from_vertex, to_vertex) | returns whether to_vertex is adjacent to from_vertex | inclusion_analysis/graph.py | is_adjacent | Andrey-Dubas/inclusion_analysis | 0 | python | def is_adjacent(self, from_vertex, to_vertex):
' '
from_vertex = self.get_index_by_name(from_vertex)
to_vertex = self.get_index_by_name(to_vertex)
return self.__graph.is_adjacent(from_vertex, to_vertex) | def is_adjacent(self, from_vertex, to_vertex):
' '
from_vertex = self.get_index_by_name(from_vertex)
to_vertex = self.get_index_by_name(to_vertex)
return self.__graph.is_adjacent(from_vertex, to_vertex)<|docstring|>returns whether to_vertex is adjacent to from_vertex<|endoftext|> |
143d015101d453951827a4ba41f49799cfe7cc02639b149aab6de2db1bb060c8 | def get_connected(self, from_vertex):
'\n get all vertices that are connected directly to the particular one\n\n :param from_vertex: particular vertex\n :type from_vertex: str\n :returns: all adjacent vertices\n :rtype: list <int>\n '
if isinstance(from_vertex, int):
return self.__vertices[from_vertex] | get all vertices that are connected directly to the particular one
:param from_vertex: particular vertex
:type from_vertex: str
:returns: all adjacent vertices
:rtype: list <int> | inclusion_analysis/graph.py | get_connected | Andrey-Dubas/inclusion_analysis | 0 | python | def get_connected(self, from_vertex):
'\n get all vertices that are connected directly to the particular one\n\n :param from_vertex: particular vertex\n :type from_vertex: str\n :returns: all adjacent vertices\n :rtype: list <int>\n '
if isinstance(from_vertex, int):
return self.__vertices[from_vertex] | def get_connected(self, from_vertex):
'\n get all vertices that are connected directly to the particular one\n\n :param from_vertex: particular vertex\n :type from_vertex: str\n :returns: all adjacent vertices\n :rtype: list <int>\n '
if isinstance(from_vertex, int):
return self.__vertices[from_vertex]<|docstring|>get all vertices that are connected directly to the particular one
:param from_vertex: particular vertex
:type from_vertex: str
:returns: all adjacent vertices
:rtype: list <int><|endoftext|> |
1a23b53f207f675723d206bf1d3be317a2c113e2d7270a831942695281300cb0 | def add_vertex(self, data):
'\n add an informational vertex to the graph with\n\n :param data: an information contained by vertex\n :type data: str\n :rtype: None\n '
self.__name_to_index[data] = len(self)
self.__index_name[len(self)] = data
self.__graph.add_vertex(len(self)) | add an informational vertex to the graph with
:param data: an information contained by vertex
:type data: str
:rtype: None | inclusion_analysis/graph.py | add_vertex | Andrey-Dubas/inclusion_analysis | 0 | python | def add_vertex(self, data):
'\n add an informational vertex to the graph with\n\n :param data: an information contained by vertex\n :type data: str\n :rtype: None\n '
self.__name_to_index[data] = len(self)
self.__index_name[len(self)] = data
self.__graph.add_vertex(len(self)) | def add_vertex(self, data):
'\n add a informational vertex to the graph with\n\n :param data: an information contained by vertex\n :type data: str\n :rtype: None\n '
self.__name_to_index[data] = len(self)
self.__index_name[len(self)] = data
self.__graph.add_vertex(len(self))<|docstring|>add an informational vertex to the graph
:param data: the information contained by the vertex
:type data: str
:rtype: None<|endoftext|> |
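
Because `add_vertex` assigns each new vertex the next free index, `len(self)`, insertion order fixes the name/index mapping; a short sketch with the hypothetical `NamedGraph` again:

g = NamedGraph()                        # hypothetical class name
g.add_vertex('a.h')                     # first vertex receives index 0
g.add_vertex('b.h')                     # second vertex receives index 1
assert g.get_index_by_name('b.h') == 1
assert g.get_name_by_index(0) == 'a.h'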
2368e075e902fa9750a792f88690daf31221c3c850e3dc2bd76d9ca7be61f66d | def has_vertex(self, name):
'\n checks whether the graph contains a vertex with particular information\n\n :param name: the information we are looking for\n :type name: str\n :return: whether the graph contains the particular filename\n :rtype: bool\n '
return (name in self.__vertices) | checks whether the graph contains a vertex with particular information
:param name: the information we are looking for
:type name: str
:return: whether the graph contains the particular filename
:rtype: bool | inclusion_analysis/graph.py | has_vertex | Andrey-Dubas/inclusion_analysis | 0 | python | def has_vertex(self, name):
'\n checks whether the graph contains a vertex with particular information\n\n :param name: the information we are looking for\n :type name: str\n :return: whether the graph contains the particular filename\n :rtype: bool\n '
return (name in self.__vertices) | def has_vertex(self, name):
'\n checks whether the graph contains a vertex with particular information\n\n :param name: the information we are looking for\n :type name: str\n :return: whether the graph contains the particular filename\n :rtype: bool\n '
return (name in self.__vertices)<|docstring|>checks whether the graph contains a vertex with particular information
:param name: the information we are looking for
:type name: str
:return: whether the graph contains the particular filename
:rtype: bool<|endoftext|>
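
With the membership test written as a plain `in` check (valid on both Python 2 and 3, unlike the removed dict.has_key), usage is simply:

g = NamedGraph()                        # hypothetical class name
g.add_vertex('main.cpp')
assert g.has_vertex('main.cpp')
assert not g.has_vertex('missing.h')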
7f904aeed0c6fda6915f17809842bdd984a954ffc93191b35620a14d304e1801 | def cycle_detect(self, root_vertex):
'\n detects all cycles of the graph\n\n :param root_vertex: the vertex the graph traversal starts from\n :type root_vertex: str\n :return: a list of pairs combining a detected cycle and the path to the vertex the cycle starts with\n '
root_vertex = self.get_index_by_name(root_vertex)
cycles = cycle_detect(self.__graph, root_vertex)
named_cycles = []
for cycle in cycles:
named_cycles.append(([self.get_name_by_index(index) for index in cycle[0]], [self.get_name_by_index(index) for index in cycle[1]]))
return named_cycles | detects all cycles of the graph
:param root_vertex: the vertex the graph traversal starts from
:type root_vertex: str
:return: a list of pairs combining a detected cycle and the path to the vertex the cycle starts with | inclusion_analysis/graph.py | cycle_detect | Andrey-Dubas/inclusion_analysis | 0 | python | def cycle_detect(self, root_vertex):
'\n detects all cycles of the graph\n\n :param root_vertex: the vertex the graph traversal starts from\n :type root_vertex: str\n :return: a list of pairs combining a detected cycle and the path to the vertex the cycle starts with\n '
root_vertex = self.get_index_by_name(root_vertex)
cycles = cycle_detect(self.__graph, root_vertex)
named_cycles = []
for cycle in cycles:
named_cycles.append(([self.get_name_by_index(index) for index in cycle[0]], [self.get_name_by_index(index) for index in cycle[1]]))
return named_cycles | def cycle_detect(self, root_vertex):
'\n detects all cycles of the graph\n\n :param root_vertex: the vertex the graph traversal starts from\n :type root_vertex: str\n :return: a list of pairs combining a detected cycle and the path to the vertex the cycle starts with\n '
root_vertex = self.get_index_by_name(root_vertex)
cycles = cycle_detect(self.__graph, root_vertex)
named_cycles = []
for cycle in cycles:
named_cycles.append(([self.get_name_by_index(index) for index in cycle[0]], [self.get_name_by_index(index) for index in cycle[1]]))
return named_cycles<|docstring|>detects all cycles of the graph
:param root_vertex: the vertex the graph traversal starts from
:type root_vertex: str
:return: a list of pairs combining a detected cycle and the path to the vertex the cycle starts with<|endoftext|>
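
A sketch of detecting an inclusion cycle with the hypothetical `NamedGraph` wrapper; each result pairs the named cycle with the path that reached its first vertex:

g = NamedGraph()                        # hypothetical class name
for name in ('a.h', 'b.h', 'c.h'):
    g.add_vertex(name)
g.add_edge('a.h', 'b.h')                # assumed method: a.h includes b.h
g.add_edge('b.h', 'c.h')
g.add_edge('c.h', 'a.h')                # closes the cycle a.h -> b.h -> c.h -> a.h
for cycle, path in g.cycle_detect('a.h'):
    print('cycle:', cycle, 'reached via:', path)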
9f410f30ed668655fffa2e60fe421c39b5b56c214a253a0f055c7c197fdd2364 | @staticmethod
def checkOptions(options):
'\n :return: True if dependent options changed, otherwise False.\n '
dependentChanges = MeshType_3d_heartventricles2.checkOptions(options)
options['Number of elements around LV free wall'] = 5
options['Number of elements around ventricular septum'] = 7
options['Number of elements around atria'] = 8
options['Number of elements around atrial septum'] = 2
for key in ['Atria base inner major axis length', 'Atria base inner minor axis length', 'Atrial septum thickness', 'Atrial base wall thickness', 'Atrial base slope degrees', 'Base height', 'Base thickness', 'Fibrous ring thickness', 'LV outlet inner diameter', 'LV outlet wall thickness', 'RV outlet inner diameter', 'RV outlet wall thickness', 'Ventricles outlet element length', 'Ventricles outlet spacing']:
if (options[key] < 0.0):
options[key] = 0.0
if (options['Atria major axis rotation degrees'] < (- 75.0)):
options['Atria major axis rotation degrees'] = (- 75.0)
elif (options['Atria major axis rotation degrees'] > 75.0):
options['Atria major axis rotation degrees'] = 75.0
return dependentChanges | :return: True if dependent options changed, otherwise False. | src/scaffoldmaker/meshtypes/meshtype_3d_heartventriclesbase2.py | checkOptions | keeran97/scaffoldmaker | 1 | python | @staticmethod
def checkOptions(options):
'\n \n '
dependentChanges = MeshType_3d_heartventricles2.checkOptions(options)
options['Number of elements around LV free wall'] = 5
options['Number of elements around ventricular septum'] = 7
options['Number of elements around atria'] = 8
options['Number of elements around atrial septum'] = 2
for key in ['Atria base inner major axis length', 'Atria base inner minor axis length', 'Atrial septum thickness', 'Atrial base wall thickness', 'Atrial base slope degrees', 'Base height', 'Base thickness', 'Fibrous ring thickness', 'LV outlet inner diameter', 'LV outlet wall thickness', 'RV outlet inner diameter', 'RV outlet wall thickness', 'Ventricles outlet element length', 'Ventricles outlet spacing']:
if (options[key] < 0.0):
options[key] = 0.0
if (options['Atria major axis rotation degrees'] < (- 75.0)):
options['Atria major axis rotation degrees'] = (- 75.0)
elif (options['Atria major axis rotation degrees'] > 75.0):
options['Atria major axis rotation degrees'] = 75.0
return dependentChanges | @staticmethod
def checkOptions(options):
'\n \n '
dependentChanges = MeshType_3d_heartventricles2.checkOptions(options)
options['Number of elements around LV free wall'] = 5
options['Number of elements around ventricular septum'] = 7
options['Number of elements around atria'] = 8
options['Number of elements around atrial septum'] = 2
for key in ['Atria base inner major axis length', 'Atria base inner minor axis length', 'Atrial septum thickness', 'Atrial base wall thickness', 'Atrial base slope degrees', 'Base height', 'Base thickness', 'Fibrous ring thickness', 'LV outlet inner diameter', 'LV outlet wall thickness', 'RV outlet inner diameter', 'RV outlet wall thickness', 'Ventricles outlet element length', 'Ventricles outlet spacing']:
if (options[key] < 0.0):
options[key] = 0.0
if (options['Atria major axis rotation degrees'] < (- 75.0)):
options['Atria major axis rotation degrees'] = (- 75.0)
elif (options['Atria major axis rotation degrees'] > 75.0):
options['Atria major axis rotation degrees'] = 75.0
return dependentChanges<|docstring|>:return: True if dependent options changed, otherwise False.<|endoftext|> |
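
A minimal sketch of the clamping behaviour above. It requires the scaffoldmaker package; the no-argument `getDefaultOptions()` call follows the reference in the `generateBaseMesh` docstring below, though the exact signature may differ between scaffoldmaker versions:

from scaffoldmaker.meshtypes.meshtype_3d_heartventriclesbase2 import MeshType_3d_heartventriclesbase2

options = MeshType_3d_heartventriclesbase2.getDefaultOptions()
options['Base height'] = -1.0                            # invalid: negative length
options['Atria major axis rotation degrees'] = 90.0      # outside the +/-75 degree range
MeshType_3d_heartventriclesbase2.checkOptions(options)   # mutates options in place
assert options['Base height'] == 0.0                     # clamped to zero
assert options['Atria major axis rotation degrees'] == 75.0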
a904880d7b7302c2940b704e7873110addc9a410e92b6d57a2794322284f9dc4 | @classmethod
def generateBaseMesh(cls, region, options):
'\n Generate the base tricubic Hermite mesh.\n :param region: Zinc region to define model in. Must be empty.\n :param options: Dict containing options. See getDefaultOptions().\n :return: list of AnnotationGroup\n '
elementsCountAroundLVFreeWall = options['Number of elements around LV free wall']
elementsCountAroundVSeptum = options['Number of elements around ventricular septum']
elementsCountAroundLV = (elementsCountAroundLVFreeWall + elementsCountAroundVSeptum)
elementsCountUpLVApex = options['Number of elements up LV apex']
elementsCountUpVSeptum = options['Number of elements up ventricular septum']
elementsCountUpLV = (elementsCountUpLVApex + elementsCountUpVSeptum)
elementsCountUpRV = (elementsCountUpVSeptum + 1)
elementsCountAroundRV = (elementsCountAroundVSeptum + 2)
elementsCountAroundAtria = options['Number of elements around atria']
elementsCountAtrialSeptum = options['Number of elements around atrial septum']
lvOuterHeight = options['LV outer height']
lvOuterRadius = options['LV outer radius']
lvFreeWallThickness = options['LV free wall thickness']
lvApexThickness = options['LV apex thickness']
rvHeight = options['RV inner height']
rvArcAroundRadians = math.radians(options['RV arc around degrees'])
rvFreeWallThickness = options['RV free wall thickness']
rvWidth = options['RV width']
rvExtraCrossRadiusBase = options['RV extra cross radius base']
vSeptumThickness = options['Ventricular septum thickness']
vSeptumBaseRadialDisplacement = options['Ventricular septum base radial displacement']
useCrossDerivatives = options['Use cross derivatives']
aBaseInnerMajorMag = (0.5 * options['Atria base inner major axis length'])
aBaseInnerMinorMag = (0.5 * options['Atria base inner minor axis length'])
aMajorAxisRadians = math.radians(options['Atria major axis rotation degrees'])
aSeptumThickness = options['Atrial septum thickness']
aBaseWallThickness = options['Atrial base wall thickness']
aBaseSlopeRadians = math.radians(options['Atrial base slope degrees'])
baseHeight = options['Base height']
baseThickness = options['Base thickness']
fibrousRingThickness = options['Fibrous ring thickness']
lvOutletInnerRadius = (options['LV outlet inner diameter'] * 0.5)
lvOutletWallThickness = options['LV outlet wall thickness']
lvOutletOuterRadius = (lvOutletInnerRadius + lvOutletWallThickness)
rvOutletInnerRadius = (options['RV outlet inner diameter'] * 0.5)
rvOutletWallThickness = options['RV outlet wall thickness']
rvOutletOuterRadius = (rvOutletInnerRadius + rvOutletWallThickness)
vOutletElementLength = options['Ventricles outlet element length']
vOutletInclineRadians = math.radians(options['Ventricles outlet incline degrees'])
vOutletSpacing = options['Ventricles outlet spacing']
annotationGroups = MeshType_3d_heartventricles2.generateBaseMesh(region, options)
lvGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('left ventricle myocardium'))
rvGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('right ventricle myocardium'))
vSeptumGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('interventricular septum'))
conusArteriosusGroup = AnnotationGroup(region, get_heart_term('conus arteriosus'))
annotationGroups += [conusArteriosusGroup]
lFibrousRingGroup = AnnotationGroup(region, get_heart_term('left fibrous ring'))
rFibrousRingGroup = AnnotationGroup(region, get_heart_term('right fibrous ring'))
fm = region.getFieldmodule()
fm.beginChange()
coordinates = findOrCreateFieldCoordinates(fm)
cache = fm.createFieldcache()
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodetemplate = nodes.createNodetemplate()
nodetemplate.defineField(coordinates)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_VALUE, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS1, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS2, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS3, 1)
nodetemplateLinearS3 = nodes.createNodetemplate()
nodetemplateLinearS3.defineField(coordinates)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_VALUE, 1)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS1, 1)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS2, 1)
nodeIdentifier = (getMaximumNodeIdentifier(nodes) + 1)
norl = elementsCountAroundLV
nowl = (1 + (elementsCountUpLV * norl))
nidl = ((nowl - norl) + 1)
nsdl = (nidl + elementsCountAroundVSeptum)
nedl = (nidl + elementsCountAroundLV)
norr = (elementsCountAroundRV - 1)
nowr = (elementsCountUpRV * norr)
nidr = ((((nowl * 2) + 1) + nowr) - norr)
elementsCountAroundOutlet = 6
defaultOutletScale3 = 0.5
nidca = (((nidl + nowl) + elementsCountAroundVSeptum) - 1)
nidcb = ((nidr + elementsCountAroundVSeptum) - 1)
cache.setNode(nodes.findNodeByIdentifier(nidca))
(result, pxa) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(nidcb))
(result, pxb) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
px = [(0.5 * (pxa[c] + pxb[c])) for c in range(3)]
node = nodes.findNodeByIdentifier(nidl)
cache.setNode(node)
(result, ax) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
node = nodes.findNodeByIdentifier(nidr)
cache.setNode(node)
(result, bx) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
bx = [(0.5 * (ax[i] + bx[i])) for i in range(2)]
bx.append(ax[2])
ax = [(bx[c] - px[c]) for c in range(3)]
ax = vector.normalise(ax)
baseRotationRadians = math.atan2(ax[1], ax[0])
outletSpacingRadians = (0.25 * math.pi)
outletSpacingHorizontal = (vOutletSpacing * math.cos(outletSpacingRadians))
outletSpacingVertical = (vOutletSpacing * math.sin(outletSpacingRadians))
cruxOffset = ((rvOutletOuterRadius + outletSpacingHorizontal) + (2.0 * lvOutletOuterRadius))
cx = [(px[c] + (ax[c] * cruxOffset)) for c in range(3)]
aBaseSlopeLength = (aBaseWallThickness * math.cos(aBaseSlopeRadians))
aBaseSlopeHeight = (aBaseWallThickness * math.sin(aBaseSlopeRadians))
cosOutletInclineRadians = math.cos(vOutletInclineRadians)
sinOutletInclineRadians = math.sin(vOutletInclineRadians)
lvOutletCentre = [(cx[0] - (ax[0] * lvOutletOuterRadius)), (cx[1] - (ax[1] * lvOutletOuterRadius)), (((baseHeight + baseThickness) - aBaseSlopeHeight) + (sinOutletInclineRadians * lvOutletOuterRadius))]
radiansPerElementAroundOutlet = ((2.0 * math.pi) / elementsCountAroundOutlet)
x = [0.0, 0.0, 0.0]
dx_ds1 = [0.0, 0.0, 0.0]
dx_ds3 = [0.0, 0.0, 0.0]
lvOutletNodeId = []
for n3 in range(2):
radius = (lvOutletInnerRadius if (n3 == 0) else lvOutletOuterRadius)
loAxis1 = [(radius * ax[c]) for c in range(3)]
loAxis2 = [((- loAxis1[1]) * cosOutletInclineRadians), (loAxis1[0] * cosOutletInclineRadians), ((- radius) * sinOutletInclineRadians)]
loAxis3 = vector.crossproduct3(loAxis1, loAxis2)
scale = (vOutletElementLength / vector.magnitude(loAxis3))
dx_ds2 = [(v * scale) for v in loAxis3]
outletNodeId = []
for n1 in range(elementsCountAroundOutlet):
radiansAround = (n1 * radiansPerElementAroundOutlet)
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
outletScale3 = ((vOutletSpacing / radius) if (n1 == 3) else defaultOutletScale3)
for c in range(3):
x[c] = ((lvOutletCentre[c] + (loAxis1[c] * cosRadiansAround)) + (loAxis2[c] * sinRadiansAround))
dx_ds1[c] = (radiansPerElementAroundOutlet * ((loAxis1[c] * (- sinRadiansAround)) + (loAxis2[c] * cosRadiansAround)))
node = nodes.createNode(nodeIdentifier, (nodetemplateLinearS3 if (n3 == 0) else nodetemplate))
outletNodeId.append(nodeIdentifier)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
if (n3 == 1):
dx_ds3 = [(outletScale3 * ((loAxis1[c] * cosRadiansAround) + (loAxis2[c] * sinRadiansAround))) for c in range(3)]
if (n1 in [2, 4]):
if (n1 == 2):
dx_ds3[2] = (- dx_ds3[2])
else:
dx_ds3[2] = ((- 2.0) * dx_ds3[2])
scale = ((radiansPerElementAroundOutlet * rvOutletOuterRadius) / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if (n1 == 0):
cruxCentreNodeId = nodeIdentifier
cruxCentre = [x[0], x[1], x[2]]
elif (n1 == 1):
cruxRightNodeId = nodeIdentifier
cruxRight = [x[0], x[1], x[2]]
elif (n1 == (elementsCountAroundOutlet - 1)):
cruxLeftNodeId = nodeIdentifier
cruxLeft = [x[0], x[1], x[2]]
elif (n1 == 3):
lvOutletOuterSpaceX = [x[0], x[1], x[2]]
nodeIdentifier += 1
lvOutletNodeId.append(outletNodeId)
outletCentreSpacing = ((lvOutletOuterRadius + outletSpacingHorizontal) + rvOutletOuterRadius)
rvOutletCentre = [(lvOutletCentre[c] - (outletCentreSpacing * ax[c])) for c in range(3)]
unitCrossX = vector.normalise([(- ax[1]), ax[0]])
rvOutletCentre[0] -= ((outletSpacingVertical * sinOutletInclineRadians) * unitCrossX[0])
rvOutletCentre[1] -= ((outletSpacingVertical * sinOutletInclineRadians) * unitCrossX[1])
rvOutletCentre[2] += (outletSpacingVertical * cosOutletInclineRadians)
rvOutletNodeId = []
for n3 in range(2):
radius = (rvOutletInnerRadius if (n3 == 0) else rvOutletOuterRadius)
roAxis1 = [(radius * ax[c]) for c in range(3)]
roAxis2 = [((- roAxis1[1]) * cosOutletInclineRadians), (roAxis1[0] * cosOutletInclineRadians), (radius * sinOutletInclineRadians)]
roAxis3 = vector.crossproduct3(roAxis1, roAxis2)
scale = (vOutletElementLength / vector.magnitude(roAxis3))
dx_ds2 = [(v * scale) for v in roAxis3]
outletNodeId = []
for n1 in range(elementsCountAroundOutlet):
radiansAround = (n1 * radiansPerElementAroundOutlet)
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
outletScale3 = ((vOutletSpacing / radius) if (n1 == 0) else defaultOutletScale3)
for c in range(3):
x[c] = ((rvOutletCentre[c] + (roAxis1[c] * cosRadiansAround)) + (roAxis2[c] * sinRadiansAround))
dx_ds1[c] = (radiansPerElementAroundOutlet * ((roAxis1[c] * (- sinRadiansAround)) + (roAxis2[c] * cosRadiansAround)))
hasDerivative3 = ((n3 == 1) and (n1 in [0, 5]))
node = nodes.createNode(nodeIdentifier, (nodetemplate if hasDerivative3 else nodetemplateLinearS3))
outletNodeId.append(nodeIdentifier)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
if hasDerivative3:
dx_ds3 = [(outletScale3 * ((roAxis1[c] * cosRadiansAround) + (roAxis2[c] * sinRadiansAround))) for c in range(3)]
if (n1 in [1, 5]):
if (n1 == 1):
dx_ds3[2] = (- dx_ds3[2])
else:
dx_ds3[2] = (4.0 * dx_ds3[2])
dx_ds3 = [(dx_ds1[c] + dx_ds3[c]) for c in range(3)]
mag3 = (radiansPerElementAroundOutlet * rvOutletOuterRadius)
scale = (mag3 / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if (n1 == 0):
rvOutletOuterSpaceX = [x[0], x[1], x[2]]
nodeIdentifier += 1
rvOutletNodeId.append(outletNodeId)
cache.setNode(nodes.findNodeByIdentifier(lvOutletNodeId[1][3]))
dx_ds3 = [(rvOutletOuterSpaceX[c] - lvOutletOuterSpaceX[c]) for c in range(3)]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
cache.setNode(nodes.findNodeByIdentifier(rvOutletNodeId[1][0]))
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [(- d) for d in dx_ds3])
aInnerMajorMag = aBaseInnerMajorMag
aInnerMinorMag = aBaseInnerMinorMag
aOuterMajorMag = (aInnerMajorMag + aBaseSlopeLength)
aOuterMinorMag = (aInnerMinorMag + aBaseSlopeLength)
laMajorAxisRadians = ((baseRotationRadians + (0.5 * math.pi)) - aMajorAxisRadians)
laInnerMajor = [(aInnerMajorMag * math.cos(laMajorAxisRadians)), (aInnerMajorMag * math.sin(laMajorAxisRadians)), 0.0]
laInnerMinor = [((- aInnerMinorMag) * math.sin(laMajorAxisRadians)), (aInnerMinorMag * math.cos(laMajorAxisRadians)), 0.0]
laOuterMajor = [(aOuterMajorMag * math.cos(laMajorAxisRadians)), (aOuterMajorMag * math.sin(laMajorAxisRadians)), 0.0]
laOuterMinor = [((- aOuterMinorMag) * math.sin(laMajorAxisRadians)), (aOuterMinorMag * math.cos(laMajorAxisRadians)), 0.0]
raMajorAxisRadians = ((baseRotationRadians - (0.5 * math.pi)) + aMajorAxisRadians)
raInnerMajor = [(aInnerMajorMag * math.cos(raMajorAxisRadians)), (aInnerMajorMag * math.sin(raMajorAxisRadians)), 0.0]
raInnerMinor = [((- aInnerMinorMag) * math.sin(raMajorAxisRadians)), (aInnerMinorMag * math.cos(raMajorAxisRadians)), 0.0]
raOuterMajor = [(aOuterMajorMag * math.cos(raMajorAxisRadians)), (aOuterMajorMag * math.sin(raMajorAxisRadians)), 0.0]
raOuterMinor = [((- aOuterMinorMag) * math.sin(raMajorAxisRadians)), (aOuterMinorMag * math.cos(raMajorAxisRadians)), 0.0]
rotRadians = (baseRotationRadians + (0.5 * math.pi))
cosRotRadians = math.cos(rotRadians)
sinRotRadians = math.sin(rotRadians)
cruxLeftModX = (((cruxLeft[0] - cruxCentre[0]) * cosRotRadians) + ((cruxLeft[1] - cruxCentre[1]) * sinRotRadians))
cruxLeftModY = (((cruxLeft[0] - cruxCentre[0]) * (- sinRotRadians)) + ((cruxLeft[1] - cruxCentre[1]) * cosRotRadians))
axInnerMod = (aInnerMajorMag * math.cos(aMajorAxisRadians))
bxInnerMod = (aInnerMinorMag * math.sin(aMajorAxisRadians))
laSeptumRadians = math.atan2(bxInnerMod, axInnerMod)
raSeptumRadians = (- laSeptumRadians)
laCentreModX = ((((- 0.5) * aSeptumThickness) - (axInnerMod * math.cos(laSeptumRadians))) - (bxInnerMod * math.sin(laSeptumRadians)))
axMod = (aOuterMajorMag * math.cos(aMajorAxisRadians))
ayMod = (aOuterMajorMag * (- math.sin(aMajorAxisRadians)))
bxMod = (aOuterMinorMag * math.sin(aMajorAxisRadians))
byMod = (aOuterMinorMag * math.cos(aMajorAxisRadians))
laCruxLeftRadians = getEllipseRadiansToX(axMod, bxMod, (cruxLeftModX - laCentreModX), (math.pi * 0.5))
laCentreModY = ((cruxLeftModY - (ayMod * math.cos(laCruxLeftRadians))) - (byMod * math.sin(laCruxLeftRadians)))
laCentreX = ((cruxCentre[0] + (laCentreModX * cosRotRadians)) + (laCentreModY * (- sinRotRadians)))
laCentreY = ((cruxCentre[1] + (laCentreModX * sinRotRadians)) + (laCentreModY * cosRotRadians))
raCruxLeftRadians = (- laCruxLeftRadians)
raCentreX = ((cruxCentre[0] - (laCentreModX * cosRotRadians)) + (laCentreModY * (- sinRotRadians)))
raCentreY = ((cruxCentre[1] - (laCentreModX * sinRotRadians)) + (laCentreModY * cosRotRadians))
aCentreOuterZ = (baseHeight + baseThickness)
aCentreInnerZ = (aCentreOuterZ - aBaseSlopeHeight)
atrialPerimeterLength = getApproximateEllipsePerimeter(aOuterMajorMag, aOuterMinorMag)
atrialSeptumCentreToCruxLeftLength = getEllipseArcLength(aOuterMajorMag, aOuterMinorMag, laSeptumRadians, laCruxLeftRadians)
atrialSeptumElementLength = (atrialSeptumCentreToCruxLeftLength / (1.0 + (elementsCountAtrialSeptum * 0.5)))
atrialFreeWallElementLength = ((atrialPerimeterLength - (atrialSeptumElementLength * (elementsCountAtrialSeptum + 2))) / ((elementsCountAroundAtria - elementsCountAtrialSeptum) - 2))
atrialTransitionElementLength = (0.5 * (atrialSeptumElementLength + atrialFreeWallElementLength))
laRadians = []
laOuterDerivatives = []
radiansAround = laSeptumRadians
if ((elementsCountAtrialSeptum % 2) == 1):
radiansAround = updateEllipseAngleByArcLength(aOuterMajorMag, aOuterMinorMag, radiansAround, (0.5 * atrialSeptumElementLength))
outerDerivative = atrialSeptumElementLength
lan1CruxLimit = ((elementsCountAtrialSeptum // 2) + 1)
lan1SeptumLimit = ((elementsCountAroundAtria - ((elementsCountAtrialSeptum + 1) // 2)) - 1)
for n1 in range(elementsCountAroundAtria):
laRadians.append(radiansAround)
laOuterDerivatives.append(outerDerivative)
if ((n1 < lan1CruxLimit) or (n1 > lan1SeptumLimit)):
elementLength = atrialSeptumElementLength
outerDerivative = atrialSeptumElementLength
elif (n1 == lan1CruxLimit):
elementLength = atrialTransitionElementLength
outerDerivative = atrialFreeWallElementLength
elif (n1 == lan1SeptumLimit):
elementLength = atrialTransitionElementLength
outerDerivative = atrialSeptumElementLength
else:
elementLength = atrialFreeWallElementLength
outerDerivative = atrialFreeWallElementLength
radiansAround = updateEllipseAngleByArcLength(aOuterMajorMag, aOuterMinorMag, radiansAround, elementLength)
laInnerDerivatives = []
finalArcLength = prevArcLength = getEllipseArcLength(aInnerMajorMag, aInnerMinorMag, (laRadians[(- 1)] - (2.0 * math.pi)), laRadians[0])
for n1 in range(elementsCountAroundAtria):
if (n1 == (elementsCountAroundAtria - 1)):
nextArcLength = finalArcLength
else:
nextArcLength = getEllipseArcLength(aInnerMajorMag, aInnerMinorMag, laRadians[n1], laRadians[(n1 + 1)])
if (laOuterDerivatives[n1] is atrialSeptumElementLength):
arcLength = min(prevArcLength, nextArcLength)
else:
arcLength = max(prevArcLength, nextArcLength)
laInnerDerivatives.append(arcLength)
prevArcLength = nextArcLength
raRadians = []
raInnerDerivatives = []
raOuterDerivatives = []
for n1 in range(elementsCountAroundAtria):
raRadians.append(((2.0 * math.pi) - laRadians[(- n1)]))
raInnerDerivatives.append(laInnerDerivatives[(- n1)])
raOuterDerivatives.append(laOuterDerivatives[(- n1)])
raRadians[0] = raSeptumRadians
laNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
for n3 in range(2):
for n1 in range(elementsCountAroundAtria):
radiansAround = laRadians[n1]
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
inner = [((laCentreX + (cosRadiansAround * laInnerMajor[0])) + (sinRadiansAround * laInnerMinor[0])), ((laCentreY + (cosRadiansAround * laInnerMajor[1])) + (sinRadiansAround * laInnerMinor[1])), aCentreInnerZ]
outer = [((laCentreX + (cosRadiansAround * laOuterMajor[0])) + (sinRadiansAround * laOuterMinor[0])), ((laCentreY + (cosRadiansAround * laOuterMajor[1])) + (sinRadiansAround * laOuterMinor[1])), aCentreOuterZ]
if ((n3 == 1) and ((n1 <= lan1CruxLimit) or (n1 > (lan1SeptumLimit + 2)))):
continue
node = nodes.createNode(nodeIdentifier, nodetemplate)
laNodeId[n3][n1] = nodeIdentifier
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, (inner if (n3 == 0) else outer))
if (n3 == 0):
dx_ds1 = [(((- sinRadiansAround) * laInnerMajor[0]) + (cosRadiansAround * laInnerMinor[0])), (((- sinRadiansAround) * laInnerMajor[1]) + (cosRadiansAround * laInnerMinor[1])), 0.0]
scale1 = laInnerDerivatives[n1]
else:
dx_ds1 = [(((- sinRadiansAround) * laOuterMajor[0]) + (cosRadiansAround * laOuterMinor[0])), (((- sinRadiansAround) * laOuterMajor[1]) + (cosRadiansAround * laOuterMinor[1])), 0.0]
scale1 = laOuterDerivatives[n1]
scale1 /= vector.magnitude(dx_ds1)
dx_ds1 = [(d * scale1) for d in dx_ds1]
dx_ds3 = [(outer[0] - inner[0]), (outer[1] - inner[1]), (outer[2] - inner[2])]
if ((n1 < lan1CruxLimit) or (n1 > lan1SeptumLimit)):
dx_ds2 = [0.0, 0.0, aCentreInnerZ]
else:
dx_ds2 = [((dx_ds3[1] * dx_ds1[2]) - (dx_ds3[2] * dx_ds1[1])), ((dx_ds3[2] * dx_ds1[0]) - (dx_ds3[0] * dx_ds1[2])), ((dx_ds3[0] * dx_ds1[1]) - (dx_ds3[1] * dx_ds1[0]))]
if (n1 == (lan1CruxLimit + 1)):
dx_ds2[2] *= (0.5 if (n3 == 0) else 0.25)
mag2 = (1.5 * (baseHeight + baseThickness))
else:
mag2 = inner[2]
scale2 = (mag2 / vector.magnitude(dx_ds2))
dx_ds2 = [(d * scale2) for d in dx_ds2]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
if False:
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [laCentreX, laCentreY, aCentreInnerZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [laInnerMajor[0], laInnerMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [laInnerMinor[0], laInnerMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreInnerZ])
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [laCentreX, laCentreY, aCentreOuterZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [laOuterMajor[0], laOuterMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [laOuterMinor[0], laOuterMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreOuterZ])
nodeIdentifier += 1
ran1SeptumLimit = (elementsCountAtrialSeptum // 2)
ran1CruxLimit = ((elementsCountAroundAtria - ran1SeptumLimit) - 1)
raNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
raNodeId[1][0] = laNodeId[0][0]
raNodeId[1][(- 2)] = lvOutletNodeId[1][1]
raNodeId[1][(- 1)] = lvOutletNodeId[1][0]
for n3 in range(2):
for n1 in range(elementsCountAroundAtria):
radiansAround = raRadians[n1]
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
inner = [((raCentreX + (cosRadiansAround * raInnerMajor[0])) + (sinRadiansAround * raInnerMinor[0])), ((raCentreY + (cosRadiansAround * raInnerMajor[1])) + (sinRadiansAround * raInnerMinor[1])), aCentreInnerZ]
outer = [((raCentreX + (cosRadiansAround * raOuterMajor[0])) + (sinRadiansAround * raOuterMinor[0])), ((raCentreY + (cosRadiansAround * raOuterMajor[1])) + (sinRadiansAround * raOuterMinor[1])), aCentreOuterZ]
if ((n3 == 1) and ((n1 < ran1SeptumLimit) or (n1 >= ran1CruxLimit))):
continue
node = nodes.createNode(nodeIdentifier, nodetemplate)
raNodeId[n3][n1] = nodeIdentifier
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, (inner if (n3 == 0) else outer))
if (n3 == 0):
dx_ds1 = [(((- sinRadiansAround) * raInnerMajor[0]) + (cosRadiansAround * raInnerMinor[0])), (((- sinRadiansAround) * raInnerMajor[1]) + (cosRadiansAround * raInnerMinor[1])), 0.0]
scale1 = raInnerDerivatives[n1]
else:
dx_ds1 = [(((- sinRadiansAround) * raOuterMajor[0]) + (cosRadiansAround * raOuterMinor[0])), (((- sinRadiansAround) * raOuterMajor[1]) + (cosRadiansAround * raOuterMinor[1])), 0.0]
scale1 = raOuterDerivatives[n1]
scale1 /= vector.magnitude(dx_ds1)
dx_ds1 = [(d * scale1) for d in dx_ds1]
dx_ds3 = [(outer[0] - inner[0]), (outer[1] - inner[1]), (outer[2] - inner[2])]
if ((n1 <= ran1SeptumLimit) or (n1 >= ran1CruxLimit)):
dx_ds2 = [0.0, 0.0, aCentreInnerZ]
else:
dx_ds2 = [((dx_ds3[1] * dx_ds1[2]) - (dx_ds3[2] * dx_ds1[1])), ((dx_ds3[2] * dx_ds1[0]) - (dx_ds3[0] * dx_ds1[2])), ((dx_ds3[0] * dx_ds1[1]) - (dx_ds3[1] * dx_ds1[0]))]
if (n1 == (ran1CruxLimit - 1)):
dx_ds2[2] *= (0.5 if (n3 == 0) else 0.25)
mag2 = (1.5 * (baseHeight + baseThickness))
else:
mag2 = inner[2]
scale2 = (mag2 / vector.magnitude(dx_ds2))
dx_ds2 = [(d * scale2) for d in dx_ds2]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
if False:
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [raCentreX, raCentreY, aCentreInnerZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [raInnerMajor[0], raInnerMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [raInnerMinor[0], raInnerMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreInnerZ])
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [raCentreX, raCentreY, aCentreOuterZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [raOuterMajor[0], raOuterMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [raOuterMinor[0], raOuterMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreOuterZ])
nodeIdentifier += 1
laNodeId[1][0] = raNodeId[0][0]
laNodeId[1][1] = lvOutletNodeId[1][0]
laNodeId[1][2] = lvOutletNodeId[1][(- 1)]
for i in range(2):
aNodeId = (laNodeId if (i == 0) else raNodeId)
for n1 in range(elementsCountAroundAtria):
nid2 = aNodeId[1][n1]
node2 = nodes.findNodeByIdentifier(nid2)
cache.setNode(node2)
(result, x2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
nid1 = aNodeId[0][n1]
node1 = nodes.findNodeByIdentifier(nid1)
cache.setNode(node1)
(result, x1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
dx_ds3 = [(x2[0] - x1[0]), (x2[1] - x1[1]), (x2[2] - x1[2])]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if ((i == 1) and ((n1 == 0) or (nid2 == cruxCentreNodeId))):
continue
if (nid2 in [cruxLeftNodeId, cruxRightNodeId]):
dx_ds3 = [(- d) for d in dx_ds3]
cache.setNode(node2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
cache.setNode(nodes.findNodeByIdentifier(laNodeId[0][1]))
(result, x1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(raNodeId[0][(- 1)]))
(result, x2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(cruxCentreNodeId))
(result, xc) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
d1 = [(x1[c] - xc[c]) for c in range(3)]
d2 = [(x2[c] - xc[c]) for c in range(3)]
dx_ds3 = [(d1[0] + d2[0]), (d1[1] + d2[1]), (d1[2] + d2[2])]
scale = (vector.magnitude(d1) / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
cache.setNode(nodes.findNodeByIdentifier(laNodeId[0][2]))
(result, dx_ds1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, dx_ds3) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
dx_ds2 = vector.crossproduct3(dx_ds3, dx_ds1)
scale2 = ((0.5 * vector.magnitude(dx_ds3)) / vector.magnitude(dx_ds2))
dx_ds2 = [(scale2 * d) for d in dx_ds2]
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
nida = (nsdl + nowl)
nidb = lvOutletNodeId[1][(- 2)]
node1 = nodes.findNodeByIdentifier(nida)
node2 = nodes.findNodeByIdentifier(nidb)
(x, dx_ds2, dx_ds1, dx_ds3) = interpolateNodesCubicHermite(cache, coordinates, 0.5, baseThickness, node1, Node.VALUE_LABEL_D_DS2, 1.0, Node.VALUE_LABEL_D_DS1, 1.0, node2, Node.VALUE_LABEL_D_DS3, (- 1.0), Node.VALUE_LABEL_D_DS1, 1.0)
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
lv_crest_nid1 = nodeIdentifier
nodeIdentifier += 1
nida = ((nidr + nowr) + 4)
nidb = lvOutletNodeId[1][2]
node = nodes.findNodeByIdentifier(nida)
cache.setNode(node)
(result, xa) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, d1a) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, d2a) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, 3)
node = nodes.findNodeByIdentifier(nidb)
cache.setNode(node)
(result, xb) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, d1b) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, d2b) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
d2b = [((- 2.0) * d) for d in d2b]
scale = ((4.0 * (baseHeight + baseThickness)) / vector.magnitude(d2a))
d2a = [(scale * d) for d in d2a]
xi = 0.5
xr = (1.0 - xi)
x = interp.interpolateCubicHermite(xa, d2a, xb, d2b, xi)
dx_ds1 = [((xr * d1a[c]) + (xi * d1b[c])) for c in range(3)]
dx_ds2 = interp.interpolateCubicHermiteDerivative(xa, d2a, xb, d2b, xi)
dx_ds2 = [(xr * d) for d in dx_ds2]
radialVector = vector.normalise(vector.crossproduct3(dx_ds1, dx_ds2))
dx_ds3 = [(baseThickness * d) for d in radialVector]
x_inner = [(x[c] - dx_ds3[c]) for c in range(3)]
curvatureScale = (1.0 - (baseThickness * interp.getCubicHermiteCurvature(xa, d2a, x, dx_ds2, radialVector, 1.0)))
dx_ds2_inner = [(curvatureScale * d) for d in dx_ds2]
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x_inner)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2_inner)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
rv_crest_nid1 = nodeIdentifier
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
rv_crest_nid2 = nodeIdentifier
nodeIdentifier += 1
nida = laNodeId[0][2]
nidb = lvOutletNodeId[0][(- 1)]
node1 = nodes.findNodeByIdentifier(nida)
node2 = nodes.findNodeByIdentifier(nidb)
(x, dx_ds2, dx_ds1, dx_ds3) = interpolateNodesCubicHermite(cache, coordinates, 0.4, lvOutletWallThickness, node1, Node.VALUE_LABEL_D_DS2, (- 1.0), Node.VALUE_LABEL_D_DS1, (- 1.0), node2, Node.VALUE_LABEL_D_DS2, 1.0, Node.VALUE_LABEL_D_DS1, 1.0)
dx_ds1 = [(2.0 * d) for d in dx_ds1]
node = nodes.createNode(nodeIdentifier, nodetemplateLinearS3)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
lv_bridge_nid1 = nodeIdentifier
nodeIdentifier += 1
tlaNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
traNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
for n3 in range(2):
for i in range(2):
if (i == 0):
baNodeId = laNodeId
taNodeId = tlaNodeId
else:
baNodeId = raNodeId
taNodeId = traNodeId
for n1 in range(elementsCountAroundAtria):
if ((n3 == 1) and (((i == 0) and ((n1 < (lan1CruxLimit - 1)) or (n1 > (lan1SeptumLimit + 2)))) or ((i == 1) and ((n1 < ran1SeptumLimit) or (n1 > ran1CruxLimit))))):
continue
node = nodes.findNodeByIdentifier(baNodeId[n3][n1])
cache.setNode(node)
(result, x) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, dx_ds1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, dx_ds2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, 3)
(result, dx_ds3) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
x[2] += fibrousRingThickness
if ((n3 == 1) and (((i == 0) and ((n1 == 1) or (n1 == 2))) or ((i == 1) and (n1 == (elementsCountAroundAtria - 2))))):
dx_ds1 = [(- d) for d in dx_ds1]
dx_ds3 = [(- d) for d in dx_ds3]
taNodeId[n3][n1] = nodeIdentifier
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
tlaNodeId[1][0] = traNodeId[0][0]
traNodeId[1][(- 1)] = tlaNodeId[1][1]
traNodeId[1][0] = tlaNodeId[0][0]
mesh = fm.findMeshByDimension(3)
lvMeshGroup = lvGroup.getMeshGroup(mesh)
rvMeshGroup = rvGroup.getMeshGroup(mesh)
vSeptumMeshGroup = vSeptumGroup.getMeshGroup(mesh)
conusArteriosusMeshGroup = conusArteriosusGroup.getMeshGroup(mesh)
lFibrousRingMeshGroup = lFibrousRingGroup.getMeshGroup(mesh)
rFibrousRingMeshGroup = rFibrousRingGroup.getMeshGroup(mesh)
tricubichermite = eftfactory_tricubichermite(mesh, useCrossDerivatives)
eft = tricubichermite.createEftNoCrossDerivatives()
elementIdentifier = (getMaximumElementIdentifier(mesh) + 1)
elementtemplate1 = mesh.createElementtemplate()
elementtemplate1.setElementShapeType(Element.SHAPE_TYPE_CUBE)
for e in range(19):
eft1 = eft
nids = None
meshGroups = [lvMeshGroup]
if (e == 0):
nids = [(nidl + 0), (nidl + 1), laNodeId[0][(- 1)], laNodeId[0][0], (nidr + 0), ((nidl + nowl) + 1), raNodeId[0][1], laNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS1], [1])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
scaleEftNodeValueLabels(eft1, [8], [Node.VALUE_LABEL_D_DS3], [1])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 1):
nids = [(nidl + 1), (nidl + 2), laNodeId[0][0], laNodeId[0][1], ((nidl + nowl) + 1), ((nidl + nowl) + 2), laNodeId[1][0], raNodeId[0][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
scaleEftNodeValueLabels(eft1, [7], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 2):
nids = [(nidl + 2), lvOutletNodeId[0][0], laNodeId[0][1], lvOutletNodeId[1][0], ((nidl + nowl) + 2), raNodeId[0][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
tricubichermite.setEftLinearDerivative(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, 2, 4, 1)
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
ln_map = [1, 2, 3, 4, 5, 4, 6, 4]
remapEftLocalNodes(eft1, 6, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 3):
nids = [lvOutletNodeId[1][0], lvOutletNodeId[1][1], ((nidl + nowl) + 2), ((nidl + nowl) + 3), raNodeId[0][(- 1)], raNodeId[0][(- 2)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 2, 3, 4], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
scaleEftNodeValueLabels(eft1, [7, 8], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
ln_map = [1, 2, 1, 2, 3, 4, 5, 6]
remapEftLocalNodes(eft1, 6, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e <= 6):
n = (e - 4)
nids = [((nidl + n) + 2), ((nidl + n) + 3), lvOutletNodeId[0][n], lvOutletNodeId[0][(n + 1)], (((nidl + nowl) + n) + 2), (((nidl + nowl) + n) + 3), lvOutletNodeId[1][n], lvOutletNodeId[1][(n + 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
if (e == 4):
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 7):
nids = [(nidl + 5), (nidl + 6), lvOutletNodeId[0][3], lvOutletNodeId[0][4], ((nidl + nowl) + 5), ((nidl + nowl) + 6), lvOutletNodeId[1][3], lvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 8):
nids = [(nidl + 6), (nidl + 7), lvOutletNodeId[0][4], lv_crest_nid1, ((nidl + nowl) + 6), ((nidr + norr) - 1), lvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 3, 4, 5, 6, 7, 4]
remapEftLocalNodes(eft1, 7, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 9):
nids = [((nidr + norr) - 1), nsdl, (((nidr + nowr) + norr) - 1), (nsdl + nowl), lv_crest_nid1]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [1, 2, 3, 4], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 1, 2, 3, 4, 5, 5]
remapEftLocalNodes(eft1, 5, ln_map)
meshGroups += [rvMeshGroup]
elif (e == 10):
nids = [nsdl, (nsdl + 1), laNodeId[0][3], (nsdl + nowl), ((nsdl + nowl) + 1), lv_crest_nid1, laNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 1, 3, 4, 5, 6, 7]
remapEftLocalNodes(eft1, 7, ln_map)
elif (e == 11):
nids = [(nedl - 4), (nedl - 3), laNodeId[0][(- 5)], laNodeId[0][(- 4)], ((nedl + nowl) - 4), ((nedl + nowl) - 3), laNodeId[1][(- 5)], laNodeId[1][(- 4)]]
elif (e == 12):
nids = [(nedl - 3), (nedl - 2), laNodeId[0][(- 4)], laNodeId[0][(- 3)], ((nedl + nowl) - 3), ((nedl + nowl) - 2), laNodeId[1][(- 4)], laNodeId[1][(- 3)]]
elif (e == 13):
nids = [(nedl - 2), (nedl - 1), laNodeId[0][(- 3)], laNodeId[0][(- 2)], ((nedl + nowl) - 2), ((nedl + nowl) - 1), laNodeId[1][(- 3)], laNodeId[1][(- 2)]]
elif (e == 14):
nids = [(nedl - 1), (nidl + 0), laNodeId[0][(- 2)], laNodeId[0][(- 1)], ((nedl + nowl) - 1), ((nidl + nowl) + 0), laNodeId[1][(- 2)], laNodeId[1][(- 1)]]
elif (e == 15):
nids = [(nidl + 6), (nidl + 7), lv_bridge_nid1, laNodeId[0][3], lvOutletNodeId[0][4], lv_crest_nid1, lvOutletNodeId[0][5], laNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
elif (e == 16):
nids = [lv_crest_nid1, laNodeId[1][3], lvOutletNodeId[0][4], lvOutletNodeId[0][5], lvOutletNodeId[1][4], lvOutletNodeId[1][5]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [1, 2, 5, 6], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [2, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
ln_map = [1, 2, 3, 4, 1, 2, 5, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e == 17):
nids = [laNodeId[0][3], laNodeId[0][2], lv_bridge_nid1, laNodeId[1][3], lvOutletNodeId[1][5], lvOutletNodeId[0][5]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [6, 8], Node.VALUE_LABEL_D_DS2, 6, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 1, 3, 4, 5, 4, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e == 18):
nids = [lv_bridge_nid1, (nidl + elementsCountAtrialSeptum), lvOutletNodeId[0][5], lvOutletNodeId[0][0], laNodeId[0][2], laNodeId[0][1], lvOutletNodeId[1][5], lvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
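# The next three remaps swap the d/ds2 and d/ds3 roles at nodes 5 and 6, using
# the otherwise unused cross-derivative slot as a temporary.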
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
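# Right ventricle base: 15 elements joining the RV free wall rim to the right
# atrium, the supraventricular crest nodes and the RV outlet; e == 0 straddles
# the septum and is also added to the LV mesh group.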
for e in range(15):
eft1 = eft
nids = None
meshGroups = [rvMeshGroup]
if (e == 0):
nids = [(nidl + 0), (nidr + 0), laNodeId[0][(- 1)], raNodeId[0][1], ((nidl + nowl) + 0), ((nidr + nowr) + 0), laNodeId[1][(- 1)], raNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [lvMeshGroup]
elif (e == 1):
nids = [(nidr + 0), (nidr + 1), raNodeId[0][1], raNodeId[0][2], ((nidr + nowr) + 0), ((nidr + nowr) + 1), raNodeId[1][1], raNodeId[1][2]]
elif (e == 2):
nids = [(nidr + 1), (nidr + 2), raNodeId[0][2], raNodeId[0][3], ((nidr + nowr) + 1), ((nidr + nowr) + 2), raNodeId[1][2], raNodeId[1][3]]
elif (e == 3):
nids = [(nidr + 2), (nidr + 3), raNodeId[0][3], raNodeId[0][4], ((nidr + nowr) + 2), ((nidr + nowr) + 3), raNodeId[1][3], raNodeId[1][4]]
elif (e == 4):
nids = [(nidr + 3), (nidr + 4), raNodeId[0][4], rv_crest_nid1, ((nidr + nowr) + 3), ((nidr + nowr) + 4), raNodeId[1][4], rv_crest_nid2]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [3, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
elif (e == 5):
nids = [(nidr + 4), (nidr + 5), rv_crest_nid1, rvOutletNodeId[0][2], ((nidr + nowr) + 4), ((nidr + nowr) + 5), rv_crest_nid2, rvOutletNodeId[1][2]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 6):
nids = [(nidr + 5), (nidr + 6), rvOutletNodeId[0][2], rvOutletNodeId[0][3], ((nidr + nowr) + 5), ((nidr + nowr) + 6), rvOutletNodeId[1][2], rvOutletNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
meshGroups += [conusArteriosusMeshGroup]
elif (e == 7):
nids = [(nidr + 6), (nidr + 7), rvOutletNodeId[0][3], rvOutletNodeId[0][4], ((nidr + nowr) + 6), ((nidr + nowr) + 7), rvOutletNodeId[1][3], rvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
meshGroups += [conusArteriosusMeshGroup]
elif (e == 8):
nids = [raNodeId[0][(- 3)], ((nidl + nowl) + 4), raNodeId[0][(- 2)], ((nidl + nowl) + 3), raNodeId[1][(- 3)], lvOutletNodeId[1][2], lvOutletNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [1, 5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 3, 4, 5, 6, 7, 7]
remapEftLocalNodes(eft1, 7, ln_map)
elif (e == 9):
nids = [raNodeId[0][(- 4)], rv_crest_nid1, raNodeId[0][(- 3)], ((nidl + nowl) + 4), raNodeId[1][(- 4)], rv_crest_nid2, raNodeId[1][(- 3)], lvOutletNodeId[1][2]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
elif (e == 10):
nids = [rv_crest_nid1, rvOutletNodeId[0][2], ((nidl + nowl) + 4), rvOutletNodeId[0][1], rv_crest_nid2, rvOutletNodeId[1][2], lvOutletNodeId[1][2], rvOutletNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [2, 3, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2, 4, 6, 8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2, 4, 6, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 11):
nids = [((nidl + nowl) + 4), rvOutletNodeId[0][1], ((nidl + nowl) + 5), rvOutletNodeId[0][0], lvOutletNodeId[1][2], rvOutletNodeId[1][1], lvOutletNodeId[1][3], rvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 12):
nids = [((nidl + nowl) + 6), ((nidl + nowl) + 5), rvOutletNodeId[0][(- 1)], rvOutletNodeId[0][0], lvOutletNodeId[1][4], lvOutletNodeId[1][3], rvOutletNodeId[1][(- 1)], rvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 5, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [1, 2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 13):
nids = [(nidr + elementsCountAroundVSeptum), ((nidl + nowl) + 6), rvOutletNodeId[0][(- 2)], rvOutletNodeId[0][(- 1)], lv_crest_nid1, lvOutletNodeId[1][4], rvOutletNodeId[1][(- 2)], rvOutletNodeId[1][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 14):
nids = [(nidr + elementsCountAroundVSeptum), rvOutletNodeId[0][(- 2)], ((nidr + nowr) + elementsCountAroundVSeptum), lv_crest_nid1, rvOutletNodeId[1][(- 2)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 3, 4, 7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 1, 2, 2, 3, 4, 5, 5]
remapEftLocalNodes(eft1, 5, ln_map)
meshGroups += [conusArteriosusMeshGroup]
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
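# Fibrous ring: one layer of elements between the atrial base nodes and their
# raised copies, bicubic Hermite in-plane and linear through the ring
# (element direction 2 mapped to nodal D_DS3). The first four elements of the
# left loop (e1 < 4) span the atrial septum and belong to both fibrous rings.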
bicubichermitelinear = eftfactory_bicubichermitelinear(mesh, useCrossDerivatives, linearAxis=2, d_ds1=Node.VALUE_LABEL_D_DS1, d_ds2=Node.VALUE_LABEL_D_DS3)
eftFibrousRing = bicubichermitelinear.createEftBasic()
for i in range(2):
if (i == 0):
baNodeId = laNodeId
taNodeId = tlaNodeId
meshGroupsSide = [lFibrousRingMeshGroup]
else:
baNodeId = raNodeId
taNodeId = traNodeId
meshGroupsSide = [rFibrousRingMeshGroup]
for e1 in range((elementsCountAroundAtria + 2)):
if ((i == 1) and (e1 < 4)):
continue
if (e1 < 4):
meshGroups = [lFibrousRingMeshGroup, rFibrousRingMeshGroup]
else:
meshGroups = meshGroupsSide
eft1 = eftFibrousRing
if ((e1 == 0) or (e1 == 3)):
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
if (e1 == 0):
nids = [laNodeId[0][(- 1)], raNodeId[0][1], tlaNodeId[0][(- 1)], traNodeId[0][1], laNodeId[1][(- 1)], raNodeId[1][1], tlaNodeId[1][(- 1)], traNodeId[1][1]]
else:
nids = [raNodeId[0][(- 1)], laNodeId[0][1], traNodeId[0][(- 1)], tlaNodeId[0][1], laNodeId[1][1], tlaNodeId[1][1]]
remapEftNodeValueLabel(eft1, [5, 6, 7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 3, 4, 5, 5, 6, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e1 == 1):
nids = [laNodeId[0][(- 1)], laNodeId[0][0], tlaNodeId[0][(- 1)], tlaNodeId[0][0], raNodeId[0][1], raNodeId[0][0], traNodeId[0][1], traNodeId[0][0]]
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [6, 8], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
elif (e1 == 2):
nids = [laNodeId[0][0], laNodeId[0][1], tlaNodeId[0][0], tlaNodeId[0][1], raNodeId[0][0], raNodeId[0][(- 1)], traNodeId[0][0], traNodeId[0][(- 1)]]
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [5, 7], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
else:
ea = (e1 - 3)
eb = ((ea + 1) - elementsCountAroundAtria)
nids = [baNodeId[0][ea], baNodeId[0][eb], taNodeId[0][ea], taNodeId[0][eb], baNodeId[1][ea], baNodeId[1][eb], taNodeId[1][ea], taNodeId[1][eb]]
if (((i == 0) and ((e1 == 4) or (e1 == 5))) or ((i == 1) and (e1 >= elementsCountAroundAtria))):
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
if (e1 == 4):
scaleEftNodeValueLabels(eft1, [6], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
elif (e1 == 5):
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
elif (e1 == elementsCountAroundAtria):
scaleEftNodeValueLabels(eft1, [6], [Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
else:
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
fm.endChange()
return annotationGroups | Generate the base tricubic Hermite mesh.
:param region: Zinc region to define model in. Must be empty.
:param options: Dict containing options. See getDefaultOptions().
:return: list of AnnotationGroup | src/scaffoldmaker/meshtypes/meshtype_3d_heartventriclesbase2.py | generateBaseMesh | keeran97/scaffoldmaker | 1 | python | @classmethod
def generateBaseMesh(cls, region, options):
'\n Generate the base tricubic Hermite mesh.\n :param region: Zinc region to define model in. Must be empty.\n :param options: Dict containing options. See getDefaultOptions().\n :return: list of AnnotationGroup\n '
elementsCountAroundLVFreeWall = options['Number of elements around LV free wall']
elementsCountAroundVSeptum = options['Number of elements around ventricular septum']
elementsCountAroundLV = (elementsCountAroundLVFreeWall + elementsCountAroundVSeptum)
elementsCountUpLVApex = options['Number of elements up LV apex']
elementsCountUpVSeptum = options['Number of elements up ventricular septum']
elementsCountUpLV = (elementsCountUpLVApex + elementsCountUpVSeptum)
elementsCountUpRV = (elementsCountUpVSeptum + 1)
elementsCountAroundRV = (elementsCountAroundVSeptum + 2)
elementsCountAroundAtria = options['Number of elements around atria']
elementsCountAtrialSeptum = options['Number of elements around atrial septum']
lvOuterHeight = options['LV outer height']
lvOuterRadius = options['LV outer radius']
lvFreeWallThickness = options['LV free wall thickness']
lvApexThickness = options['LV apex thickness']
rvHeight = options['RV inner height']
rvArcAroundRadians = math.radians(options['RV arc around degrees'])
rvFreeWallThickness = options['RV free wall thickness']
rvWidth = options['RV width']
rvExtraCrossRadiusBase = options['RV extra cross radius base']
vSeptumThickness = options['Ventricular septum thickness']
vSeptumBaseRadialDisplacement = options['Ventricular septum base radial displacement']
useCrossDerivatives = options['Use cross derivatives']
aBaseInnerMajorMag = (0.5 * options['Atria base inner major axis length'])
aBaseInnerMinorMag = (0.5 * options['Atria base inner minor axis length'])
aMajorAxisRadians = math.radians(options['Atria major axis rotation degrees'])
aSeptumThickness = options['Atrial septum thickness']
aBaseWallThickness = options['Atrial base wall thickness']
aBaseSlopeRadians = math.radians(options['Atrial base slope degrees'])
baseHeight = options['Base height']
baseThickness = options['Base thickness']
fibrousRingThickness = options['Fibrous ring thickness']
lvOutletInnerRadius = (options['LV outlet inner diameter'] * 0.5)
lvOutletWallThickness = options['LV outlet wall thickness']
lvOutletOuterRadius = (lvOutletInnerRadius + lvOutletWallThickness)
rvOutletInnerRadius = (options['RV outlet inner diameter'] * 0.5)
rvOutletWallThickness = options['RV outlet wall thickness']
rvOutletOuterRadius = (rvOutletInnerRadius + rvOutletWallThickness)
vOutletElementLength = options['Ventricles outlet element length']
vOutletInclineRadians = math.radians(options['Ventricles outlet incline degrees'])
vOutletSpacing = options['Ventricles outlet spacing']
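# Build the plain ventricles first, then overlay the base: outlets, atrial
# base rings, crest/bridge nodes and the fibrous ring.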
annotationGroups = MeshType_3d_heartventricles2.generateBaseMesh(region, options)
lvGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('left ventricle myocardium'))
rvGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('right ventricle myocardium'))
vSeptumGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('interventricular septum'))
conusArteriosusGroup = AnnotationGroup(region, get_heart_term('conus arteriosus'))
annotationGroups += [conusArteriosusGroup]
lFibrousRingGroup = AnnotationGroup(region, get_heart_term('left fibrous ring'))
rFibrousRingGroup = AnnotationGroup(region, get_heart_term('right fibrous ring'))
fm = region.getFieldmodule()
fm.beginChange()
coordinates = findOrCreateFieldCoordinates(fm)
cache = fm.createFieldcache()
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodetemplate = nodes.createNodetemplate()
nodetemplate.defineField(coordinates)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_VALUE, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS1, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS2, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS3, 1)
nodetemplateLinearS3 = nodes.createNodetemplate()
nodetemplateLinearS3.defineField(coordinates)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_VALUE, 1)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS1, 1)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS2, 1)
nodeIdentifier = (getMaximumNodeIdentifier(nodes) + 1)
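# Node-id bookkeeping for the grids created by the ventricles mesh (inferred
# from usage): norl/nowl = nodes around one LV row / in one LV wall layer,
# nidl = first node id on the LV base row, nsdl/nedl = ids at the septum end
# and row end; norr, nowr and nidr are the RV equivalents.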
norl = elementsCountAroundLV
nowl = (1 + (elementsCountUpLV * norl))
nidl = ((nowl - norl) + 1)
nsdl = (nidl + elementsCountAroundVSeptum)
nedl = (nidl + elementsCountAroundLV)
norr = (elementsCountAroundRV - 1)
nowr = (elementsCountUpRV * norr)
nidr = ((((nowl * 2) + 1) + nowr) - norr)
elementsCountAroundOutlet = 6
defaultOutletScale3 = 0.5
nidca = (((nidl + nowl) + elementsCountAroundVSeptum) - 1)
nidcb = ((nidr + elementsCountAroundVSeptum) - 1)
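# Locate the crux: average the two septum-edge nodes (pxa, pxb), derive the
# base direction ax from the LV/RV base nodes, and place the crux centre
# cruxOffset along ax; the LV outlet centre sits one outer radius back from it.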
cache.setNode(nodes.findNodeByIdentifier(nidca))
(result, pxa) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(nidcb))
(result, pxb) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
px = [(0.5 * (pxa[c] + pxb[c])) for c in range(3)]
node = nodes.findNodeByIdentifier(nidl)
cache.setNode(node)
(result, ax) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
node = nodes.findNodeByIdentifier(nidr)
cache.setNode(node)
(result, bx) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
bx = [(0.5 * (ax[i] + bx[i])) for i in range(2)]
bx.append(ax[2])
ax = [(bx[c] - px[c]) for c in range(3)]
ax = vector.normalise(ax)
baseRotationRadians = math.atan2(ax[1], ax[0])
outletSpacingRadians = (0.25 * math.pi)
outletSpacingHorizontal = (vOutletSpacing * math.cos(outletSpacingRadians))
outletSpacingVertical = (vOutletSpacing * math.sin(outletSpacingRadians))
cruxOffset = ((rvOutletOuterRadius + outletSpacingHorizontal) + (2.0 * lvOutletOuterRadius))
cx = [(px[c] + (ax[c] * cruxOffset)) for c in range(3)]
aBaseSlopeLength = (aBaseWallThickness * math.cos(aBaseSlopeRadians))
aBaseSlopeHeight = (aBaseWallThickness * math.sin(aBaseSlopeRadians))
cosOutletInclineRadians = math.cos(vOutletInclineRadians)
sinOutletInclineRadians = math.sin(vOutletInclineRadians)
lvOutletCentre = [(cx[0] - (ax[0] * lvOutletOuterRadius)), (cx[1] - (ax[1] * lvOutletOuterRadius)), (((baseHeight + baseThickness) - aBaseSlopeHeight) + (sinOutletInclineRadians * lvOutletOuterRadius))]
radiansPerElementAroundOutlet = ((2.0 * math.pi) / elementsCountAroundOutlet)
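# LV (aortic) outlet: two rings of 6 nodes, inner first (linear through the
# wall, no D_DS3) then outer; n1 == 0 is the crux centre and n1 == 3 is the
# node facing the RV outlet.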
x = [0.0, 0.0, 0.0]
dx_ds1 = [0.0, 0.0, 0.0]
dx_ds3 = [0.0, 0.0, 0.0]
lvOutletNodeId = []
for n3 in range(2):
radius = (lvOutletInnerRadius if (n3 == 0) else lvOutletOuterRadius)
loAxis1 = [(radius * ax[c]) for c in range(3)]
loAxis2 = [((- loAxis1[1]) * cosOutletInclineRadians), (loAxis1[0] * cosOutletInclineRadians), ((- radius) * sinOutletInclineRadians)]
loAxis3 = vector.crossproduct3(loAxis1, loAxis2)
scale = (vOutletElementLength / vector.magnitude(loAxis3))
dx_ds2 = [(v * scale) for v in loAxis3]
outletNodeId = []
for n1 in range(elementsCountAroundOutlet):
radiansAround = (n1 * radiansPerElementAroundOutlet)
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
outletScale3 = ((vOutletSpacing / radius) if (n1 == 3) else defaultOutletScale3)
for c in range(3):
x[c] = ((lvOutletCentre[c] + (loAxis1[c] * cosRadiansAround)) + (loAxis2[c] * sinRadiansAround))
dx_ds1[c] = (radiansPerElementAroundOutlet * ((loAxis1[c] * (- sinRadiansAround)) + (loAxis2[c] * cosRadiansAround)))
node = nodes.createNode(nodeIdentifier, (nodetemplateLinearS3 if (n3 == 0) else nodetemplate))
outletNodeId.append(nodeIdentifier)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
if (n3 == 1):
dx_ds3 = [(outletScale3 * ((loAxis1[c] * cosRadiansAround) + (loAxis2[c] * sinRadiansAround))) for c in range(3)]
if (n1 in [2, 4]):
if (n1 == 2):
dx_ds3[2] = (- dx_ds3[2])
else:
dx_ds3[2] = ((- 2.0) * dx_ds3[2])
scale = ((radiansPerElementAroundOutlet * rvOutletOuterRadius) / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if (n1 == 0):
cruxCentreNodeId = nodeIdentifier
cruxCentre = [x[0], x[1], x[2]]
elif (n1 == 1):
cruxRightNodeId = nodeIdentifier
cruxRight = [x[0], x[1], x[2]]
elif (n1 == (elementsCountAroundOutlet - 1)):
cruxLeftNodeId = nodeIdentifier
cruxLeft = [x[0], x[1], x[2]]
elif (n1 == 3):
lvOutletOuterSpaceX = [x[0], x[1], x[2]]
nodeIdentifier += 1
lvOutletNodeId.append(outletNodeId)
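# RV (pulmonary) outlet: centre spaced back along -ax from the LV outlet,
# shifted by the outlet spacing and tilted by the outlet incline; same
# two-ring construction as the LV outlet.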
outletCentreSpacing = ((lvOutletOuterRadius + outletSpacingHorizontal) + rvOutletOuterRadius)
rvOutletCentre = [(lvOutletCentre[c] - (outletCentreSpacing * ax[c])) for c in range(3)]
unitCrossX = vector.normalise([(- ax[1]), ax[0]])
rvOutletCentre[0] -= ((outletSpacingVertical * sinOutletInclineRadians) * unitCrossX[0])
rvOutletCentre[1] -= ((outletSpacingVertical * sinOutletInclineRadians) * unitCrossX[1])
rvOutletCentre[2] += (outletSpacingVertical * cosOutletInclineRadians)
rvOutletNodeId = []
for n3 in range(2):
radius = (rvOutletInnerRadius if (n3 == 0) else rvOutletOuterRadius)
roAxis1 = [(radius * ax[c]) for c in range(3)]
roAxis2 = [((- roAxis1[1]) * cosOutletInclineRadians), (roAxis1[0] * cosOutletInclineRadians), (radius * sinOutletInclineRadians)]
roAxis3 = vector.crossproduct3(roAxis1, roAxis2)
scale = (vOutletElementLength / vector.magnitude(roAxis3))
dx_ds2 = [(v * scale) for v in roAxis3]
outletNodeId = []
for n1 in range(elementsCountAroundOutlet):
radiansAround = (n1 * radiansPerElementAroundOutlet)
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
outletScale3 = ((vOutletSpacing / radius) if (n1 == 0) else defaultOutletScale3)
for c in range(3):
x[c] = ((rvOutletCentre[c] + (roAxis1[c] * cosRadiansAround)) + (roAxis2[c] * sinRadiansAround))
dx_ds1[c] = (radiansPerElementAroundOutlet * ((roAxis1[c] * (- sinRadiansAround)) + (roAxis2[c] * cosRadiansAround)))
hasDerivative3 = ((n3 == 1) and (n1 in [0, 5]))
node = nodes.createNode(nodeIdentifier, (nodetemplate if hasDerivative3 else nodetemplateLinearS3))
outletNodeId.append(nodeIdentifier)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
if hasDerivative3:
dx_ds3 = [(outletScale3 * ((roAxis1[c] * cosRadiansAround) + (roAxis2[c] * sinRadiansAround))) for c in range(3)]
if (n1 in [1, 5]):
if (n1 == 1):
dx_ds3[2] = (- dx_ds3[2])
else:
dx_ds3[2] = (4.0 * dx_ds3[2])
dx_ds3 = [(dx_ds1[c] + dx_ds3[c]) for c in range(3)]
mag3 = (radiansPerElementAroundOutlet * rvOutletOuterRadius)
scale = (mag3 / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if (n1 == 0):
rvOutletOuterSpaceX = [x[0], x[1], x[2]]
nodeIdentifier += 1
rvOutletNodeId.append(outletNodeId)
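# Point the outer d/ds3 of the two facing outlet nodes at each other across
# the inter-outlet gap.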
cache.setNode(nodes.findNodeByIdentifier(lvOutletNodeId[1][3]))
dx_ds3 = [(rvOutletOuterSpaceX[c] - lvOutletOuterSpaceX[c]) for c in range(3)]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
cache.setNode(nodes.findNodeByIdentifier(rvOutletNodeId[1][0]))
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [(- d) for d in dx_ds3])
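# Atrial base ellipses: inner and outer major/minor axis vectors for each
# atrium, rotated by +/- aMajorAxisRadians relative to the base axis.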
aInnerMajorMag = aBaseInnerMajorMag
aInnerMinorMag = aBaseInnerMinorMag
aOuterMajorMag = (aInnerMajorMag + aBaseSlopeLength)
aOuterMinorMag = (aInnerMinorMag + aBaseSlopeLength)
laMajorAxisRadians = ((baseRotationRadians + (0.5 * math.pi)) - aMajorAxisRadians)
laInnerMajor = [(aInnerMajorMag * math.cos(laMajorAxisRadians)), (aInnerMajorMag * math.sin(laMajorAxisRadians)), 0.0]
laInnerMinor = [((- aInnerMinorMag) * math.sin(laMajorAxisRadians)), (aInnerMinorMag * math.cos(laMajorAxisRadians)), 0.0]
laOuterMajor = [(aOuterMajorMag * math.cos(laMajorAxisRadians)), (aOuterMajorMag * math.sin(laMajorAxisRadians)), 0.0]
laOuterMinor = [((- aOuterMinorMag) * math.sin(laMajorAxisRadians)), (aOuterMinorMag * math.cos(laMajorAxisRadians)), 0.0]
raMajorAxisRadians = ((baseRotationRadians - (0.5 * math.pi)) + aMajorAxisRadians)
raInnerMajor = [(aInnerMajorMag * math.cos(raMajorAxisRadians)), (aInnerMajorMag * math.sin(raMajorAxisRadians)), 0.0]
raInnerMinor = [((- aInnerMinorMag) * math.sin(raMajorAxisRadians)), (aInnerMinorMag * math.cos(raMajorAxisRadians)), 0.0]
raOuterMajor = [(aOuterMajorMag * math.cos(raMajorAxisRadians)), (aOuterMajorMag * math.sin(raMajorAxisRadians)), 0.0]
raOuterMinor = [((- aOuterMinorMag) * math.sin(raMajorAxisRadians)), (aOuterMinorMag * math.cos(raMajorAxisRadians)), 0.0]
rotRadians = (baseRotationRadians + (0.5 * math.pi))
cosRotRadians = math.cos(rotRadians)
sinRotRadians = math.sin(rotRadians)
cruxLeftModX = (((cruxLeft[0] - cruxCentre[0]) * cosRotRadians) + ((cruxLeft[1] - cruxCentre[1]) * sinRotRadians))
cruxLeftModY = (((cruxLeft[0] - cruxCentre[0]) * (- sinRotRadians)) + ((cruxLeft[1] - cruxCentre[1]) * cosRotRadians))
axInnerMod = (aInnerMajorMag * math.cos(aMajorAxisRadians))
bxInnerMod = (aInnerMinorMag * math.sin(aMajorAxisRadians))
laSeptumRadians = math.atan2(bxInnerMod, axInnerMod)
raSeptumRadians = (- laSeptumRadians)
laCentreModX = ((((- 0.5) * aSeptumThickness) - (axInnerMod * math.cos(laSeptumRadians))) - (bxInnerMod * math.sin(laSeptumRadians)))
axMod = (aOuterMajorMag * math.cos(aMajorAxisRadians))
ayMod = (aOuterMajorMag * (- math.sin(aMajorAxisRadians)))
bxMod = (aOuterMinorMag * math.sin(aMajorAxisRadians))
byMod = (aOuterMinorMag * math.cos(aMajorAxisRadians))
laCruxLeftRadians = getEllipseRadiansToX(axMod, bxMod, (cruxLeftModX - laCentreModX), (math.pi * 0.5))
laCentreModY = ((cruxLeftModY - (ayMod * math.cos(laCruxLeftRadians))) - (byMod * math.sin(laCruxLeftRadians)))
laCentreX = ((cruxCentre[0] + (laCentreModX * cosRotRadians)) + (laCentreModY * (- sinRotRadians)))
laCentreY = ((cruxCentre[1] + (laCentreModX * sinRotRadians)) + (laCentreModY * cosRotRadians))
raCruxLeftRadians = (- laCruxLeftRadians)
raCentreX = ((cruxCentre[0] - (laCentreModX * cosRotRadians)) + (laCentreModY * (- sinRotRadians)))
raCentreY = ((cruxCentre[1] - (laCentreModX * sinRotRadians)) + (laCentreModY * cosRotRadians))
aCentreOuterZ = (baseHeight + baseThickness)
aCentreInnerZ = (aCentreOuterZ - aBaseSlopeHeight)
atrialPerimeterLength = getApproximateEllipsePerimeter(aOuterMajorMag, aOuterMinorMag)
atrialSeptumCentreToCruxLeftLength = getEllipseArcLength(aOuterMajorMag, aOuterMinorMag, laSeptumRadians, laCruxLeftRadians)
atrialSeptumElementLength = (atrialSeptumCentreToCruxLeftLength / (1.0 + (elementsCountAtrialSeptum * 0.5)))
atrialFreeWallElementLength = ((atrialPerimeterLength - (atrialSeptumElementLength * (elementsCountAtrialSeptum + 2))) / ((elementsCountAroundAtria - elementsCountAtrialSeptum) - 2))
atrialTransitionElementLength = (0.5 * (atrialSeptumElementLength + atrialFreeWallElementLength))
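# Spread the element boundary angles around the left atrium: septum elements
# use the shorter septum length, free-wall elements the longer length, with
# one transition element on each side.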
laRadians = []
laOuterDerivatives = []
radiansAround = laSeptumRadians
if ((elementsCountAtrialSeptum % 2) == 1):
radiansAround = updateEllipseAngleByArcLength(aOuterMajorMag, aOuterMinorMag, radiansAround, (0.5 * atrialSeptumElementLength))
outerDerivative = atrialSeptumElementLength
lan1CruxLimit = ((elementsCountAtrialSeptum // 2) + 1)
lan1SeptumLimit = ((elementsCountAroundAtria - ((elementsCountAtrialSeptum + 1) // 2)) - 1)
for n1 in range(elementsCountAroundAtria):
laRadians.append(radiansAround)
laOuterDerivatives.append(outerDerivative)
if ((n1 < lan1CruxLimit) or (n1 > lan1SeptumLimit)):
elementLength = atrialSeptumElementLength
outerDerivative = atrialSeptumElementLength
elif (n1 == lan1CruxLimit):
elementLength = atrialTransitionElementLength
outerDerivative = atrialFreeWallElementLength
elif (n1 == lan1SeptumLimit):
elementLength = atrialTransitionElementLength
outerDerivative = atrialSeptumElementLength
else:
elementLength = atrialFreeWallElementLength
outerDerivative = atrialFreeWallElementLength
radiansAround = updateEllipseAngleByArcLength(aOuterMajorMag, aOuterMinorMag, radiansAround, elementLength)
laInnerDerivatives = []
finalArcLength = prevArcLength = getEllipseArcLength(aInnerMajorMag, aInnerMinorMag, (laRadians[(- 1)] - (2.0 * math.pi)), laRadians[0])
for n1 in range(elementsCountAroundAtria):
if (n1 == (elementsCountAroundAtria - 1)):
nextArcLength = finalArcLength
else:
nextArcLength = getEllipseArcLength(aInnerMajorMag, aInnerMinorMag, laRadians[n1], laRadians[(n1 + 1)])
if (laOuterDerivatives[n1] == atrialSeptumElementLength):
arcLength = min(prevArcLength, nextArcLength)
else:
arcLength = max(prevArcLength, nextArcLength)
laInnerDerivatives.append(arcLength)
prevArcLength = nextArcLength
raRadians = []
raInnerDerivatives = []
raOuterDerivatives = []
for n1 in range(elementsCountAroundAtria):
raRadians.append(((2.0 * math.pi) - laRadians[(- n1)]))
raInnerDerivatives.append(laInnerDerivatives[(- n1)])
raOuterDerivatives.append(laOuterDerivatives[(- n1)])
raRadians[0] = raSeptumRadians
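# Create the left atrium base node rings (n3 == 0 inner, n3 == 1 outer).
# Outer nodes coinciding with the crux or LV outlet are skipped here and
# wired to the shared node ids afterwards.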
laNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
for n3 in range(2):
for n1 in range(elementsCountAroundAtria):
radiansAround = laRadians[n1]
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
inner = [((laCentreX + (cosRadiansAround * laInnerMajor[0])) + (sinRadiansAround * laInnerMinor[0])), ((laCentreY + (cosRadiansAround * laInnerMajor[1])) + (sinRadiansAround * laInnerMinor[1])), aCentreInnerZ]
outer = [((laCentreX + (cosRadiansAround * laOuterMajor[0])) + (sinRadiansAround * laOuterMinor[0])), ((laCentreY + (cosRadiansAround * laOuterMajor[1])) + (sinRadiansAround * laOuterMinor[1])), aCentreOuterZ]
if ((n3 == 1) and ((n1 <= lan1CruxLimit) or (n1 > (lan1SeptumLimit + 2)))):
continue
node = nodes.createNode(nodeIdentifier, nodetemplate)
laNodeId[n3][n1] = nodeIdentifier
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, (inner if (n3 == 0) else outer))
if (n3 == 0):
dx_ds1 = [(((- sinRadiansAround) * laInnerMajor[0]) + (cosRadiansAround * laInnerMinor[0])), (((- sinRadiansAround) * laInnerMajor[1]) + (cosRadiansAround * laInnerMinor[1])), 0.0]
scale1 = laInnerDerivatives[n1]
else:
dx_ds1 = [(((- sinRadiansAround) * laOuterMajor[0]) + (cosRadiansAround * laOuterMinor[0])), (((- sinRadiansAround) * laOuterMajor[1]) + (cosRadiansAround * laOuterMinor[1])), 0.0]
scale1 = laOuterDerivatives[n1]
scale1 /= vector.magnitude(dx_ds1)
dx_ds1 = [(d * scale1) for d in dx_ds1]
dx_ds3 = [(outer[0] - inner[0]), (outer[1] - inner[1]), (outer[2] - inner[2])]
if ((n1 < lan1CruxLimit) or (n1 > lan1SeptumLimit)):
dx_ds2 = [0.0, 0.0, aCentreInnerZ]
else:
dx_ds2 = [((dx_ds3[1] * dx_ds1[2]) - (dx_ds3[2] * dx_ds1[1])), ((dx_ds3[2] * dx_ds1[0]) - (dx_ds3[0] * dx_ds1[2])), ((dx_ds3[0] * dx_ds1[1]) - (dx_ds3[1] * dx_ds1[0]))]
if (n1 == (lan1CruxLimit + 1)):
dx_ds2[2] *= (0.5 if (n3 == 0) else 0.25)
mag2 = (1.5 * (baseHeight + baseThickness))
else:
mag2 = inner[2]
scale2 = (mag2 / vector.magnitude(dx_ds2))
dx_ds2 = [(d * scale2) for d in dx_ds2]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
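# Debug block (disabled): would create marker nodes at the left atrium centre.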
if False:
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [laCentreX, laCentreY, aCentreInnerZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [laInnerMajor[0], laInnerMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [laInnerMinor[0], laInnerMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreInnerZ])
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [laCentreX, laCentreY, aCentreOuterZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [laOuterMajor[0], laOuterMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [laOuterMinor[0], laOuterMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreOuterZ])
nodeIdentifier += 1
ran1SeptumLimit = (elementsCountAtrialSeptum // 2)
ran1CruxLimit = ((elementsCountAroundAtria - ran1SeptumLimit) - 1)
raNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
raNodeId[1][0] = laNodeId[0][0]
raNodeId[1][(- 2)] = lvOutletNodeId[1][1]
raNodeId[1][(- 1)] = lvOutletNodeId[1][0]
for n3 in range(2):
for n1 in range(elementsCountAroundAtria):
radiansAround = raRadians[n1]
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
inner = [((raCentreX + (cosRadiansAround * raInnerMajor[0])) + (sinRadiansAround * raInnerMinor[0])), ((raCentreY + (cosRadiansAround * raInnerMajor[1])) + (sinRadiansAround * raInnerMinor[1])), aCentreInnerZ]
outer = [((raCentreX + (cosRadiansAround * raOuterMajor[0])) + (sinRadiansAround * raOuterMinor[0])), ((raCentreY + (cosRadiansAround * raOuterMajor[1])) + (sinRadiansAround * raOuterMinor[1])), aCentreOuterZ]
if ((n3 == 1) and ((n1 < ran1SeptumLimit) or (n1 >= ran1CruxLimit))):
continue
node = nodes.createNode(nodeIdentifier, nodetemplate)
raNodeId[n3][n1] = nodeIdentifier
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, (inner if (n3 == 0) else outer))
if (n3 == 0):
dx_ds1 = [(((- sinRadiansAround) * raInnerMajor[0]) + (cosRadiansAround * raInnerMinor[0])), (((- sinRadiansAround) * raInnerMajor[1]) + (cosRadiansAround * raInnerMinor[1])), 0.0]
scale1 = raInnerDerivatives[n1]
else:
dx_ds1 = [(((- sinRadiansAround) * raOuterMajor[0]) + (cosRadiansAround * raOuterMinor[0])), (((- sinRadiansAround) * raOuterMajor[1]) + (cosRadiansAround * raOuterMinor[1])), 0.0]
scale1 = raOuterDerivatives[n1]
scale1 /= vector.magnitude(dx_ds1)
dx_ds1 = [(d * scale1) for d in dx_ds1]
dx_ds3 = [(outer[0] - inner[0]), (outer[1] - inner[1]), (outer[2] - inner[2])]
if ((n1 <= ran1SeptumLimit) or (n1 >= ran1CruxLimit)):
dx_ds2 = [0.0, 0.0, aCentreInnerZ]
else:
dx_ds2 = [((dx_ds3[1] * dx_ds1[2]) - (dx_ds3[2] * dx_ds1[1])), ((dx_ds3[2] * dx_ds1[0]) - (dx_ds3[0] * dx_ds1[2])), ((dx_ds3[0] * dx_ds1[1]) - (dx_ds3[1] * dx_ds1[0]))]
if (n1 == (ran1CruxLimit - 1)):
dx_ds2[2] *= (0.5 if (n3 == 0) else 0.25)
mag2 = (1.5 * (baseHeight + baseThickness))
else:
mag2 = inner[2]
scale2 = (mag2 / vector.magnitude(dx_ds2))
dx_ds2 = [(d * scale2) for d in dx_ds2]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
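# Debug block (disabled): would create marker nodes at the right atrium centre.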
if False:
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [raCentreX, raCentreY, aCentreInnerZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [raInnerMajor[0], raInnerMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [raInnerMinor[0], raInnerMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreInnerZ])
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [raCentreX, raCentreY, aCentreOuterZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [raOuterMajor[0], raOuterMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [raOuterMinor[0], raOuterMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreOuterZ])
nodeIdentifier += 1
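# Wire shared outer node ids: the left atrium outer ring reuses right-atrium
# and LV-outlet nodes at the septum and crux; d/ds3 on each atrial node is
# then reset to point from the inner ring to the outer ring.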
laNodeId[1][0] = raNodeId[0][0]
laNodeId[1][1] = lvOutletNodeId[1][0]
laNodeId[1][2] = lvOutletNodeId[1][(- 1)]
for i in range(2):
aNodeId = (laNodeId if (i == 0) else raNodeId)
for n1 in range(elementsCountAroundAtria):
nid2 = aNodeId[1][n1]
node2 = nodes.findNodeByIdentifier(nid2)
cache.setNode(node2)
(result, x2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
nid1 = aNodeId[0][n1]
node1 = nodes.findNodeByIdentifier(nid1)
cache.setNode(node1)
(result, x1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
dx_ds3 = [(x2[0] - x1[0]), (x2[1] - x1[1]), (x2[2] - x1[2])]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if ((i == 1) and ((n1 == 0) or (nid2 == cruxCentreNodeId))):
continue
if (nid2 in [cruxLeftNodeId, cruxRightNodeId]):
dx_ds3 = [(- d) for d in dx_ds3]
cache.setNode(node2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
cache.setNode(nodes.findNodeByIdentifier(laNodeId[0][1]))
(result, x1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(raNodeId[0][(- 1)]))
(result, x2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(cruxCentreNodeId))
(result, xc) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
d1 = [(x1[c] - xc[c]) for c in range(3)]
d2 = [(x2[c] - xc[c]) for c in range(3)]
dx_ds3 = [(d1[0] + d2[0]), (d1[1] + d2[1]), (d1[2] + d2[2])]
scale = (vector.magnitude(d1) / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
cache.setNode(nodes.findNodeByIdentifier(laNodeId[0][2]))
(result, dx_ds1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, dx_ds3) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
dx_ds2 = vector.crossproduct3(dx_ds3, dx_ds1)
scale2 = ((0.5 * vector.magnitude(dx_ds3)) / vector.magnitude(dx_ds2))
dx_ds2 = [(scale2 * d) for d in dx_ds2]
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
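# Supraventricular crest node on the LV side: cubic Hermite interpolation
# between the outer node above the septum end and the LV outlet outer ring.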
nida = (nsdl + nowl)
nidb = lvOutletNodeId[1][(- 2)]
node1 = nodes.findNodeByIdentifier(nida)
node2 = nodes.findNodeByIdentifier(nidb)
(x, dx_ds2, dx_ds1, dx_ds3) = interpolateNodesCubicHermite(cache, coordinates, 0.5, baseThickness, node1, Node.VALUE_LABEL_D_DS2, 1.0, Node.VALUE_LABEL_D_DS1, 1.0, node2, Node.VALUE_LABEL_D_DS3, (- 1.0), Node.VALUE_LABEL_D_DS1, 1.0)
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
lv_crest_nid1 = nodeIdentifier
nodeIdentifier += 1
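# Crest nodes on the RV side: Hermite-interpolate between an RV outer wall
# node and the LV outlet ring, then offset inwards by baseThickness to get an
# inner/outer pair (rv_crest_nid1, rv_crest_nid2).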
nida = ((nidr + nowr) + 4)
nidb = lvOutletNodeId[1][2]
node = nodes.findNodeByIdentifier(nida)
cache.setNode(node)
(result, xa) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, d1a) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, d2a) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, 3)
node = nodes.findNodeByIdentifier(nidb)
cache.setNode(node)
(result, xb) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, d1b) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, d2b) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
d2b = [((- 2.0) * d) for d in d2b]
scale = ((4.0 * (baseHeight + baseThickness)) / vector.magnitude(d2a))
d2a = [(scale * d) for d in d2a]
xi = 0.5
xr = (1.0 - xi)
x = interp.interpolateCubicHermite(xa, d2a, xb, d2b, xi)
dx_ds1 = [((xr * d1a[c]) + (xi * d1b[c])) for c in range(3)]
dx_ds2 = interp.interpolateCubicHermiteDerivative(xa, d2a, xb, d2b, xi)
dx_ds2 = [(xr * d) for d in dx_ds2]
radialVector = vector.normalise(vector.crossproduct3(dx_ds1, dx_ds2))
dx_ds3 = [(baseThickness * d) for d in radialVector]
x_inner = [(x[c] - dx_ds3[c]) for c in range(3)]
curvatureScale = (1.0 - (baseThickness * interp.getCubicHermiteCurvature(xa, d2a, x, dx_ds2, radialVector, 1.0)))
dx_ds2_inner = [(curvatureScale * d) for d in dx_ds2]
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x_inner)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2_inner)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
rv_crest_nid1 = nodeIdentifier
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
rv_crest_nid2 = nodeIdentifier
nodeIdentifier += 1
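# Bridge node over the atrial septum: interpolated between the left atrium
# inner ring and the LV outlet inner ring, used by the later septal bridge
# elements.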
nida = laNodeId[0][2]
nidb = lvOutletNodeId[0][(- 1)]
node1 = nodes.findNodeByIdentifier(nida)
node2 = nodes.findNodeByIdentifier(nidb)
(x, dx_ds2, dx_ds1, dx_ds3) = interpolateNodesCubicHermite(cache, coordinates, 0.4, lvOutletWallThickness, node1, Node.VALUE_LABEL_D_DS2, (- 1.0), Node.VALUE_LABEL_D_DS1, (- 1.0), node2, Node.VALUE_LABEL_D_DS2, 1.0, Node.VALUE_LABEL_D_DS1, 1.0)
dx_ds1 = [(2.0 * d) for d in dx_ds1]
node = nodes.createNode(nodeIdentifier, nodetemplateLinearS3)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
lv_bridge_nid1 = nodeIdentifier
nodeIdentifier += 1
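# Top-of-fibrous-ring nodes: copies of the atrial base rings raised by
# fibrousRingThickness, with derivatives flipped on the shared septal edges.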
tlaNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
traNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
for n3 in range(2):
for i in range(2):
if (i == 0):
baNodeId = laNodeId
taNodeId = tlaNodeId
else:
baNodeId = raNodeId
taNodeId = traNodeId
for n1 in range(elementsCountAroundAtria):
if ((n3 == 1) and (((i == 0) and ((n1 < (lan1CruxLimit - 1)) or (n1 > (lan1SeptumLimit + 2)))) or ((i == 1) and ((n1 < ran1SeptumLimit) or (n1 > ran1CruxLimit))))):
continue
node = nodes.findNodeByIdentifier(baNodeId[n3][n1])
cache.setNode(node)
(result, x) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, dx_ds1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, dx_ds2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, 3)
(result, dx_ds3) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
x[2] += fibrousRingThickness
if ((n3 == 1) and (((i == 0) and ((n1 == 1) or (n1 == 2))) or ((i == 1) and (n1 == (elementsCountAroundAtria - 2))))):
dx_ds1 = [(- d) for d in dx_ds1]
dx_ds3 = [(- d) for d in dx_ds3]
taNodeId[n3][n1] = nodeIdentifier
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
tlaNodeId[1][0] = traNodeId[0][0]
traNodeId[1][(- 1)] = tlaNodeId[1][1]
traNodeId[1][0] = tlaNodeId[0][0]
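# Elements: fetch the annotation mesh groups and the tricubic Hermite element
# factory, then build the LV base elements, RV base elements and fibrous ring.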
mesh = fm.findMeshByDimension(3)
lvMeshGroup = lvGroup.getMeshGroup(mesh)
rvMeshGroup = rvGroup.getMeshGroup(mesh)
vSeptumMeshGroup = vSeptumGroup.getMeshGroup(mesh)
conusArteriosusMeshGroup = conusArteriosusGroup.getMeshGroup(mesh)
lFibrousRingMeshGroup = lFibrousRingGroup.getMeshGroup(mesh)
rFibrousRingMeshGroup = rFibrousRingGroup.getMeshGroup(mesh)
tricubichermite = eftfactory_tricubichermite(mesh, useCrossDerivatives)
eft = tricubichermite.createEftNoCrossDerivatives()
elementIdentifier = (getMaximumElementIdentifier(mesh) + 1)
elementtemplate1 = mesh.createElementtemplate()
elementtemplate1.setElementShapeType(Element.SHAPE_TYPE_CUBE)
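# Left ventricle base: 19 elements from the atrial septum transition (e 0-3),
# under the aortic outlet (e 4-7), around to the crest and bridge region;
# septal elements are also added to the RV and septum groups.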
for e in range(19):
eft1 = eft
nids = None
meshGroups = [lvMeshGroup]
if (e == 0):
nids = [(nidl + 0), (nidl + 1), laNodeId[0][(- 1)], laNodeId[0][0], (nidr + 0), ((nidl + nowl) + 1), raNodeId[0][1], laNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS1], [1])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
scaleEftNodeValueLabels(eft1, [8], [Node.VALUE_LABEL_D_DS3], [1])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 1):
nids = [(nidl + 1), (nidl + 2), laNodeId[0][0], laNodeId[0][1], ((nidl + nowl) + 1), ((nidl + nowl) + 2), laNodeId[1][0], raNodeId[0][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
scaleEftNodeValueLabels(eft1, [7], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 2):
nids = [(nidl + 2), lvOutletNodeId[0][0], laNodeId[0][1], lvOutletNodeId[1][0], ((nidl + nowl) + 2), raNodeId[0][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
tricubichermite.setEftLinearDerivative(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, 2, 4, 1)
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
ln_map = [1, 2, 3, 4, 5, 4, 6, 4]
remapEftLocalNodes(eft1, 6, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 3):
nids = [lvOutletNodeId[1][0], lvOutletNodeId[1][1], ((nidl + nowl) + 2), ((nidl + nowl) + 3), raNodeId[0][(- 1)], raNodeId[0][(- 2)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 2, 3, 4], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
scaleEftNodeValueLabels(eft1, [7, 8], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
ln_map = [1, 2, 1, 2, 3, 4, 5, 6]
remapEftLocalNodes(eft1, 6, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e <= 6):
n = (e - 4)
nids = [((nidl + n) + 2), ((nidl + n) + 3), lvOutletNodeId[0][n], lvOutletNodeId[0][(n + 1)], (((nidl + nowl) + n) + 2), (((nidl + nowl) + n) + 3), lvOutletNodeId[1][n], lvOutletNodeId[1][(n + 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
if (e == 4):
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 7):
nids = [(nidl + 5), (nidl + 6), lvOutletNodeId[0][3], lvOutletNodeId[0][4], ((nidl + nowl) + 5), ((nidl + nowl) + 6), lvOutletNodeId[1][3], lvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 8):
nids = [(nidl + 6), (nidl + 7), lvOutletNodeId[0][4], lv_crest_nid1, ((nidl + nowl) + 6), ((nidr + norr) - 1), lvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 3, 4, 5, 6, 7, 4]
remapEftLocalNodes(eft1, 7, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 9):
nids = [((nidr + norr) - 1), nsdl, (((nidr + nowr) + norr) - 1), (nsdl + nowl), lv_crest_nid1]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [1, 2, 3, 4], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 1, 2, 3, 4, 5, 5]
remapEftLocalNodes(eft1, 5, ln_map)
meshGroups += [rvMeshGroup]
elif (e == 10):
nids = [nsdl, (nsdl + 1), laNodeId[0][3], (nsdl + nowl), ((nsdl + nowl) + 1), lv_crest_nid1, laNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 1, 3, 4, 5, 6, 7]
remapEftLocalNodes(eft1, 7, ln_map)
elif (e == 11):
nids = [(nedl - 4), (nedl - 3), laNodeId[0][(- 5)], laNodeId[0][(- 4)], ((nedl + nowl) - 4), ((nedl + nowl) - 3), laNodeId[1][(- 5)], laNodeId[1][(- 4)]]
elif (e == 12):
nids = [(nedl - 3), (nedl - 2), laNodeId[0][(- 4)], laNodeId[0][(- 3)], ((nedl + nowl) - 3), ((nedl + nowl) - 2), laNodeId[1][(- 4)], laNodeId[1][(- 3)]]
elif (e == 13):
nids = [(nedl - 2), (nedl - 1), laNodeId[0][(- 3)], laNodeId[0][(- 2)], ((nedl + nowl) - 2), ((nedl + nowl) - 1), laNodeId[1][(- 3)], laNodeId[1][(- 2)]]
elif (e == 14):
nids = [(nedl - 1), (nidl + 0), laNodeId[0][(- 2)], laNodeId[0][(- 1)], ((nedl + nowl) - 1), ((nidl + nowl) + 0), laNodeId[1][(- 2)], laNodeId[1][(- 1)]]
elif (e == 15):
nids = [(nidl + 6), (nidl + 7), lv_bridge_nid1, laNodeId[0][3], lvOutletNodeId[0][4], lv_crest_nid1, lvOutletNodeId[0][5], laNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
elif (e == 16):
nids = [lv_crest_nid1, laNodeId[1][3], lvOutletNodeId[0][4], lvOutletNodeId[0][5], lvOutletNodeId[1][4], lvOutletNodeId[1][5]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [1, 2, 5, 6], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [2, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
ln_map = [1, 2, 3, 4, 1, 2, 5, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e == 17):
nids = [laNodeId[0][3], laNodeId[0][2], lv_bridge_nid1, laNodeId[1][3], lvOutletNodeId[1][5], lvOutletNodeId[0][5]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [6, 8], Node.VALUE_LABEL_D_DS2, 6, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 1, 3, 4, 5, 4, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e == 18):
nids = [lv_bridge_nid1, (nidl + elementsCountAtrialSeptum), lvOutletNodeId[0][5], lvOutletNodeId[0][0], laNodeId[0][2], laNodeId[0][1], lvOutletNodeId[1][5], lvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
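# Right ventricle base: 15 elements, apparently including the crest (rv_crest_nid*)
# and the conus arteriosus elements leading up to the RV outlet ring.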
for e in range(15):
eft1 = eft
nids = None
meshGroups = [rvMeshGroup]
if (e == 0):
nids = [(nidl + 0), (nidr + 0), laNodeId[0][(- 1)], raNodeId[0][1], ((nidl + nowl) + 0), ((nidr + nowr) + 0), laNodeId[1][(- 1)], raNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [lvMeshGroup]
elif (e == 1):
nids = [(nidr + 0), (nidr + 1), raNodeId[0][1], raNodeId[0][2], ((nidr + nowr) + 0), ((nidr + nowr) + 1), raNodeId[1][1], raNodeId[1][2]]
elif (e == 2):
nids = [(nidr + 1), (nidr + 2), raNodeId[0][2], raNodeId[0][3], ((nidr + nowr) + 1), ((nidr + nowr) + 2), raNodeId[1][2], raNodeId[1][3]]
elif (e == 3):
nids = [(nidr + 2), (nidr + 3), raNodeId[0][3], raNodeId[0][4], ((nidr + nowr) + 2), ((nidr + nowr) + 3), raNodeId[1][3], raNodeId[1][4]]
elif (e == 4):
nids = [(nidr + 3), (nidr + 4), raNodeId[0][4], rv_crest_nid1, ((nidr + nowr) + 3), ((nidr + nowr) + 4), raNodeId[1][4], rv_crest_nid2]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [3, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
elif (e == 5):
nids = [(nidr + 4), (nidr + 5), rv_crest_nid1, rvOutletNodeId[0][2], ((nidr + nowr) + 4), ((nidr + nowr) + 5), rv_crest_nid2, rvOutletNodeId[1][2]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 6):
nids = [(nidr + 5), (nidr + 6), rvOutletNodeId[0][2], rvOutletNodeId[0][3], ((nidr + nowr) + 5), ((nidr + nowr) + 6), rvOutletNodeId[1][2], rvOutletNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
meshGroups += [conusArteriosusMeshGroup]
elif (e == 7):
nids = [(nidr + 6), (nidr + 7), rvOutletNodeId[0][3], rvOutletNodeId[0][4], ((nidr + nowr) + 6), ((nidr + nowr) + 7), rvOutletNodeId[1][3], rvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
meshGroups += [conusArteriosusMeshGroup]
elif (e == 8):
nids = [raNodeId[0][(- 3)], ((nidl + nowl) + 4), raNodeId[0][(- 2)], ((nidl + nowl) + 3), raNodeId[1][(- 3)], lvOutletNodeId[1][2], lvOutletNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [1, 5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 3, 4, 5, 6, 7, 7]
remapEftLocalNodes(eft1, 7, ln_map)
elif (e == 9):
nids = [raNodeId[0][(- 4)], rv_crest_nid1, raNodeId[0][(- 3)], ((nidl + nowl) + 4), raNodeId[1][(- 4)], rv_crest_nid2, raNodeId[1][(- 3)], lvOutletNodeId[1][2]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
elif (e == 10):
nids = [rv_crest_nid1, rvOutletNodeId[0][2], ((nidl + nowl) + 4), rvOutletNodeId[0][1], rv_crest_nid2, rvOutletNodeId[1][2], lvOutletNodeId[1][2], rvOutletNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [2, 3, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2, 4, 6, 8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2, 4, 6, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 11):
nids = [((nidl + nowl) + 4), rvOutletNodeId[0][1], ((nidl + nowl) + 5), rvOutletNodeId[0][0], lvOutletNodeId[1][2], rvOutletNodeId[1][1], lvOutletNodeId[1][3], rvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 12):
nids = [((nidl + nowl) + 6), ((nidl + nowl) + 5), rvOutletNodeId[0][(- 1)], rvOutletNodeId[0][0], lvOutletNodeId[1][4], lvOutletNodeId[1][3], rvOutletNodeId[1][(- 1)], rvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 5, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [1, 2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 13):
nids = [(nidr + elementsCountAroundVSeptum), ((nidl + nowl) + 6), rvOutletNodeId[0][(- 2)], rvOutletNodeId[0][(- 1)], lv_crest_nid1, lvOutletNodeId[1][4], rvOutletNodeId[1][(- 2)], rvOutletNodeId[1][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 14):
nids = [(nidr + elementsCountAroundVSeptum), rvOutletNodeId[0][(- 2)], ((nidr + nowr) + elementsCountAroundVSeptum), lv_crest_nid1, rvOutletNodeId[1][(- 2)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 3, 4, 7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 1, 2, 2, 3, 4, 5, 5]
remapEftLocalNodes(eft1, 5, ln_map)
meshGroups += [conusArteriosusMeshGroup]
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
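# Fibrous rings: elements are bicubic Hermite around the ring and linear in the second
# element direction (linearAxis=2), which takes its derivative from the nodes' d/ds3.
# The ring is built for the left atrium then the right; the first four elements
# (e1 < 4) span the atrial septum and are added to both fibrous ring groups.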
bicubichermitelinear = eftfactory_bicubichermitelinear(mesh, useCrossDerivatives, linearAxis=2, d_ds1=Node.VALUE_LABEL_D_DS1, d_ds2=Node.VALUE_LABEL_D_DS3)
eftFibrousRing = bicubichermitelinear.createEftBasic()
for i in range(2):
if (i == 0):
baNodeId = laNodeId
taNodeId = tlaNodeId
meshGroupsSide = [lFibrousRingMeshGroup]
else:
baNodeId = raNodeId
taNodeId = traNodeId
meshGroupsSide = [rFibrousRingMeshGroup]
for e1 in range((elementsCountAroundAtria + 2)):
if ((i == 1) and (e1 < 4)):
continue
if (e1 < 4):
meshGroups = [lFibrousRingMeshGroup, rFibrousRingMeshGroup]
else:
meshGroups = meshGroupsSide
eft1 = eftFibrousRing
if ((e1 == 0) or (e1 == 3)):
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
if (e1 == 0):
nids = [laNodeId[0][(- 1)], raNodeId[0][1], tlaNodeId[0][(- 1)], traNodeId[0][1], laNodeId[1][(- 1)], raNodeId[1][1], tlaNodeId[1][(- 1)], traNodeId[1][1]]
else:
nids = [raNodeId[0][(- 1)], laNodeId[0][1], traNodeId[0][(- 1)], tlaNodeId[0][1], laNodeId[1][1], tlaNodeId[1][1]]
remapEftNodeValueLabel(eft1, [5, 6, 7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 3, 4, 5, 5, 6, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e1 == 1):
nids = [laNodeId[0][(- 1)], laNodeId[0][0], tlaNodeId[0][(- 1)], tlaNodeId[0][0], raNodeId[0][1], raNodeId[0][0], traNodeId[0][1], traNodeId[0][0]]
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [6, 8], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
elif (e1 == 2):
nids = [laNodeId[0][0], laNodeId[0][1], tlaNodeId[0][0], tlaNodeId[0][1], raNodeId[0][0], raNodeId[0][(- 1)], traNodeId[0][0], traNodeId[0][(- 1)]]
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [5, 7], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
else:
ea = (e1 - 3)
eb = ((ea + 1) - elementsCountAroundAtria)
nids = [baNodeId[0][ea], baNodeId[0][eb], taNodeId[0][ea], taNodeId[0][eb], baNodeId[1][ea], baNodeId[1][eb], taNodeId[1][ea], taNodeId[1][eb]]
if (((i == 0) and ((e1 == 4) or (e1 == 5))) or ((i == 1) and (e1 >= elementsCountAroundAtria))):
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
if (e1 == 4):
scaleEftNodeValueLabels(eft1, [6], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
elif (e1 == 5):
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
elif (e1 == elementsCountAroundAtria):
scaleEftNodeValueLabels(eft1, [6], [Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
else:
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
fm.endChange()
return annotationGroups | @classmethod
def generateBaseMesh(cls, region, options):
'\n Generate the base tricubic Hermite mesh.\n :param region: Zinc region to define model in. Must be empty.\n :param options: Dict containing options. See getDefaultOptions().\n :return: list of AnnotationGroup\n '
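# Example driver (a sketch, not from the original source): this assumes the classmethod
# sits on a scaffold MeshType class exposing the usual getDefaultOptions(), and that the
# OpenCMISS-Zinc Context API is available; the class name below is assumed.
#   from opencmiss.zinc.context import Context
#   region = Context('heart').getDefaultRegion()
#   options = MeshType_3d_heartventriclesbase2.getDefaultOptions()  # hypothetical name
#   annotationGroups = MeshType_3d_heartventriclesbase2.generateBaseMesh(region, options)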
elementsCountAroundLVFreeWall = options['Number of elements around LV free wall']
elementsCountAroundVSeptum = options['Number of elements around ventricular septum']
elementsCountAroundLV = (elementsCountAroundLVFreeWall + elementsCountAroundVSeptum)
elementsCountUpLVApex = options['Number of elements up LV apex']
elementsCountUpVSeptum = options['Number of elements up ventricular septum']
elementsCountUpLV = (elementsCountUpLVApex + elementsCountUpVSeptum)
elementsCountUpRV = (elementsCountUpVSeptum + 1)
elementsCountAroundRV = (elementsCountAroundVSeptum + 2)
elementsCountAroundAtria = options['Number of elements around atria']
elementsCountAtrialSeptum = options['Number of elements around atrial septum']
lvOuterHeight = options['LV outer height']
lvOuterRadius = options['LV outer radius']
lvFreeWallThickness = options['LV free wall thickness']
lvApexThickness = options['LV apex thickness']
rvHeight = options['RV inner height']
rvArcAroundRadians = math.radians(options['RV arc around degrees'])
rvFreeWallThickness = options['RV free wall thickness']
rvWidth = options['RV width']
rvExtraCrossRadiusBase = options['RV extra cross radius base']
vSeptumThickness = options['Ventricular septum thickness']
vSeptumBaseRadialDisplacement = options['Ventricular septum base radial displacement']
useCrossDerivatives = options['Use cross derivatives']
aBaseInnerMajorMag = (0.5 * options['Atria base inner major axis length'])
aBaseInnerMinorMag = (0.5 * options['Atria base inner minor axis length'])
aMajorAxisRadians = math.radians(options['Atria major axis rotation degrees'])
aSeptumThickness = options['Atrial septum thickness']
aBaseWallThickness = options['Atrial base wall thickness']
aBaseSlopeRadians = math.radians(options['Atrial base slope degrees'])
baseHeight = options['Base height']
baseThickness = options['Base thickness']
fibrousRingThickness = options['Fibrous ring thickness']
lvOutletInnerRadius = (options['LV outlet inner diameter'] * 0.5)
lvOutletWallThickness = options['LV outlet wall thickness']
lvOutletOuterRadius = (lvOutletInnerRadius + lvOutletWallThickness)
rvOutletInnerRadius = (options['RV outlet inner diameter'] * 0.5)
rvOutletWallThickness = options['RV outlet wall thickness']
rvOutletOuterRadius = (rvOutletInnerRadius + rvOutletWallThickness)
vOutletElementLength = options['Ventricles outlet element length']
vOutletInclineRadians = math.radians(options['Ventricles outlet incline degrees'])
vOutletSpacing = options['Ventricles outlet spacing']
annotationGroups = MeshType_3d_heartventricles2.generateBaseMesh(region, options)
lvGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('left ventricle myocardium'))
rvGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('right ventricle myocardium'))
vSeptumGroup = getAnnotationGroupForTerm(annotationGroups, get_heart_term('interventricular septum'))
conusArteriosusGroup = AnnotationGroup(region, get_heart_term('conus arteriosus'))
annotationGroups += [conusArteriosusGroup]
lFibrousRingGroup = AnnotationGroup(region, get_heart_term('left fibrous ring'))
rFibrousRingGroup = AnnotationGroup(region, get_heart_term('right fibrous ring'))
fm = region.getFieldmodule()
fm.beginChange()
coordinates = findOrCreateFieldCoordinates(fm)
cache = fm.createFieldcache()
nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
nodetemplate = nodes.createNodetemplate()
nodetemplate.defineField(coordinates)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_VALUE, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS1, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS2, 1)
nodetemplate.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS3, 1)
nodetemplateLinearS3 = nodes.createNodetemplate()
nodetemplateLinearS3.defineField(coordinates)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_VALUE, 1)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS1, 1)
nodetemplateLinearS3.setValueNumberOfVersions(coordinates, (- 1), Node.VALUE_LABEL_D_DS2, 1)
nodeIdentifier = (getMaximumNodeIdentifier(nodes) + 1)
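# Node-id bookkeeping for the ventricle mesh built by the parent scaffold (naming
# inferred from the arithmetic): norl = nodes per row around the LV; nowl = nodes in one
# LV wall layer; nidl = first node id on the top row of the LV inner layer, with
# nsdl/nedl marking the end of the septum and the end of that row; norr, nowr and nidr
# are the RV counterparts.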
norl = elementsCountAroundLV
nowl = (1 + (elementsCountUpLV * norl))
nidl = ((nowl - norl) + 1)
nsdl = (nidl + elementsCountAroundVSeptum)
nedl = (nidl + elementsCountAroundLV)
norr = (elementsCountAroundRV - 1)
nowr = (elementsCountUpRV * norr)
nidr = ((((nowl * 2) + 1) + nowr) - norr)
elementsCountAroundOutlet = 6
defaultOutletScale3 = 0.5
nidca = (((nidl + nowl) + elementsCountAroundVSeptum) - 1)
nidcb = ((nidr + elementsCountAroundVSeptum) - 1)
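# Crux location: px is the midpoint of the two septum-end nodes (nidca, nidcb); ax is
# then the unit direction from px toward the midpoint of the first LV/RV septum nodes,
# and its angle fixes baseRotationRadians for everything placed around the base.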
cache.setNode(nodes.findNodeByIdentifier(nidca))
(result, pxa) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(nidcb))
(result, pxb) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
px = [(0.5 * (pxa[c] + pxb[c])) for c in range(3)]
node = nodes.findNodeByIdentifier(nidl)
cache.setNode(node)
(result, ax) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
node = nodes.findNodeByIdentifier(nidr)
cache.setNode(node)
(result, bx) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
bx = [(0.5 * (ax[i] + bx[i])) for i in range(2)]
bx.append(ax[2])
ax = [(bx[c] - px[c]) for c in range(3)]
ax = vector.normalise(ax)
baseRotationRadians = math.atan2(ax[1], ax[0])
outletSpacingRadians = (0.25 * math.pi)
outletSpacingHorizontal = (vOutletSpacing * math.cos(outletSpacingRadians))
outletSpacingVertical = (vOutletSpacing * math.sin(outletSpacingRadians))
cruxOffset = ((rvOutletOuterRadius + outletSpacingHorizontal) + (2.0 * lvOutletOuterRadius))
cx = [(px[c] + (ax[c] * cruxOffset)) for c in range(3)]
aBaseSlopeLength = (aBaseWallThickness * math.cos(aBaseSlopeRadians))
aBaseSlopeHeight = (aBaseWallThickness * math.sin(aBaseSlopeRadians))
cosOutletInclineRadians = math.cos(vOutletInclineRadians)
sinOutletInclineRadians = math.sin(vOutletInclineRadians)
lvOutletCentre = [(cx[0] - (ax[0] * lvOutletOuterRadius)), (cx[1] - (ax[1] * lvOutletOuterRadius)), (((baseHeight + baseThickness) - aBaseSlopeHeight) + (sinOutletInclineRadians * lvOutletOuterRadius))]
radiansPerElementAroundOutlet = ((2.0 * math.pi) / elementsCountAroundOutlet)
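# LV outlet (aorta): two rings of elementsCountAroundOutlet nodes, inner then outer,
# inclined by vOutletInclineRadians. Inner-ring nodes are linear through the wall (no
# d/ds3); the crux centre/right/left node ids and positions are recorded for joining
# the atria later.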
x = [0.0, 0.0, 0.0]
dx_ds1 = [0.0, 0.0, 0.0]
dx_ds3 = [0.0, 0.0, 0.0]
lvOutletNodeId = []
for n3 in range(2):
radius = (lvOutletInnerRadius if (n3 == 0) else lvOutletOuterRadius)
loAxis1 = [(radius * ax[c]) for c in range(3)]
loAxis2 = [((- loAxis1[1]) * cosOutletInclineRadians), (loAxis1[0] * cosOutletInclineRadians), ((- radius) * sinOutletInclineRadians)]
loAxis3 = vector.crossproduct3(loAxis1, loAxis2)
scale = (vOutletElementLength / vector.magnitude(loAxis3))
dx_ds2 = [(v * scale) for v in loAxis3]
outletNodeId = []
for n1 in range(elementsCountAroundOutlet):
radiansAround = (n1 * radiansPerElementAroundOutlet)
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
outletScale3 = ((vOutletSpacing / radius) if (n1 == 3) else defaultOutletScale3)
for c in range(3):
x[c] = ((lvOutletCentre[c] + (loAxis1[c] * cosRadiansAround)) + (loAxis2[c] * sinRadiansAround))
dx_ds1[c] = (radiansPerElementAroundOutlet * ((loAxis1[c] * (- sinRadiansAround)) + (loAxis2[c] * cosRadiansAround)))
node = nodes.createNode(nodeIdentifier, (nodetemplateLinearS3 if (n3 == 0) else nodetemplate))
outletNodeId.append(nodeIdentifier)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
if (n3 == 1):
dx_ds3 = [(outletScale3 * ((loAxis1[c] * cosRadiansAround) + (loAxis2[c] * sinRadiansAround))) for c in range(3)]
if (n1 in [2, 4]):
if (n1 == 2):
dx_ds3[2] = (- dx_ds3[2])
else:
dx_ds3[2] = ((- 2.0) * dx_ds3[2])
scale = ((radiansPerElementAroundOutlet * rvOutletOuterRadius) / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if (n1 == 0):
cruxCentreNodeId = nodeIdentifier
cruxCentre = [x[0], x[1], x[2]]
elif (n1 == 1):
cruxRightNodeId = nodeIdentifier
cruxRight = [x[0], x[1], x[2]]
elif (n1 == (elementsCountAroundOutlet - 1)):
cruxLeftNodeId = nodeIdentifier
cruxLeft = [x[0], x[1], x[2]]
elif (n1 == 3):
lvOutletOuterSpaceX = [x[0], x[1], x[2]]
nodeIdentifier += 1
lvOutletNodeId.append(outletNodeId)
outletCentreSpacing = ((lvOutletOuterRadius + outletSpacingHorizontal) + rvOutletOuterRadius)
rvOutletCentre = [(lvOutletCentre[c] - (outletCentreSpacing * ax[c])) for c in range(3)]
unitCrossX = vector.normalise([(- ax[1]), ax[0]])
rvOutletCentre[0] -= ((outletSpacingVertical * sinOutletInclineRadians) * unitCrossX[0])
rvOutletCentre[1] -= ((outletSpacingVertical * sinOutletInclineRadians) * unitCrossX[1])
rvOutletCentre[2] += (outletSpacingVertical * cosOutletInclineRadians)
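# RV outlet (pulmonary) rings: centred outletCentreSpacing away from the LV outlet
# along -ax and offset by the outlet spacing; here only the outer nodes facing the LV
# outlet (n1 in [0, 5]) carry a d/ds3.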
rvOutletNodeId = []
for n3 in range(2):
radius = (rvOutletInnerRadius if (n3 == 0) else rvOutletOuterRadius)
roAxis1 = [(radius * ax[c]) for c in range(3)]
roAxis2 = [((- roAxis1[1]) * cosOutletInclineRadians), (roAxis1[0] * cosOutletInclineRadians), (radius * sinOutletInclineRadians)]
roAxis3 = vector.crossproduct3(roAxis1, roAxis2)
scale = (vOutletElementLength / vector.magnitude(roAxis3))
dx_ds2 = [(v * scale) for v in roAxis3]
outletNodeId = []
for n1 in range(elementsCountAroundOutlet):
radiansAround = (n1 * radiansPerElementAroundOutlet)
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
outletScale3 = ((vOutletSpacing / radius) if (n1 == 0) else defaultOutletScale3)
for c in range(3):
x[c] = ((rvOutletCentre[c] + (roAxis1[c] * cosRadiansAround)) + (roAxis2[c] * sinRadiansAround))
dx_ds1[c] = (radiansPerElementAroundOutlet * ((roAxis1[c] * (- sinRadiansAround)) + (roAxis2[c] * cosRadiansAround)))
hasDerivative3 = ((n3 == 1) and (n1 in [0, 5]))
node = nodes.createNode(nodeIdentifier, (nodetemplate if hasDerivative3 else nodetemplateLinearS3))
outletNodeId.append(nodeIdentifier)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
if hasDerivative3:
dx_ds3 = [(outletScale3 * ((roAxis1[c] * cosRadiansAround) + (roAxis2[c] * sinRadiansAround))) for c in range(3)]
if (n1 in [1, 5]):
if (n1 == 1):
dx_ds3[2] = (- dx_ds3[2])
else:
dx_ds3[2] = (4.0 * dx_ds3[2])
dx_ds3 = [(dx_ds1[c] + dx_ds3[c]) for c in range(3)]
mag3 = (radiansPerElementAroundOutlet * rvOutletOuterRadius)
scale = (mag3 / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if (n1 == 0):
rvOutletOuterSpaceX = [x[0], x[1], x[2]]
nodeIdentifier += 1
rvOutletNodeId.append(outletNodeId)
cache.setNode(nodes.findNodeByIdentifier(lvOutletNodeId[1][3]))
dx_ds3 = [(rvOutletOuterSpaceX[c] - lvOutletOuterSpaceX[c]) for c in range(3)]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
cache.setNode(nodes.findNodeByIdentifier(rvOutletNodeId[1][0]))
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [(- d) for d in dx_ds3])
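# Atria base ellipses: the left/right major axes are rotated by +/- aMajorAxisRadians
# either side of the base direction, and outer magnitudes exceed inner ones by the base
# slope length, giving the sloped atrial wall.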
aInnerMajorMag = aBaseInnerMajorMag
aInnerMinorMag = aBaseInnerMinorMag
aOuterMajorMag = (aInnerMajorMag + aBaseSlopeLength)
aOuterMinorMag = (aInnerMinorMag + aBaseSlopeLength)
laMajorAxisRadians = ((baseRotationRadians + (0.5 * math.pi)) - aMajorAxisRadians)
laInnerMajor = [(aInnerMajorMag * math.cos(laMajorAxisRadians)), (aInnerMajorMag * math.sin(laMajorAxisRadians)), 0.0]
laInnerMinor = [((- aInnerMinorMag) * math.sin(laMajorAxisRadians)), (aInnerMinorMag * math.cos(laMajorAxisRadians)), 0.0]
laOuterMajor = [(aOuterMajorMag * math.cos(laMajorAxisRadians)), (aOuterMajorMag * math.sin(laMajorAxisRadians)), 0.0]
laOuterMinor = [((- aOuterMinorMag) * math.sin(laMajorAxisRadians)), (aOuterMinorMag * math.cos(laMajorAxisRadians)), 0.0]
raMajorAxisRadians = ((baseRotationRadians - (0.5 * math.pi)) + aMajorAxisRadians)
raInnerMajor = [(aInnerMajorMag * math.cos(raMajorAxisRadians)), (aInnerMajorMag * math.sin(raMajorAxisRadians)), 0.0]
raInnerMinor = [((- aInnerMinorMag) * math.sin(raMajorAxisRadians)), (aInnerMinorMag * math.cos(raMajorAxisRadians)), 0.0]
raOuterMajor = [(aOuterMajorMag * math.cos(raMajorAxisRadians)), (aOuterMajorMag * math.sin(raMajorAxisRadians)), 0.0]
raOuterMinor = [((- aOuterMinorMag) * math.sin(raMajorAxisRadians)), (aOuterMinorMag * math.cos(raMajorAxisRadians)), 0.0]
rotRadians = (baseRotationRadians + (0.5 * math.pi))
cosRotRadians = math.cos(rotRadians)
sinRotRadians = math.sin(rotRadians)
cruxLeftModX = (((cruxLeft[0] - cruxCentre[0]) * cosRotRadians) + ((cruxLeft[1] - cruxCentre[1]) * sinRotRadians))
cruxLeftModY = (((cruxLeft[0] - cruxCentre[0]) * (- sinRotRadians)) + ((cruxLeft[1] - cruxCentre[1]) * cosRotRadians))
axInnerMod = (aInnerMajorMag * math.cos(aMajorAxisRadians))
bxInnerMod = (aInnerMinorMag * math.sin(aMajorAxisRadians))
laSeptumRadians = math.atan2(bxInnerMod, axInnerMod)
raSeptumRadians = (- laSeptumRadians)
laCentreModX = ((((- 0.5) * aSeptumThickness) - (axInnerMod * math.cos(laSeptumRadians))) - (bxInnerMod * math.sin(laSeptumRadians)))
axMod = (aOuterMajorMag * math.cos(aMajorAxisRadians))
ayMod = (aOuterMajorMag * (- math.sin(aMajorAxisRadians)))
bxMod = (aOuterMinorMag * math.sin(aMajorAxisRadians))
byMod = (aOuterMinorMag * math.cos(aMajorAxisRadians))
laCruxLeftRadians = getEllipseRadiansToX(axMod, bxMod, (cruxLeftModX - laCentreModX), (math.pi * 0.5))
laCentreModY = ((cruxLeftModY - (ayMod * math.cos(laCruxLeftRadians))) - (byMod * math.sin(laCruxLeftRadians)))
laCentreX = ((cruxCentre[0] + (laCentreModX * cosRotRadians)) + (laCentreModY * (- sinRotRadians)))
laCentreY = ((cruxCentre[1] + (laCentreModX * sinRotRadians)) + (laCentreModY * cosRotRadians))
raCruxLeftRadians = (- laCruxLeftRadians)
raCentreX = ((cruxCentre[0] - (laCentreModX * cosRotRadians)) + (laCentreModY * (- sinRotRadians)))
raCentreY = ((cruxCentre[1] - (laCentreModX * sinRotRadians)) + (laCentreModY * cosRotRadians))
aCentreOuterZ = (baseHeight + baseThickness)
aCentreInnerZ = (aCentreOuterZ - aBaseSlopeHeight)
atrialPerimeterLength = getApproximateEllipsePerimeter(aOuterMajorMag, aOuterMinorMag)
atrialSeptumCentreToCruxLeftLength = getEllipseArcLength(aOuterMajorMag, aOuterMinorMag, laSeptumRadians, laCruxLeftRadians)
atrialSeptumElementLength = (atrialSeptumCentreToCruxLeftLength / (1.0 + (elementsCountAtrialSeptum * 0.5)))
atrialFreeWallElementLength = ((atrialPerimeterLength - (atrialSeptumElementLength * (elementsCountAtrialSeptum + 2))) / ((elementsCountAroundAtria - elementsCountAtrialSeptum) - 2))
atrialTransitionElementLength = (0.5 * (atrialSeptumElementLength + atrialFreeWallElementLength))
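# Sample node angles around the left atrium: atrial septum elements get the shorter
# atrialSeptumElementLength, free-wall elements share the remaining perimeter, and a
# transition element of averaged length joins the two regions.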
laRadians = []
laOuterDerivatives = []
radiansAround = laSeptumRadians
if ((elementsCountAtrialSeptum % 2) == 1):
radiansAround = updateEllipseAngleByArcLength(aOuterMajorMag, aOuterMinorMag, radiansAround, (0.5 * atrialSeptumElementLength))
outerDerivative = atrialSeptumElementLength
lan1CruxLimit = ((elementsCountAtrialSeptum // 2) + 1)
lan1SeptumLimit = ((elementsCountAroundAtria - ((elementsCountAtrialSeptum + 1) // 2)) - 1)
for n1 in range(elementsCountAroundAtria):
laRadians.append(radiansAround)
laOuterDerivatives.append(outerDerivative)
if ((n1 < lan1CruxLimit) or (n1 > lan1SeptumLimit)):
elementLength = atrialSeptumElementLength
outerDerivative = atrialSeptumElementLength
elif (n1 == lan1CruxLimit):
elementLength = atrialTransitionElementLength
outerDerivative = atrialFreeWallElementLength
elif (n1 == lan1SeptumLimit):
elementLength = atrialTransitionElementLength
outerDerivative = atrialSeptumElementLength
else:
elementLength = atrialFreeWallElementLength
outerDerivative = atrialFreeWallElementLength
radiansAround = updateEllipseAngleByArcLength(aOuterMajorMag, aOuterMinorMag, radiansAround, elementLength)
laInnerDerivatives = []
finalArcLength = prevArcLength = getEllipseArcLength(aInnerMajorMag, aInnerMinorMag, (laRadians[(- 1)] - (2.0 * math.pi)), laRadians[0])
for n1 in range(elementsCountAroundAtria):
if (n1 == (elementsCountAroundAtria - 1)):
nextArcLength = finalArcLength
else:
nextArcLength = getEllipseArcLength(aInnerMajorMag, aInnerMinorMag, laRadians[n1], laRadians[(n1 + 1)])
if (laOuterDerivatives[n1] is atrialSeptumElementLength):
arcLength = min(prevArcLength, nextArcLength)
else:
arcLength = max(prevArcLength, nextArcLength)
laInnerDerivatives.append(arcLength)
prevArcLength = nextArcLength
raRadians = []
raInnerDerivatives = []
raOuterDerivatives = []
for n1 in range(elementsCountAroundAtria):
raRadians.append(((2.0 * math.pi) - laRadians[(- n1)]))
raInnerDerivatives.append(laInnerDerivatives[(- n1)])
raOuterDerivatives.append(laOuterDerivatives[(- n1)])
raRadians[0] = raSeptumRadians
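# Left atrium base nodes: inner layer (n3 == 0) all the way around; the outer layer
# (n3 == 1) skips positions already supplied by the crux and LV outlet nodes.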
laNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
for n3 in range(2):
for n1 in range(elementsCountAroundAtria):
radiansAround = laRadians[n1]
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
inner = [((laCentreX + (cosRadiansAround * laInnerMajor[0])) + (sinRadiansAround * laInnerMinor[0])), ((laCentreY + (cosRadiansAround * laInnerMajor[1])) + (sinRadiansAround * laInnerMinor[1])), aCentreInnerZ]
outer = [((laCentreX + (cosRadiansAround * laOuterMajor[0])) + (sinRadiansAround * laOuterMinor[0])), ((laCentreY + (cosRadiansAround * laOuterMajor[1])) + (sinRadiansAround * laOuterMinor[1])), aCentreOuterZ]
if ((n3 == 1) and ((n1 <= lan1CruxLimit) or (n1 > (lan1SeptumLimit + 2)))):
continue
node = nodes.createNode(nodeIdentifier, nodetemplate)
laNodeId[n3][n1] = nodeIdentifier
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, (inner if (n3 == 0) else outer))
if (n3 == 0):
dx_ds1 = [(((- sinRadiansAround) * laInnerMajor[0]) + (cosRadiansAround * laInnerMinor[0])), (((- sinRadiansAround) * laInnerMajor[1]) + (cosRadiansAround * laInnerMinor[1])), 0.0]
scale1 = laInnerDerivatives[n1]
else:
dx_ds1 = [(((- sinRadiansAround) * laOuterMajor[0]) + (cosRadiansAround * laOuterMinor[0])), (((- sinRadiansAround) * laOuterMajor[1]) + (cosRadiansAround * laOuterMinor[1])), 0.0]
scale1 = laOuterDerivatives[n1]
scale1 /= vector.magnitude(dx_ds1)
dx_ds1 = [(d * scale1) for d in dx_ds1]
dx_ds3 = [(outer[0] - inner[0]), (outer[1] - inner[1]), (outer[2] - inner[2])]
if ((n1 < lan1CruxLimit) or (n1 > lan1SeptumLimit)):
dx_ds2 = [0.0, 0.0, aCentreInnerZ]
else:
dx_ds2 = [((dx_ds3[1] * dx_ds1[2]) - (dx_ds3[2] * dx_ds1[1])), ((dx_ds3[2] * dx_ds1[0]) - (dx_ds3[0] * dx_ds1[2])), ((dx_ds3[0] * dx_ds1[1]) - (dx_ds3[1] * dx_ds1[0]))]
if (n1 == (lan1CruxLimit + 1)):
dx_ds2[2] *= (0.5 if (n3 == 0) else 0.25)
mag2 = (1.5 * (baseHeight + baseThickness))
else:
mag2 = inner[2]
scale2 = (mag2 / vector.magnitude(dx_ds2))
dx_ds2 = [(d * scale2) for d in dx_ds2]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
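# Disabled diagnostic block: would add nodes visualising the LA ellipse centres and axes.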
if False:
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [laCentreX, laCentreY, aCentreInnerZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [laInnerMajor[0], laInnerMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [laInnerMinor[0], laInnerMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreInnerZ])
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [laCentreX, laCentreY, aCentreOuterZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [laOuterMajor[0], laOuterMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [laOuterMinor[0], laOuterMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreOuterZ])
nodeIdentifier += 1
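# Right atrium: mirror of the left; across the septum the outer layer reuses the LA
# inner septum node and two LV outlet nodes instead of creating new ones.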
ran1SeptumLimit = (elementsCountAtrialSeptum // 2)
ran1CruxLimit = ((elementsCountAroundAtria - ran1SeptumLimit) - 1)
raNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
raNodeId[1][0] = laNodeId[0][0]
raNodeId[1][(- 2)] = lvOutletNodeId[1][1]
raNodeId[1][(- 1)] = lvOutletNodeId[1][0]
for n3 in range(2):
for n1 in range(elementsCountAroundAtria):
radiansAround = raRadians[n1]
cosRadiansAround = math.cos(radiansAround)
sinRadiansAround = math.sin(radiansAround)
inner = [((raCentreX + (cosRadiansAround * raInnerMajor[0])) + (sinRadiansAround * raInnerMinor[0])), ((raCentreY + (cosRadiansAround * raInnerMajor[1])) + (sinRadiansAround * raInnerMinor[1])), aCentreInnerZ]
outer = [((raCentreX + (cosRadiansAround * raOuterMajor[0])) + (sinRadiansAround * raOuterMinor[0])), ((raCentreY + (cosRadiansAround * raOuterMajor[1])) + (sinRadiansAround * raOuterMinor[1])), aCentreOuterZ]
if ((n3 == 1) and ((n1 < ran1SeptumLimit) or (n1 >= ran1CruxLimit))):
continue
node = nodes.createNode(nodeIdentifier, nodetemplate)
raNodeId[n3][n1] = nodeIdentifier
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, (inner if (n3 == 0) else outer))
if (n3 == 0):
dx_ds1 = [(((- sinRadiansAround) * raInnerMajor[0]) + (cosRadiansAround * raInnerMinor[0])), (((- sinRadiansAround) * raInnerMajor[1]) + (cosRadiansAround * raInnerMinor[1])), 0.0]
scale1 = raInnerDerivatives[n1]
else:
dx_ds1 = [(((- sinRadiansAround) * raOuterMajor[0]) + (cosRadiansAround * raOuterMinor[0])), (((- sinRadiansAround) * raOuterMajor[1]) + (cosRadiansAround * raOuterMinor[1])), 0.0]
scale1 = raOuterDerivatives[n1]
scale1 /= vector.magnitude(dx_ds1)
dx_ds1 = [(d * scale1) for d in dx_ds1]
dx_ds3 = [(outer[0] - inner[0]), (outer[1] - inner[1]), (outer[2] - inner[2])]
if ((n1 <= ran1SeptumLimit) or (n1 >= ran1CruxLimit)):
dx_ds2 = [0.0, 0.0, aCentreInnerZ]
else:
dx_ds2 = [((dx_ds3[1] * dx_ds1[2]) - (dx_ds3[2] * dx_ds1[1])), ((dx_ds3[2] * dx_ds1[0]) - (dx_ds3[0] * dx_ds1[2])), ((dx_ds3[0] * dx_ds1[1]) - (dx_ds3[1] * dx_ds1[0]))]
if (n1 == (ran1CruxLimit - 1)):
dx_ds2[2] *= (0.5 if (n3 == 0) else 0.25)
mag2 = (1.5 * (baseHeight + baseThickness))
else:
mag2 = inner[2]
scale2 = (mag2 / vector.magnitude(dx_ds2))
dx_ds2 = [(d * scale2) for d in dx_ds2]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
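# Disabled debug block below: would add reference nodes at the right atrium centre.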
if False:
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [raCentreX, raCentreY, aCentreInnerZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [raInnerMajor[0], raInnerMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [raInnerMinor[0], raInnerMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreInnerZ])
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, [raCentreX, raCentreY, aCentreOuterZ])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, [raOuterMajor[0], raOuterMajor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, [raOuterMinor[0], raOuterMinor[1], 0.0])
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, [0.0, 0.0, aCentreOuterZ])
nodeIdentifier += 1
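# Share remaining nodes between the two atria: LA outer nodes at the septum reuse
# the RA inner node and LV outlet node identifiers.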
laNodeId[1][0] = raNodeId[0][0]
laNodeId[1][1] = lvOutletNodeId[1][0]
laNodeId[1][2] = lvOutletNodeId[1][(- 1)]
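# Recompute transmural d/ds3 on both atria as the inner-to-outer position difference,
# skipping shared crux nodes and negating where the outer node is a crux left/right node.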
for i in range(2):
aNodeId = (laNodeId if (i == 0) else raNodeId)
for n1 in range(elementsCountAroundAtria):
nid2 = aNodeId[1][n1]
node2 = nodes.findNodeByIdentifier(nid2)
cache.setNode(node2)
(result, x2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
nid1 = aNodeId[0][n1]
node1 = nodes.findNodeByIdentifier(nid1)
cache.setNode(node1)
(result, x1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
dx_ds3 = [(x2[0] - x1[0]), (x2[1] - x1[1]), (x2[2] - x1[2])]
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
if ((i == 1) and ((n1 == 0) or (nid2 == cruxCentreNodeId))):
continue
if (nid2 in [cruxLeftNodeId, cruxRightNodeId]):
dx_ds3 = [(- d) for d in dx_ds3]
cache.setNode(node2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
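# Fix d/ds3 at the crux centre: average the directions from the crux to the adjacent
# LA and RA inner nodes, rescaled to the LA distance.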
cache.setNode(nodes.findNodeByIdentifier(laNodeId[0][1]))
(result, x1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(raNodeId[0][(- 1)]))
(result, x2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
cache.setNode(nodes.findNodeByIdentifier(cruxCentreNodeId))
(result, xc) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
d1 = [(x1[c] - xc[c]) for c in range(3)]
d2 = [(x2[c] - xc[c]) for c in range(3)]
dx_ds3 = [(d1[0] + d2[0]), (d1[1] + d2[1]), (d1[2] + d2[2])]
scale = (vector.magnitude(d1) / vector.magnitude(dx_ds3))
dx_ds3 = [(d * scale) for d in dx_ds3]
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
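# Reset d/ds2 at the LA inner node beside the crux from d/ds3 x d/ds1, at half the
# magnitude of d/ds3.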
cache.setNode(nodes.findNodeByIdentifier(laNodeId[0][2]))
(result, dx_ds1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, dx_ds3) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
dx_ds2 = vector.crossproduct3(dx_ds3, dx_ds1)
scale2 = ((0.5 * vector.magnitude(dx_ds3)) / vector.magnitude(dx_ds2))
dx_ds2 = [(scale2 * d) for d in dx_ds2]
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
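# LV supraventricular crest node: cubic Hermite interpolation at xi = 0.5 between the
# node above the septum and the LV outlet rim, with transmural thickness baseThickness.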
nida = (nsdl + nowl)
nidb = lvOutletNodeId[1][(- 2)]
node1 = nodes.findNodeByIdentifier(nida)
node2 = nodes.findNodeByIdentifier(nidb)
(x, dx_ds2, dx_ds1, dx_ds3) = interpolateNodesCubicHermite(cache, coordinates, 0.5, baseThickness, node1, Node.VALUE_LABEL_D_DS2, 1.0, Node.VALUE_LABEL_D_DS1, 1.0, node2, Node.VALUE_LABEL_D_DS3, (- 1.0), Node.VALUE_LABEL_D_DS1, 1.0)
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
lv_crest_nid1 = nodeIdentifier
nodeIdentifier += 1
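# RV supraventricular crest: sample a Hermite curve from the RV free wall to the LV
# outlet at xi = 0.5, offset inwards by baseThickness for the inner node, and apply a
# curvature correction to d/ds2 on the inner surface.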
nida = ((nidr + nowr) + 4)
nidb = lvOutletNodeId[1][2]
node = nodes.findNodeByIdentifier(nida)
cache.setNode(node)
(result, xa) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, d1a) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, d2a) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, 3)
node = nodes.findNodeByIdentifier(nidb)
cache.setNode(node)
(result, xb) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, d1b) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, d2b) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
d2b = [((- 2.0) * d) for d in d2b]
scale = ((4.0 * (baseHeight + baseThickness)) / vector.magnitude(d2a))
d2a = [(scale * d) for d in d2a]
xi = 0.5
xr = (1.0 - xi)
x = interp.interpolateCubicHermite(xa, d2a, xb, d2b, xi)
dx_ds1 = [((xr * d1a[c]) + (xi * d1b[c])) for c in range(3)]
dx_ds2 = interp.interpolateCubicHermiteDerivative(xa, d2a, xb, d2b, xi)
dx_ds2 = [(xr * d) for d in dx_ds2]
radialVector = vector.normalise(vector.crossproduct3(dx_ds1, dx_ds2))
dx_ds3 = [(baseThickness * d) for d in radialVector]
x_inner = [(x[c] - dx_ds3[c]) for c in range(3)]
curvatureScale = (1.0 - (baseThickness * interp.getCubicHermiteCurvature(xa, d2a, x, dx_ds2, radialVector, 1.0)))
dx_ds2_inner = [(curvatureScale * d) for d in dx_ds2]
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x_inner)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2_inner)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
rv_crest_nid1 = nodeIdentifier
nodeIdentifier += 1
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
rv_crest_nid2 = nodeIdentifier
nodeIdentifier += 1
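# LV 'bridge' node between the LA rim and the LV outlet, created on the node template
# without d/ds3.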
nida = laNodeId[0][2]
nidb = lvOutletNodeId[0][(- 1)]
node1 = nodes.findNodeByIdentifier(nida)
node2 = nodes.findNodeByIdentifier(nidb)
(x, dx_ds2, dx_ds1, dx_ds3) = interpolateNodesCubicHermite(cache, coordinates, 0.4, lvOutletWallThickness, node1, Node.VALUE_LABEL_D_DS2, (- 1.0), Node.VALUE_LABEL_D_DS1, (- 1.0), node2, Node.VALUE_LABEL_D_DS2, 1.0, Node.VALUE_LABEL_D_DS1, 1.0)
dx_ds1 = [(2.0 * d) for d in dx_ds1]
node = nodes.createNode(nodeIdentifier, nodetemplateLinearS3)
cache.setNode(node)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
lv_bridge_nid1 = nodeIdentifier
nodeIdentifier += 1
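# Top of the fibrous ring: copy each atrial rim node up by fibrousRingThickness,
# flipping d/ds1 and d/ds3 where the outer ring reverses direction.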
tlaNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
traNodeId = [([(- 1)] * elementsCountAroundAtria), ([(- 1)] * elementsCountAroundAtria)]
for n3 in range(2):
for i in range(2):
if (i == 0):
baNodeId = laNodeId
taNodeId = tlaNodeId
else:
baNodeId = raNodeId
taNodeId = traNodeId
for n1 in range(elementsCountAroundAtria):
if ((n3 == 1) and (((i == 0) and ((n1 < (lan1CruxLimit - 1)) or (n1 > (lan1SeptumLimit + 2)))) or ((i == 1) and ((n1 < ran1SeptumLimit) or (n1 > ran1CruxLimit))))):
continue
node = nodes.findNodeByIdentifier(baNodeId[n3][n1])
cache.setNode(node)
(result, x) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, 3)
(result, dx_ds1) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, 3)
(result, dx_ds2) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, 3)
(result, dx_ds3) = coordinates.getNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, 3)
x[2] += fibrousRingThickness
if ((n3 == 1) and (((i == 0) and ((n1 == 1) or (n1 == 2))) or ((i == 1) and (n1 == (elementsCountAroundAtria - 2))))):
dx_ds1 = [(- d) for d in dx_ds1]
dx_ds3 = [(- d) for d in dx_ds3]
taNodeId[n3][n1] = nodeIdentifier
node = nodes.createNode(nodeIdentifier, nodetemplate)
cache.setNode(node)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_VALUE, 1, x)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS1, 1, dx_ds1)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS2, 1, dx_ds2)
result = coordinates.setNodeParameters(cache, (- 1), Node.VALUE_LABEL_D_DS3, 1, dx_ds3)
nodeIdentifier += 1
tlaNodeId[1][0] = traNodeId[0][0]
traNodeId[1][(- 1)] = tlaNodeId[1][1]
traNodeId[1][0] = tlaNodeId[0][0]
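# Create 3-D elements: fetch the mesh, the annotation mesh groups and a tricubic
# Hermite element template with no cross derivatives.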
mesh = fm.findMeshByDimension(3)
lvMeshGroup = lvGroup.getMeshGroup(mesh)
rvMeshGroup = rvGroup.getMeshGroup(mesh)
vSeptumMeshGroup = vSeptumGroup.getMeshGroup(mesh)
conusArteriosusMeshGroup = conusArteriosusGroup.getMeshGroup(mesh)
lFibrousRingMeshGroup = lFibrousRingGroup.getMeshGroup(mesh)
rFibrousRingMeshGroup = rFibrousRingGroup.getMeshGroup(mesh)
tricubichermite = eftfactory_tricubichermite(mesh, useCrossDerivatives)
eft = tricubichermite.createEftNoCrossDerivatives()
elementIdentifier = (getMaximumElementIdentifier(mesh) + 1)
elementtemplate1 = mesh.createElementtemplate()
elementtemplate1.setElementShapeType(Element.SHAPE_TYPE_CUBE)
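# LV base elements (19): most remap the element field template (eft1) per element and
# use a single -1.0 scale factor for reversed derivative terms.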
for e in range(19):
eft1 = eft
nids = None
meshGroups = [lvMeshGroup]
if (e == 0):
nids = [(nidl + 0), (nidl + 1), laNodeId[0][(- 1)], laNodeId[0][0], (nidr + 0), ((nidl + nowl) + 1), raNodeId[0][1], laNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS1], [1])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
scaleEftNodeValueLabels(eft1, [8], [Node.VALUE_LABEL_D_DS3], [1])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 1):
nids = [(nidl + 1), (nidl + 2), laNodeId[0][0], laNodeId[0][1], ((nidl + nowl) + 1), ((nidl + nowl) + 2), laNodeId[1][0], raNodeId[0][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
scaleEftNodeValueLabels(eft1, [7], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 2):
nids = [(nidl + 2), lvOutletNodeId[0][0], laNodeId[0][1], lvOutletNodeId[1][0], ((nidl + nowl) + 2), raNodeId[0][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
tricubichermite.setEftLinearDerivative(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, 2, 4, 1)
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
ln_map = [1, 2, 3, 4, 5, 4, 6, 4]
remapEftLocalNodes(eft1, 6, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 3):
nids = [lvOutletNodeId[1][0], lvOutletNodeId[1][1], ((nidl + nowl) + 2), ((nidl + nowl) + 3), raNodeId[0][(- 1)], raNodeId[0][(- 2)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 2, 3, 4], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
scaleEftNodeValueLabels(eft1, [7, 8], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
ln_map = [1, 2, 1, 2, 3, 4, 5, 6]
remapEftLocalNodes(eft1, 6, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e <= 6):
n = (e - 4)
nids = [((nidl + n) + 2), ((nidl + n) + 3), lvOutletNodeId[0][n], lvOutletNodeId[0][(n + 1)], (((nidl + nowl) + n) + 2), (((nidl + nowl) + n) + 3), lvOutletNodeId[1][n], lvOutletNodeId[1][(n + 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
if (e == 4):
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 7):
nids = [(nidl + 5), (nidl + 6), lvOutletNodeId[0][3], lvOutletNodeId[0][4], ((nidl + nowl) + 5), ((nidl + nowl) + 6), lvOutletNodeId[1][3], lvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 8):
nids = [(nidl + 6), (nidl + 7), lvOutletNodeId[0][4], lv_crest_nid1, ((nidl + nowl) + 6), ((nidr + norr) - 1), lvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 3, 4, 5, 6, 7, 4]
remapEftLocalNodes(eft1, 7, ln_map)
meshGroups += [rvMeshGroup, vSeptumMeshGroup]
elif (e == 9):
nids = [((nidr + norr) - 1), nsdl, (((nidr + nowr) + norr) - 1), (nsdl + nowl), lv_crest_nid1]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [1, 2, 3, 4], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 1, 2, 3, 4, 5, 5]
remapEftLocalNodes(eft1, 5, ln_map)
meshGroups += [rvMeshGroup]
elif (e == 10):
nids = [nsdl, (nsdl + 1), laNodeId[0][3], (nsdl + nowl), ((nsdl + nowl) + 1), lv_crest_nid1, laNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 1, 3, 4, 5, 6, 7]
remapEftLocalNodes(eft1, 7, ln_map)
elif (e == 11):
nids = [(nedl - 4), (nedl - 3), laNodeId[0][(- 5)], laNodeId[0][(- 4)], ((nedl + nowl) - 4), ((nedl + nowl) - 3), laNodeId[1][(- 5)], laNodeId[1][(- 4)]]
elif (e == 12):
nids = [(nedl - 3), (nedl - 2), laNodeId[0][(- 4)], laNodeId[0][(- 3)], ((nedl + nowl) - 3), ((nedl + nowl) - 2), laNodeId[1][(- 4)], laNodeId[1][(- 3)]]
elif (e == 13):
nids = [(nedl - 2), (nedl - 1), laNodeId[0][(- 3)], laNodeId[0][(- 2)], ((nedl + nowl) - 2), ((nedl + nowl) - 1), laNodeId[1][(- 3)], laNodeId[1][(- 2)]]
elif (e == 14):
nids = [(nedl - 1), (nidl + 0), laNodeId[0][(- 2)], laNodeId[0][(- 1)], ((nedl + nowl) - 1), ((nidl + nowl) + 0), laNodeId[1][(- 2)], laNodeId[1][(- 1)]]
elif (e == 15):
nids = [(nidl + 6), (nidl + 7), lv_bridge_nid1, laNodeId[0][3], lvOutletNodeId[0][4], lv_crest_nid1, lvOutletNodeId[0][5], laNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
elif (e == 16):
nids = [lv_crest_nid1, laNodeId[1][3], lvOutletNodeId[0][4], lvOutletNodeId[0][5], lvOutletNodeId[1][4], lvOutletNodeId[1][5]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [1, 2, 5, 6], Node.VALUE_LABEL_D_DS3, [])
remapEftNodeValueLabel(eft1, [2, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
ln_map = [1, 2, 3, 4, 1, 2, 5, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e == 17):
nids = [laNodeId[0][3], laNodeId[0][2], lv_bridge_nid1, laNodeId[1][3], lvOutletNodeId[1][5], lvOutletNodeId[0][5]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [6, 8], Node.VALUE_LABEL_D_DS2, 6, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS2, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 1, 3, 4, 5, 4, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e == 18):
nids = [lv_bridge_nid1, (nidl + elementsCountAtrialSeptum), lvOutletNodeId[0][5], lvOutletNodeId[0][0], laNodeId[0][2], laNodeId[0][1], lvOutletNodeId[1][5], lvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
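# Swap the roles of d/ds2 and d/ds3 on local nodes 5 and 6, using the otherwise
# unused cross derivative label as a temporary slot.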
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 6], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
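# RV base elements (15), including the supraventricular crest and the conus
# arteriosus elements around the RV outlet.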
for e in range(15):
eft1 = eft
nids = None
meshGroups = [rvMeshGroup]
if (e == 0):
nids = [(nidl + 0), (nidr + 0), laNodeId[0][(- 1)], raNodeId[0][1], ((nidl + nowl) + 0), ((nidr + nowr) + 0), laNodeId[1][(- 1)], raNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [lvMeshGroup]
elif (e == 1):
nids = [(nidr + 0), (nidr + 1), raNodeId[0][1], raNodeId[0][2], ((nidr + nowr) + 0), ((nidr + nowr) + 1), raNodeId[1][1], raNodeId[1][2]]
elif (e == 2):
nids = [(nidr + 1), (nidr + 2), raNodeId[0][2], raNodeId[0][3], ((nidr + nowr) + 1), ((nidr + nowr) + 2), raNodeId[1][2], raNodeId[1][3]]
elif (e == 3):
nids = [(nidr + 2), (nidr + 3), raNodeId[0][3], raNodeId[0][4], ((nidr + nowr) + 2), ((nidr + nowr) + 3), raNodeId[1][3], raNodeId[1][4]]
elif (e == 4):
nids = [(nidr + 3), (nidr + 4), raNodeId[0][4], rv_crest_nid1, ((nidr + nowr) + 3), ((nidr + nowr) + 4), raNodeId[1][4], rv_crest_nid2]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [3, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
elif (e == 5):
nids = [(nidr + 4), (nidr + 5), rv_crest_nid1, rvOutletNodeId[0][2], ((nidr + nowr) + 4), ((nidr + nowr) + 5), rv_crest_nid2, rvOutletNodeId[1][2]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [4, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 6):
nids = [(nidr + 5), (nidr + 6), rvOutletNodeId[0][2], rvOutletNodeId[0][3], ((nidr + nowr) + 5), ((nidr + nowr) + 6), rvOutletNodeId[1][2], rvOutletNodeId[1][3]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
meshGroups += [conusArteriosusMeshGroup]
elif (e == 7):
nids = [(nidr + 6), (nidr + 7), rvOutletNodeId[0][3], rvOutletNodeId[0][4], ((nidr + nowr) + 6), ((nidr + nowr) + 7), rvOutletNodeId[1][3], rvOutletNodeId[1][4]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
meshGroups += [conusArteriosusMeshGroup]
elif (e == 8):
nids = [raNodeId[0][(- 3)], ((nidl + nowl) + 4), raNodeId[0][(- 2)], ((nidl + nowl) + 3), raNodeId[1][(- 3)], lvOutletNodeId[1][2], lvOutletNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [1, 5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 2, 3, 4, 5, 6, 7, 7]
remapEftLocalNodes(eft1, 7, ln_map)
elif (e == 9):
nids = [raNodeId[0][(- 4)], rv_crest_nid1, raNodeId[0][(- 3)], ((nidl + nowl) + 4), raNodeId[1][(- 4)], rv_crest_nid2, raNodeId[1][(- 3)], lvOutletNodeId[1][2]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [1, 3, 5, 7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
elif (e == 10):
nids = [rv_crest_nid1, rvOutletNodeId[0][2], ((nidl + nowl) + 4), rvOutletNodeId[0][1], rv_crest_nid2, rvOutletNodeId[1][2], lvOutletNodeId[1][2], rvOutletNodeId[1][1]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [2, 3, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [2, 4, 6, 8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2, 4, 6, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS2, [1])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 11):
nids = [((nidl + nowl) + 4), rvOutletNodeId[0][1], ((nidl + nowl) + 5), rvOutletNodeId[0][0], lvOutletNodeId[1][2], rvOutletNodeId[1][1], lvOutletNodeId[1][3], rvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [2, 6], Node.VALUE_LABEL_D_DS3, 2, 6, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
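# The D2_DS1DS2 label is used below as a temporary slot to permute derivative labels
# on local nodes 3 and 7 without losing a mapping.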
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [3], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 12):
nids = [((nidl + nowl) + 6), ((nidl + nowl) + 5), rvOutletNodeId[0][(- 1)], rvOutletNodeId[0][0], lvOutletNodeId[1][4], lvOutletNodeId[1][3], rvOutletNodeId[1][(- 1)], rvOutletNodeId[1][0]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 5, 6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [1, 2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D2_DS1DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D2_DS1DS2, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 13):
nids = [(nidr + elementsCountAroundVSeptum), ((nidl + nowl) + 6), rvOutletNodeId[0][(- 2)], rvOutletNodeId[0][(- 1)], lv_crest_nid1, lvOutletNodeId[1][4], rvOutletNodeId[1][(- 2)], rvOutletNodeId[1][(- 1)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS3, [1])])
meshGroups += [conusArteriosusMeshGroup]
elif (e == 14):
nids = [(nidr + elementsCountAroundVSeptum), rvOutletNodeId[0][(- 2)], ((nidr + nowr) + elementsCountAroundVSeptum), lv_crest_nid1, rvOutletNodeId[1][(- 2)]]
eft1 = tricubichermite.createEftNoCrossDerivatives()
setEftScaleFactorIds(eft1, [1], [])
tricubichermite.setEftLinearDerivative(eft1, [3, 7], Node.VALUE_LABEL_D_DS3, 3, 7, 1)
tricubichermite.setEftLinearDerivative(eft1, [4, 8], Node.VALUE_LABEL_D_DS3, 4, 8, 1)
remapEftNodeValueLabel(eft1, [1, 2, 3, 4, 7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [2], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS2, [])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS2, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS2, [])])
ln_map = [1, 1, 2, 2, 3, 4, 5, 5]
remapEftLocalNodes(eft1, 5, ln_map)
meshGroups += [conusArteriosusMeshGroup]
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
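# Fibrous ring elements: bicubic Hermite x linear basis (linear through the ring,
# axis 2); the first four elements over the septum belong to both the left and right
# fibrous ring groups.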
bicubichermitelinear = eftfactory_bicubichermitelinear(mesh, useCrossDerivatives, linearAxis=2, d_ds1=Node.VALUE_LABEL_D_DS1, d_ds2=Node.VALUE_LABEL_D_DS3)
eftFibrousRing = bicubichermitelinear.createEftBasic()
for i in range(2):
if (i == 0):
baNodeId = laNodeId
taNodeId = tlaNodeId
meshGroupsSide = [lFibrousRingMeshGroup]
else:
baNodeId = raNodeId
taNodeId = traNodeId
meshGroupsSide = [rFibrousRingMeshGroup]
for e1 in range((elementsCountAroundAtria + 2)):
if ((i == 1) and (e1 < 4)):
continue
if (e1 < 4):
meshGroups = [lFibrousRingMeshGroup, rFibrousRingMeshGroup]
else:
meshGroups = meshGroupsSide
eft1 = eftFibrousRing
if ((e1 == 0) or (e1 == 3)):
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
if (e1 == 0):
nids = [laNodeId[0][(- 1)], raNodeId[0][1], tlaNodeId[0][(- 1)], traNodeId[0][1], laNodeId[1][(- 1)], raNodeId[1][1], tlaNodeId[1][(- 1)], traNodeId[1][1]]
else:
nids = [raNodeId[0][(- 1)], laNodeId[0][1], traNodeId[0][(- 1)], tlaNodeId[0][1], laNodeId[1][1], tlaNodeId[1][1]]
remapEftNodeValueLabel(eft1, [5, 6, 7, 8], Node.VALUE_LABEL_D_DS1, [])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
ln_map = [1, 2, 3, 4, 5, 5, 6, 6]
remapEftLocalNodes(eft1, 6, ln_map)
elif (e1 == 1):
nids = [laNodeId[0][(- 1)], laNodeId[0][0], tlaNodeId[0][(- 1)], tlaNodeId[0][0], raNodeId[0][1], raNodeId[0][0], traNodeId[0][1], traNodeId[0][0]]
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [6, 8], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [1, 3], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5, 7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
elif (e1 == 2):
nids = [laNodeId[0][0], laNodeId[0][1], tlaNodeId[0][0], tlaNodeId[0][1], raNodeId[0][0], raNodeId[0][(- 1)], traNodeId[0][0], traNodeId[0][(- 1)]]
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
scaleEftNodeValueLabels(eft1, [5, 7], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [2, 4], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6, 8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
else:
ea = (e1 - 3)
eb = ((ea + 1) - elementsCountAroundAtria)
nids = [baNodeId[0][ea], baNodeId[0][eb], taNodeId[0][ea], taNodeId[0][eb], baNodeId[1][ea], baNodeId[1][eb], taNodeId[1][ea], taNodeId[1][eb]]
if (((i == 0) and ((e1 == 4) or (e1 == 5))) or ((i == 1) and (e1 >= elementsCountAroundAtria))):
eft1 = bicubichermitelinear.createEftBasic()
setEftScaleFactorIds(eft1, [1], [])
if (e1 == 4):
scaleEftNodeValueLabels(eft1, [6], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [7], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
elif (e1 == 5):
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [5], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [])])
elif (e1 == elementsCountAroundAtria):
scaleEftNodeValueLabels(eft1, [6], [Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
else:
scaleEftNodeValueLabels(eft1, [5], [Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS3], [1])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS1, [(Node.VALUE_LABEL_D_DS1, [1])])
remapEftNodeValueLabel(eft1, [6], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, [1]), (Node.VALUE_LABEL_D_DS3, [1])])
remapEftNodeValueLabel(eft1, [8], Node.VALUE_LABEL_D_DS3, [(Node.VALUE_LABEL_D_DS1, []), (Node.VALUE_LABEL_D_DS3, [])])
result = elementtemplate1.defineField(coordinates, (- 1), eft1)
element = mesh.createElement(elementIdentifier, elementtemplate1)
result2 = element.setNodesByIdentifier(eft1, nids)
if (eft1.getNumberOfLocalScaleFactors() == 1):
result3 = element.setScaleFactors(eft1, [(- 1.0)])
else:
result3 = 1
elementIdentifier += 1
for meshGroup in meshGroups:
meshGroup.addElement(element)
fm.endChange()
return annotationGroups<|docstring|>Generate the base tricubic Hermite mesh.
:param region: Zinc region to define model in. Must be empty.
:param options: Dict containing options. See getDefaultOptions().
:return: list of AnnotationGroup<|endoftext|> |
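# Hedged usage sketch (not part of the dataset records): how the scaffold above might
# be invoked. The class name and import paths are assumptions inferred from this
# record's path column; generateBaseMesh(region, options) follows the docstring.
from opencmiss.zinc.context import Context
from scaffoldmaker.meshtypes.meshtype_3d_heartventriclesbase2 import MeshType_3d_heartventriclesbase2

context = Context('heart')
region = context.getDefaultRegion()  # must be empty, per the docstring
options = MeshType_3d_heartventriclesbase2.getDefaultOptions()
annotationGroups = MeshType_3d_heartventriclesbase2.generateBaseMesh(region, options)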
b18354ca7a193c698cedf67e0bdf305af0b03bb16de776a2a890be4c87acb391 | @classmethod
def refineMesh(cls, meshrefinement, options):
'\n Refine source mesh into separate region, with change of basis.\n :param meshrefinement: MeshRefinement, which knows source and target region.\n :param options: Dict containing options. See getDefaultOptions().\n '
assert isinstance(meshrefinement, MeshRefinement)
refineElementsCountSurface = options['Refine number of elements surface']
refineElementsCountThroughLVWall = options['Refine number of elements through LV wall']
refineElementsCountThroughRVWall = options['Refine number of elements through RV wall']
MeshType_3d_heartventricles2.refineMesh(meshrefinement, options)
element = meshrefinement._sourceElementiterator.next()
startBaseLvElementIdentifier = element.getIdentifier()
startBaseRvElementIdentifier = (startBaseLvElementIdentifier + 19)
limitBaseRvElementIdentifier = (startBaseRvElementIdentifier + 15)
limitFibrousRingElementIdentifier = (limitBaseRvElementIdentifier + 16)
while element.isValid():
numberInXi1 = refineElementsCountSurface
numberInXi2 = refineElementsCountSurface
elementId = element.getIdentifier()
if (elementId < startBaseRvElementIdentifier):
numberInXi3 = refineElementsCountThroughLVWall
elif (elementId < limitBaseRvElementIdentifier):
numberInXi3 = refineElementsCountThroughRVWall
else:
numberInXi2 = 1
numberInXi3 = refineElementsCountThroughRVWall
meshrefinement.refineElementCubeStandard3d(element, numberInXi1, numberInXi2, numberInXi3)
if (elementId == (limitFibrousRingElementIdentifier - 1)):
return
element = meshrefinement._sourceElementiterator.next() | Refine source mesh into separate region, with change of basis.
:param meshrefinement: MeshRefinement, which knows source and target region.
:param options: Dict containing options. See getDefaultOptions(). | src/scaffoldmaker/meshtypes/meshtype_3d_heartventriclesbase2.py | refineMesh | keeran97/scaffoldmaker | 1 | python | @classmethod
def refineMesh(cls, meshrefinement, options):
'\n Refine source mesh into separate region, with change of basis.\n :param meshrefinement: MeshRefinement, which knows source and target region.\n :param options: Dict containing options. See getDefaultOptions().\n '
assert isinstance(meshrefinement, MeshRefinement)
refineElementsCountSurface = options['Refine number of elements surface']
refineElementsCountThroughLVWall = options['Refine number of elements through LV wall']
refineElementsCountThroughRVWall = options['Refine number of elements through RV wall']
MeshType_3d_heartventricles2.refineMesh(meshrefinement, options)
element = meshrefinement._sourceElementiterator.next()
startBaseLvElementIdentifier = element.getIdentifier()
startBaseRvElementIdentifier = (startBaseLvElementIdentifier + 19)
limitBaseRvElementIdentifier = (startBaseRvElementIdentifier + 15)
limitFibrousRingElementIdentifier = (limitBaseRvElementIdentifier + 16)
while element.isValid():
numberInXi1 = refineElementsCountSurface
numberInXi2 = refineElementsCountSurface
elementId = element.getIdentifier()
if (elementId < startBaseRvElementIdentifier):
numberInXi3 = refineElementsCountThroughLVWall
elif (elementId < limitBaseRvElementIdentifier):
numberInXi3 = refineElementsCountThroughRVWall
else:
numberInXi2 = 1
numberInXi3 = refineElementsCountThroughRVWall
meshrefinement.refineElementCubeStandard3d(element, numberInXi1, numberInXi2, numberInXi3)
if (elementId == (limitFibrousRingElementIdentifier - 1)):
return
element = meshrefinement._sourceElementiterator.next() | @classmethod
def refineMesh(cls, meshrefinement, options):
'\n Refine source mesh into separate region, with change of basis.\n :param meshrefinement: MeshRefinement, which knows source and target region.\n :param options: Dict containing options. See getDefaultOptions().\n '
assert isinstance(meshrefinement, MeshRefinement)
refineElementsCountSurface = options['Refine number of elements surface']
refineElementsCountThroughLVWall = options['Refine number of elements through LV wall']
refineElementsCountThroughRVWall = options['Refine number of elements through RV wall']
MeshType_3d_heartventricles2.refineMesh(meshrefinement, options)
element = meshrefinement._sourceElementiterator.next()
startBaseLvElementIdentifier = element.getIdentifier()
startBaseRvElementIdentifier = (startBaseLvElementIdentifier + 19)
limitBaseRvElementIdentifier = (startBaseRvElementIdentifier + 15)
limitFibrousRingElementIdentifier = (limitBaseRvElementIdentifier + 16)
while element.isValid():
numberInXi1 = refineElementsCountSurface
numberInXi2 = refineElementsCountSurface
elementId = element.getIdentifier()
if (elementId < startBaseRvElementIdentifier):
numberInXi3 = refineElementsCountThroughLVWall
elif (elementId < limitBaseRvElementIdentifier):
numberInXi3 = refineElementsCountThroughRVWall
else:
numberInXi2 = 1
numberInXi3 = refineElementsCountThroughRVWall
meshrefinement.refineElementCubeStandard3d(element, numberInXi1, numberInXi2, numberInXi3)
if (elementId == (limitFibrousRingElementIdentifier - 1)):
return
element = meshrefinement._sourceElementiterator.next()<|docstring|>Refine source mesh into separate region, with change of basis.
:param meshrefinement: MeshRefinement, which knows source and target region.
:param options: Dict containing options. See getDefaultOptions().<|endoftext|> |
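# Hedged refinement sketch (not part of the dataset records): the MeshRefinement
# import path and constructor signature are assumptions; the refine option keys are
# taken from the record above, and the options dict must also carry any keys needed
# by MeshType_3d_heartventricles2.refineMesh, which is called first.
from opencmiss.zinc.context import Context
from scaffoldmaker.meshtypes.meshtype_3d_heartventriclesbase2 import MeshType_3d_heartventriclesbase2
from scaffoldmaker.utils.meshrefinement import MeshRefinement  # import path assumed

context = Context('heart')
sourceRegion = context.getDefaultRegion()
options = MeshType_3d_heartventriclesbase2.getDefaultOptions()
options.update({
    'Refine number of elements surface': 4,
    'Refine number of elements through LV wall': 1,
    'Refine number of elements through RV wall': 1,
})
MeshType_3d_heartventriclesbase2.generateBaseMesh(sourceRegion, options)
targetRegion = sourceRegion.createChild('refined')
meshrefinement = MeshRefinement(sourceRegion, targetRegion)  # constructor signature assumed
MeshType_3d_heartventriclesbase2.refineMesh(meshrefinement, options)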
86b5422cc0253487cd0c453c65dd3be1280de7b8971eb641c3a5d561933b4d75 | def verify(self, options, secret_data):
' Check connection\n '
azure_vm_connector = self.locator.get_connector('AzureVMConnector')
r = azure_vm_connector.verify(options, secret_data)
return r | Check connection | src/spaceone/inventory/manager/collector_manager.py | verify | jihyungSong/plugin-azure-vm-inven-collector | 0 | python | def verify(self, options, secret_data):
' \n '
azure_vm_connector = self.locator.get_connector('AzureVMConnector')
r = azure_vm_connector.verify(options, secret_data)
return r | def verify(self, options, secret_data):
' \n '
azure_vm_connector = self.locator.get_connector('AzureVMConnector')
r = azure_vm_connector.verify(options, secret_data)
return r<|docstring|>Check connection<|endoftext|> |
e65593327b704915b135357729d70e3b323d5bb29c2032b335e421575c524b77 | def list_resources(self, params):
' Get list of resources\n Args:\n params:\n - resource_group\n - vms\n\n Returns: list of resources\n '
start_time = time.time()
total_resources = []
try:
(resources, error_resources) = self.list_all_resources(params)
total_resources.extend(resources)
total_resources.extend(error_resources)
_LOGGER.debug(f"[{params['resource_group'].name}] Finished {(time.time() - start_time)} Seconds")
return total_resources
except Exception as e:
_LOGGER.debug(f"[list_resources]: {params['resource_group'].name}] : {e}")
if (type(e) is dict):
error_resource_response = ErrorResourceResponse({'message': json.dumps(e)})
else:
error_resource_response = ErrorResourceResponse({'message': str(e)})
total_resources.append(error_resource_response)
return total_resources | Get list of resources
Args:
params:
- resource_group
- vms
Returns: list of resources | src/spaceone/inventory/manager/collector_manager.py | list_resources | jihyungSong/plugin-azure-vm-inven-collector | 0 | python | def list_resources(self, params):
' Get list of resources\n Args:\n params:\n - resource_group\n - vms\n\n Returns: list of resources\n '
start_time = time.time()
total_resources = []
try:
(resources, error_resources) = self.list_all_resources(params)
total_resources.extend(resources)
total_resources.extend(error_resources)
_LOGGER.debug(f"[{params['resource_group'].name}] Finished {(time.time() - start_time)} Seconds")
return total_resources
except Exception as e:
_LOGGER.debug(f"[list_resources]: {params['resource_group'].name}] : {e}")
if (type(e) is dict):
error_resource_response = ErrorResourceResponse({'message': json.dumps(e)})
else:
error_resource_response = ErrorResourceResponse({'message': str(e)})
total_resources.append(error_resource_response)
return total_resources | def list_resources(self, params):
' Get list of resources\n Args:\n params:\n - resource_group\n - vms\n\n Returns: list of resources\n '
start_time = time.time()
total_resources = []
try:
(resources, error_resources) = self.list_all_resources(params)
total_resources.extend(resources)
total_resources.extend(error_resources)
_LOGGER.debug(f"[{params['resource_group'].name}] Finished {(time.time() - start_time)} Seconds")
return total_resources
except Exception as e:
_LOGGER.debug(f"[list_resources]: {params['resource_group'].name}] : {e}")
if (type(e) is dict):
error_resource_response = ErrorResourceResponse({'message': json.dumps(e)})
else:
error_resource_response = ErrorResourceResponse({'message': str(e)})
total_resources.append(error_resource_response)
return total_resources<|docstring|>Get list of resources
Args:
params:
- resource_group
- vms
Returns: list of resources<|endoftext|> |
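A minimal, self-contained sketch of the error-collection pattern used by list_resources above: per-item failures become error records that travel in the same result list as the successes. The collect/fetch names are illustrative, not part of the plugin.

def collect(items, fetch):
    results, errors = [], []
    for item in items:
        try:
            results.append(fetch(item))
        except Exception as exc:
            errors.append({'message': str(exc)})  # failure becomes a result record
    return results + errors

print(collect([1, 2, 0], lambda x: 10 / x))  # [10.0, 5.0, {'message': 'division by zero'}]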
49fb2008dc5853532fbb8bc503948702870c88be84c227212daf75f5af69472a | def denseUnet121(pretrained=False, d_block_type='basic', init_method='normal', version=1, **kwargs):
'Densenet-121 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
if (d_block_type == 'dense'):
d_block = get_decoder_block('basic')
model = DenseUNet_Dense(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
elif (d_block_type == 'densev02'):
d_block = get_decoder_block('basic')
model = DenseUNet_Dense_v02(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
else:
d_block = get_decoder_block(d_block_type)
model = DenseUNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if (version == 2):
d_block = get_decoder_block(d_block_type)
model = DenseUNet_v2(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet121(pretrained=True).state_dict()
model_shapes = [v.shape for (k, v) in model_dict.items()]
exclude_model_dict = []
exclude_model_dict = [k for (k, v) in pretrained_dict.items() if (v.shape not in model_shapes)]
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (k not in exclude_model_dict))}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model | Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | networks/dense_decoders.py | denseUnet121 | marcelampc/aerial_mtl | 58 | python | def denseUnet121(pretrained=False, d_block_type='basic', init_method='normal', version=1, **kwargs):
'Densenet-121 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
if (d_block_type == 'dense'):
d_block = get_decoder_block('basic')
model = DenseUNet_Dense(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
elif (d_block_type == 'densev02'):
d_block = get_decoder_block('basic')
model = DenseUNet_Dense_v02(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
else:
d_block = get_decoder_block(d_block_type)
model = DenseUNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if (version == 2):
d_block = get_decoder_block(d_block_type)
model = DenseUNet_v2(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet121(pretrained=True).state_dict()
model_shapes = [v.shape for (k, v) in model_dict.items()]
exclude_model_dict = []
exclude_model_dict = [k for (k, v) in pretrained_dict.items() if (v.shape not in model_shapes)]
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (k not in exclude_model_dict))}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model | def denseUnet121(pretrained=False, d_block_type='basic', init_method='normal', version=1, **kwargs):
'Densenet-121 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
if (d_block_type == 'dense'):
d_block = get_decoder_block('basic')
model = DenseUNet_Dense(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
elif (d_block_type == 'densev02'):
d_block = get_decoder_block('basic')
model = DenseUNet_Dense_v02(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
else:
d_block = get_decoder_block(d_block_type)
model = DenseUNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if (version == 2):
d_block = get_decoder_block(d_block_type)
model = DenseUNet_v2(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet121(pretrained=True).state_dict()
model_shapes = [v.shape for (k, v) in model_dict.items()]
exclude_model_dict = []
exclude_model_dict = [k for (k, v) in pretrained_dict.items() if (v.shape not in model_shapes)]
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (k not in exclude_model_dict))}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model<|docstring|>Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet<|endoftext|> |
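The pretrained-loading idiom in the DenseUNet constructors above has two steps worth isolating: drop pretrained tensors whose shape no longer exists in the modified model, then rename torchvision's legacy 'norm.1'-style keys to 'norm1'. A hedged sketch, assuming torch and torchvision are installed; a plain densenet121 stands in for the custom decoder model:

import re
from torchvision import models

new_model = models.densenet121()                       # stand-in for the custom U-Net
model_dict = new_model.state_dict()
pretrained_dict = models.densenet121(pretrained=True).state_dict()

# Step 1: keep only matching keys whose tensor shape the new model can accept.
shapes = {tuple(v.shape) for v in model_dict.values()}
pretrained_dict = {k: v for k, v in pretrained_dict.items()
                   if k in model_dict and tuple(v.shape) in shapes}

# Step 2: rewrite legacy '...norm.1.weight' keys to '...norm1.weight'.
pattern = re.compile(r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict):
    m = pattern.match(key)
    if m:
        pretrained_dict[m.group(1) + m.group(2)] = pretrained_dict.pop(key)

model_dict.update(pretrained_dict)
new_model.load_state_dict(model_dict)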
41f0cf74cfab50699387589daeffec591a70be840a984e51e2cffcdce0bf4cda | def D3net_shared_weights(pretrained=False, d_block_type='basic', init_method='normal', version=1, **kwargs):
'Densenet-121 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
d_block = get_decoder_block(d_block_type)
model = D3netSharedWeights(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet121(pretrained=True).state_dict()
model_shapes = [v.shape for (k, v) in model_dict.items()]
exclude_model_dict = []
exclude_model_dict = [k for (k, v) in pretrained_dict.items() if (v.shape not in model_shapes)]
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (k not in exclude_model_dict))}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model | Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | networks/dense_decoders.py | D3net_shared_weights | marcelampc/aerial_mtl | 58 | python | def D3net_shared_weights(pretrained=False, d_block_type='basic', init_method='normal', version=1, **kwargs):
'Densenet-121 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
d_block = get_decoder_block(d_block_type)
model = D3netSharedWeights(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet121(pretrained=True).state_dict()
model_shapes = [v.shape for (k, v) in model_dict.items()]
exclude_model_dict = []
exclude_model_dict = [k for (k, v) in pretrained_dict.items() if (v.shape not in model_shapes)]
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (k not in exclude_model_dict))}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model | def D3net_shared_weights(pretrained=False, d_block_type='basic', init_method='normal', version=1, **kwargs):
'Densenet-121 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
d_block = get_decoder_block(d_block_type)
model = D3netSharedWeights(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet121(pretrained=True).state_dict()
model_shapes = [v.shape for (k, v) in model_dict.items()]
exclude_model_dict = []
exclude_model_dict = [k for (k, v) in pretrained_dict.items() if (v.shape not in model_shapes)]
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if ((k in model_dict) and (k not in exclude_model_dict))}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model<|docstring|>Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet<|endoftext|> |
65e0acef04f3a880aceb3625c10776cb8ab50a36f836646a38108bb0e3b8c5a8 | def denseUnet169(pretrained=False, d_block_type='basic', init_method='normal', **kwargs):
'Densenet-169 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
d_block = get_decoder_block(d_block_type)
model = DenseUNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet169(pretrained=True).state_dict()
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict)}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model | Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | networks/dense_decoders.py | denseUnet169 | marcelampc/aerial_mtl | 58 | python | def denseUnet169(pretrained=False, d_block_type='basic', init_method='normal', **kwargs):
'Densenet-169 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
d_block = get_decoder_block(d_block_type)
model = DenseUNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet169(pretrained=True).state_dict()
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict)}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model | def denseUnet169(pretrained=False, d_block_type='basic', init_method='normal', **kwargs):
'Densenet-169 model from\n `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n '
d_block = get_decoder_block(d_block_type)
model = DenseUNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), d_block=d_block, **kwargs)
if pretrained:
w_init.init_weights(model, init_method)
model_dict = model.state_dict()
pretrained_dict = models.densenet169(pretrained=True).state_dict()
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict)}
pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
for key in list(pretrained_dict.keys()):
res = pattern.match(key)
if res:
new_key = (res.group(1) + res.group(2))
pretrained_dict[new_key] = pretrained_dict[key]
del pretrained_dict[key]
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model<|docstring|>Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet<|endoftext|> |
074071667e1821e5d077bd0db2f63b34402c0d65c8d9911baab8209179d51d30 | def add_data_available_callback(self, cb):
'Warning: callback is called back in another thread!'
if (not self._callbacks):
self._enable_listener()
ref = ((max(self._callbacks) + 1) if self._callbacks else 0)
self._callbacks[ref] = cb
return ref | Warning: callback is called back in another thread! | dds/__init__.py | add_data_available_callback | urielka/pydds-rti-xml | 0 | python | def add_data_available_callback(self, cb):
if (not self._callbacks):
self._enable_listener()
ref = ((max(self._callbacks) + 1) if self._callbacks else 0)
self._callbacks[ref] = cb
return ref | def add_data_available_callback(self, cb):
if (not self._callbacks):
self._enable_listener()
ref = ((max(self._callbacks) + 1) if self._callbacks else 0)
self._callbacks[ref] = cb
return ref<|docstring|>Warning: callback is called back in another thread!<|endoftext|> |
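Why the returned ref matters: it is the handle a caller later uses to deregister, so keys must stay unique (hence max()+1 above). An illustrative registry only, not pydds itself:

class CallbackRegistry:
    def __init__(self):
        self._callbacks = {}

    def add(self, cb):
        ref = (max(self._callbacks) + 1) if self._callbacks else 0
        self._callbacks[ref] = cb
        return ref

    def remove(self, ref):
        del self._callbacks[ref]

    def fire(self, *args):
        for cb in list(self._callbacks.values()):
            cb(*args)

reg = CallbackRegistry()
ref = reg.add(print)
reg.fire('data available')   # prints: data available
reg.remove(ref)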
39b9dac585b1e03bdde69662df250ff86d28a575c4bfbff15dbd5e9fa3bbbd98 | def _receive(self, instanceState: DDS_InstanceStateKindEnum, take=True):
"'takeFlag' controls whether read samples stay in the DDS cache (i.e. use DDS Read API) or removed (i.e. use DDS Take API) "
data_seq = DDSType.DynamicDataSeq()
DDSFunc.DynamicDataSeq_initialize(data_seq)
info_seq = DDSType.SampleInfoSeq()
DDSFunc.SampleInfoSeq_initialize(info_seq)
try:
if take:
self._dyn_narrowed_reader.take(ctypes.byref(data_seq), ctypes.byref(info_seq), DDS_LENGTH_UNLIMITED, get('ANY_SAMPLE_STATE', DDS_SampleStateMask), get('ANY_VIEW_STATE', DDS_ViewStateMask), instanceState.value)
else:
self._dyn_narrowed_reader.read(ctypes.byref(data_seq), ctypes.byref(info_seq), DDS_LENGTH_UNLIMITED, get('ANY_SAMPLE_STATE', DDS_SampleStateMask), get('ANY_VIEW_STATE', DDS_ViewStateMask), instanceState.value)
except Error as e:
if (str(e) == 'no data'):
return []
else:
raise e
data_seq_length = data_seq.get_length()
samplesList = []
try:
for i in range(data_seq_length):
sampleInfo = unpack_sampleInfo(info_seq.get_reference(i))
sampleData = unpack_dd(data_seq.get_reference(i))
sampleDict = {'sampleInfo': sampleInfo, 'sampleData': sampleData}
samplesList.append(sampleDict)
return samplesList
finally:
self._dyn_narrowed_reader.return_loan(ctypes.byref(data_seq), ctypes.byref(info_seq)) | 'take' controls whether read samples stay in the DDS cache (i.e. use the DDS Read API) or are removed (i.e. use the DDS Take API) | dds/__init__.py | _receive | urielka/pydds-rti-xml | 0 | python | def _receive(self, instanceState: DDS_InstanceStateKindEnum, take=True):
" "
data_seq = DDSType.DynamicDataSeq()
DDSFunc.DynamicDataSeq_initialize(data_seq)
info_seq = DDSType.SampleInfoSeq()
DDSFunc.SampleInfoSeq_initialize(info_seq)
try:
if take:
self._dyn_narrowed_reader.take(ctypes.byref(data_seq), ctypes.byref(info_seq), DDS_LENGTH_UNLIMITED, get('ANY_SAMPLE_STATE', DDS_SampleStateMask), get('ANY_VIEW_STATE', DDS_ViewStateMask), instanceState.value)
else:
self._dyn_narrowed_reader.read(ctypes.byref(data_seq), ctypes.byref(info_seq), DDS_LENGTH_UNLIMITED, get('ANY_SAMPLE_STATE', DDS_SampleStateMask), get('ANY_VIEW_STATE', DDS_ViewStateMask), instanceState.value)
except Error as e:
if (str(e) == 'no data'):
return []
else:
raise e
data_seq_length = data_seq.get_length()
samplesList = []
try:
for i in range(data_seq_length):
sampleInfo = unpack_sampleInfo(info_seq.get_reference(i))
sampleData = unpack_dd(data_seq.get_reference(i))
sampleDict = {'sampleInfo': sampleInfo, 'sampleData': sampleData}
samplesList.append(sampleDict)
return samplesList
finally:
self._dyn_narrowed_reader.return_loan(ctypes.byref(data_seq), ctypes.byref(info_seq)) | def _receive(self, instanceState: DDS_InstanceStateKindEnum, take=True):
" "
data_seq = DDSType.DynamicDataSeq()
DDSFunc.DynamicDataSeq_initialize(data_seq)
info_seq = DDSType.SampleInfoSeq()
DDSFunc.SampleInfoSeq_initialize(info_seq)
try:
if take:
self._dyn_narrowed_reader.take(ctypes.byref(data_seq), ctypes.byref(info_seq), DDS_LENGTH_UNLIMITED, get('ANY_SAMPLE_STATE', DDS_SampleStateMask), get('ANY_VIEW_STATE', DDS_ViewStateMask), instanceState.value)
else:
self._dyn_narrowed_reader.read(ctypes.byref(data_seq), ctypes.byref(info_seq), DDS_LENGTH_UNLIMITED, get('ANY_SAMPLE_STATE', DDS_SampleStateMask), get('ANY_VIEW_STATE', DDS_ViewStateMask), instanceState.value)
except Error as e:
if (str(e) == 'no data'):
return []
else:
raise e
data_seq_length = data_seq.get_length()
samplesList = []
try:
for i in range(data_seq_length):
sampleInfo = unpack_sampleInfo(info_seq.get_reference(i))
sampleData = unpack_dd(data_seq.get_reference(i))
sampleDict = {'sampleInfo': sampleInfo, 'sampleData': sampleData}
samplesList.append(sampleDict)
return samplesList
finally:
self._dyn_narrowed_reader.return_loan(ctypes.byref(data_seq), ctypes.byref(info_seq))<|docstring|>'take' controls whether read samples stay in the DDS cache (i.e. use the DDS Read API) or are removed (i.e. use the DDS Take API)<|endoftext|>
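The read-versus-take distinction that _receive exposes can be mimicked without any DDS installation; the following is a toy cache only, not RTI's API:

class ToyReader:
    def __init__(self):
        self._cache = []

    def write(self, sample):
        self._cache.append(sample)

    def receive(self, take=True):
        samples = list(self._cache)
        if take:               # DDS 'take': samples leave the reader cache
            self._cache.clear()
        return samples         # DDS 'read': samples stay cached

r = ToyReader()
r.write({'x': 1})
print(r.receive(take=False))   # [{'x': 1}] - still cached
print(r.receive(take=True))    # [{'x': 1}] - now drained
print(r.receive(take=True))    # []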
324eba17e02956308ee64949ee798af552cb0c4da3989b553e0e3ed46626e4ba | def lookup_datawriter_by_name(self, datawriter_full_name):
'Retrieves the DDS DataWriter according to its full name (e.g. MyPublisher::HelloWorldWriter)'
res = Writer(self, cstring(datawriter_full_name))
return res | Retrieves the DDS DataWriter according to its full name (e.g. MyPublisher::HelloWorldWriter) | dds/__init__.py | lookup_datawriter_by_name | urielka/pydds-rti-xml | 0 | python | def lookup_datawriter_by_name(self, datawriter_full_name):
res = Writer(self, cstring(datawriter_full_name))
return res | def lookup_datawriter_by_name(self, datawriter_full_name):
res = Writer(self, cstring(datawriter_full_name))
return res<|docstring|>Retrieves the DDS DataWriter according to its full name (e.g. MyPublisher::HelloWorldWriter)<|endoftext|>
ece60077c7e155911cdc94b7028434896d585d9f8eee1a19c89650d51aedb722 | def lookup_datareader_by_name(self, datareader_full_name):
'Retrieves the DDS DataReader according to its full name (e.g. MySubscriber::HelloWorldReader)'
res = Reader(self, cstring(datareader_full_name))
return res | Retrieves the DDS DataReader according to its full name (e.g. MySubscriber::HelloWorldReader) | dds/__init__.py | lookup_datareader_by_name | urielka/pydds-rti-xml | 0 | python | def lookup_datareader_by_name(self, datareader_full_name):
res = Reader(self, cstring(datareader_full_name))
return res | def lookup_datareader_by_name(self, datareader_full_name):
res = Reader(self, cstring(datareader_full_name))
return res<|docstring|>Retrieves the DDS DataReader according to its full name (e.g. MySubscriber::HelloWorldReader)<|endoftext|>
67486c9401fb07b33683a45897ede99d4ffec605c5068507e6680fd9786b32bb | def test_r():
'\n This is basically just the using_R notebook.\n '
r = R(r_file)
r.display_source_ipython()
model = r.model('myModel')
distance = r.distance('myDistance')
sum_stat = r.summary_statistics('mySummaryStatistics')
prior = pyabc.Distribution(meanX=pyabc.RV('uniform', 0, 10), meanY=pyabc.RV('uniform', 0, 10))
sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)
abc = pyabc.ABCSMC(model, prior, distance, summary_statistics=sum_stat, sampler=sampler)
db = ('sqlite:///' + os.path.join(gettempdir(), 'test_external.db'))
abc.new(db, r.observation('mySumStatData'))
history = abc.run(minimum_epsilon=0.9, max_nr_populations=2)
history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0]['cars'].head() | This is basically just the using_R notebook. | test/test_external.py | test_r | Pat-Laub/pyABC | 0 | python | def test_r():
'\n \n '
r = R(r_file)
r.display_source_ipython()
model = r.model('myModel')
distance = r.distance('myDistance')
sum_stat = r.summary_statistics('mySummaryStatistics')
prior = pyabc.Distribution(meanX=pyabc.RV('uniform', 0, 10), meanY=pyabc.RV('uniform', 0, 10))
sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)
abc = pyabc.ABCSMC(model, prior, distance, summary_statistics=sum_stat, sampler=sampler)
db = ('sqlite:///' + os.path.join(gettempdir(), 'test_external.db'))
abc.new(db, r.observation('mySumStatData'))
history = abc.run(minimum_epsilon=0.9, max_nr_populations=2)
history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0]['cars'].head() | def test_r():
'\n \n '
r = R(r_file)
r.display_source_ipython()
model = r.model('myModel')
distance = r.distance('myDistance')
sum_stat = r.summary_statistics('mySummaryStatistics')
prior = pyabc.Distribution(meanX=pyabc.RV('uniform', 0, 10), meanY=pyabc.RV('uniform', 0, 10))
sampler = pyabc.sampler.MulticoreEvalParallelSampler(n_procs=2)
abc = pyabc.ABCSMC(model, prior, distance, summary_statistics=sum_stat, sampler=sampler)
db = ('sqlite:///' + os.path.join(gettempdir(), 'test_external.db'))
abc.new(db, r.observation('mySumStatData'))
history = abc.run(minimum_epsilon=0.9, max_nr_populations=2)
history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0]['cars'].head()<|docstring|>This is basically just the using_R notebook.<|endoftext|> |
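For contrast with the R-backed pieces, the same pyabc hooks also accept plain Python callables; a minimal sketch using only signatures already visible in the test above (pyabc must be installed, and a database path plus observation are still needed before run):

import pyabc

def model(parameters):
    return {'y': parameters['meanX'] + 0.5}      # summary statistics as a dict

def distance(x, x0):
    return abs(x['y'] - x0['y'])

prior = pyabc.Distribution(meanX=pyabc.RV('uniform', 0, 10))
abc = pyabc.ABCSMC(model, prior, distance)
# abc.new('sqlite:///abc.db', {'y': 5}); abc.run(minimum_epsilon=0.1, max_nr_populations=2)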
129f4eb25cf042b43cd9894f947a3511f32ce4f9159cf1a7c7090492f8cbaf70 | def __init__(self, filename, *args, **kwargs):
'\n Initialize an instance of GatkReport\n :param filename: path to a GATKReport file\n :param args: args\n :param kwargs: kwargs\n '
self.tables = dict()
self.update(dict(*args, **kwargs))
self.filename = filename
self.name = os.path.basename(self.filename)
with open(self.filename, 'r') as fh:
self.lines = [line.rstrip() for line in fh]
report_id = self._get_report_id(self.lines[0])
if (report_id is not None):
self.version = report_id.version
if (self.version[0] >= 1):
self._n_tables = report_id.n_tables
self._read_gatkreportv1(self.lines)
if (self.version[0] == 0):
self._read_gatkreportv0(self.lines)
else:
raise GsalibException("This isn't a GATK Report file or it's an unsupported version.") | Initialize an instance of GatkReport
:param filename: path to a GATKReport file
:param args: args
:param kwargs: kwargs | gsalib.py | __init__ | myourshaw/gsalib | 0 | python | def __init__(self, filename, *args, **kwargs):
'\n Initialize an instance of GatkReport\n :param filename: path to a GATKReport file\n :param args: args\n :param kwargs: kwargs\n '
self.tables = dict()
self.update(dict(*args, **kwargs))
self.filename = filename
self.name = os.path.basename(self.filename)
with open(self.filename, 'r') as fh:
self.lines = [line.rstrip() for line in fh]
report_id = self._get_report_id(self.lines[0])
if (report_id is not None):
self.version = report_id.version
if (self.version[0] >= 1):
self._n_tables = report_id.n_tables
self._read_gatkreportv1(self.lines)
if (self.version[0] == 0):
self._read_gatkreportv0(self.lines)
else:
raise GsalibException("This isn't a GATK Report file or it's an unsupported version.") | def __init__(self, filename, *args, **kwargs):
'\n Initialize an instance of GatkReport\n :param filename: path to a GATKReport file\n :param args: args\n :param kwargs: kwargs\n '
self.tables = dict()
self.update(dict(*args, **kwargs))
self.filename = filename
self.name = os.path.basename(self.filename)
with open(self.filename, 'r') as fh:
self.lines = [line.rstrip() for line in fh]
report_id = self._get_report_id(self.lines[0])
if (report_id is not None):
self.version = report_id.version
if (self.version[0] >= 1):
self._n_tables = report_id.n_tables
self._read_gatkreportv1(self.lines)
if (self.version[0] == 0):
self._read_gatkreportv0(self.lines)
else:
raise GsalibException("This isn't a GATK Report file or it's an unsupported version.")<|docstring|>Initialize an instance of GatkReport
:param filename: path to a GATKReport file
:param args: args
:param kwargs: kwargs<|endoftext|> |
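The dispatch above hinges on the report header line. A self-contained regex in the spirit of the class's _report_rx — the exact private pattern is not shown in this row, so treat the pattern below as an assumption — pulling version and table count from a v1 header:

import re

report_rx = re.compile(r'^#:GATKReport\.v(?P<version>\d+\.\d+):(?P<n_tables>\d+)')
m = report_rx.match('#:GATKReport.v1.1:2')
if m:
    version = tuple(map(int, m.group('version').split('.')))
    n_tables = int(m.group('n_tables'))
    print(version, n_tables)   # (1, 1) 2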
afa9324aa0518a96b1c84b7f0d9d384ae47093765d32878ce6cf93858d35354e | def __setitem__(self, dataframe):
'\n Add a Dataframe to the report.\n :param dataframe: the dataframe to be added\n :return: None\n '
try:
table_name = dataframe.name
except AttributeError:
table_name = dataframe.name = 'table'
self._used_names[table_name] += 1
if (self._used_names[table_name] > 1):
dataframe.name = table_name = '{}.{}'.format(table_name, str(self._used_names[table_name]))
self.tables[table_name] = dataframe | Add a Dataframe to the report.
:param dataframe: the dataframe to be added
:return: None | gsalib.py | __setitem__ | myourshaw/gsalib | 0 | python | def __setitem__(self, dataframe):
'\n Add a Dataframe to the report.\n :param dataframe: the dataframe to be added\n :return: None\n '
try:
table_name = dataframe.name
except AttributeError:
table_name = dataframe.name = 'table'
self._used_names[table_name] += 1
if (self._used_names[table_name] > 1):
dataframe.name = table_name = '{}.{}'.format(table_name, str(self._used_names[table_name]))
self.tables[table_name] = dataframe | def __setitem__(self, dataframe):
'\n Add a Dataframe to the report.\n :param dataframe: the dataframe to be added\n :return: None\n '
try:
table_name = dataframe.name
except AttributeError:
table_name = dataframe.name = 'table'
self._used_names[table_name] += 1
if (self._used_names[table_name] > 1):
dataframe.name = table_name = '{}.{}'.format(table_name, str(self._used_names[table_name]))
self.tables[table_name] = dataframe<|docstring|>Add a Dataframe to the report.
:param dataframe: the dataframe to be added
:return: None<|endoftext|> |
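A standalone rehearsal of the duplicate-name policy in __setitem__: a Counter tracks how often each base name was used, and repeats get a '.N' suffix.

from collections import Counter

used_names = Counter()
tables = {}

def register(name, value):
    used_names[name] += 1
    if used_names[name] > 1:
        name = '{}.{}'.format(name, used_names[name])
    tables[name] = value
    return name

print(register('table', 1))   # table
print(register('table', 2))   # table.2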
04e88ce073793e0c7490ff2ec358220ea0abda9edc1308c4238336fac58d035c | def _get_report_id(self, report_line):
'\n Get the version of the GATK report. For versions >=1 also get the number of tables.\n :param report_line: report definition line\n :return: ReportId (version, n_tables,)\n '
m = self._report_rx.match(report_line)
if (m is not None):
return self._ReportId(tuple(map(int, m.group('version').split('.'))), int(m.group('n_tables')))
else:
m = self._report_v0_rx.match(report_line)
if (m is not None):
return self._ReportId(tuple(map(int, m.group('version').split('.'))), None)
else:
return None | Get the version of the GATK report. For versions >=1 also get the number of tables.
:param report_line: report definition line
:return: ReportId (version, n_tables,) | gsalib.py | _get_report_id | myourshaw/gsalib | 0 | python | def _get_report_id(self, report_line):
'\n Get the version of the GATK report. For versions >=1 also get the number of tables.\n :param report_line: report definition line\n :return: ReportId (version, n_tables,)\n '
m = self._report_rx.match(report_line)
if (m is not None):
return self._ReportId(tuple(map(int, m.group('version').split('.'))), int(m.group('n_tables')))
else:
m = self._report_v0_rx.match(report_line)
if (m is not None):
return self._ReportId(tuple(map(int, m.group('version').split('.'))), None)
else:
return None | def _get_report_id(self, report_line):
'\n Get the version of the GATK report. For versions >=1 also get the number of tables.\n :param report_line: report definition line\n :return: ReportId (version, n_tables,)\n '
m = self._report_rx.match(report_line)
if (m is not None):
return self._ReportId(tuple(map(int, m.group('version').split('.'))), int(m.group('n_tables')))
else:
m = self._report_v0_rx.match(report_line)
if (m is not None):
return self._ReportId(tuple(map(int, m.group('version').split('.'))), None)
else:
return None<|docstring|>Get the version of the GATK report. For versions >=1 also get the number of tables.
:param report_line: report definition line
:return: ReportId (version, n_tables,)<|endoftext|> |
f2c19e5e241a0f420ed3b70a9cb8a1a952408c081f3f1dfb4a215e728a096ccd | def _get_table_format(self, table_format_line):
'\n Get the format of a v1.x GATK table.\n :param table_format_line: table format definition line\n :return: TableFormat (n_cols, n_rows, col_formats,)\n '
m = self._table_format_rx.match(table_format_line)
if (m is not None):
return self._TableFormat((int(m.group('n_cols')) - 1), int(m.group('n_rows')), m.group('col_formats').split(':'))
else:
return None | Get the format of a v1.x GATK table.
:param table_format_line: table format definition line
:return: TableFormat (n_cols, n_rows, col_formats,) | gsalib.py | _get_table_format | myourshaw/gsalib | 0 | python | def _get_table_format(self, table_format_line):
'\n Get the format of a v1.x GATK table.\n :param table_format_line: table format definition line\n :return: TableFormat (n_cols, n_rows, col_formats,)\n '
m = self._table_format_rx.match(table_format_line)
if (m is not None):
return self._TableFormat((int(m.group('n_cols')) - 1), int(m.group('n_rows')), m.group('col_formats').split(':'))
else:
return None | def _get_table_format(self, table_format_line):
'\n Get the format of a v1.x GATK table.\n :param table_format_line: table format definition line\n :return: TableFormat (n_cols, n_rows, col_formats,)\n '
m = self._table_format_rx.match(table_format_line)
if (m is not None):
return self._TableFormat((int(m.group('n_cols')) - 1), int(m.group('n_rows')), m.group('col_formats').split(':'))
else:
return None<|docstring|>Get the format of a v1.x GATK table.
:param table_format_line: table format definition line
:return: TableFormat (n_cols, n_rows, col_formats,)<|endoftext|> |
59ed305241f9ba0e1806496553d8cb44506ddffae168ab33a2b7d5154217bb46 | def _get_table_v0_id(self, table_id_line):
'\n Get the name of a v0.x GATK table.\n :param table_id_line: table id definition line\n :return: TableId (table_name, table_description,)\n '
m = self._report_v0_rx.match(table_id_line)
if (m is not None):
return self._TableId(m.group('table_name'), m.group('table_description'))
else:
return None | Get the name of a v0.x GATK table.
:param table_id_line: table id definition line
:return: TableId (table_name, table_description,) | gsalib.py | _get_table_v0_id | myourshaw/gsalib | 0 | python | def _get_table_v0_id(self, table_id_line):
'\n Get the name of a v0.x GATK table.\n :param table_id_line: table id definition line\n :return: TableId (table_name, table_description,)\n '
m = self._report_v0_rx.match(table_id_line)
if (m is not None):
return self._TableId(m.group('table_name'), m.group('table_description'))
else:
return None | def _get_table_v0_id(self, table_id_line):
'\n Get the name of a v0.x GATK table.\n :param table_id_line: table id definition line\n :return: TableId (table_name, table_description,)\n '
m = self._report_v0_rx.match(table_id_line)
if (m is not None):
return self._TableId(m.group('table_name'), m.group('table_description'))
else:
return None<|docstring|>Get the name of a v0.x GATK table.
:param table_id_line: table id definition line
:return: TableId (table_name, table_description,)<|endoftext|> |
1d01fdf9a69db210647e419c2a1ed1b33722d0205e60583270f31714da327a63 | def _get_table_id(self, table_id_line):
'\n Get the name and description of a v1.x GATK table.\n :param table_id_line: table id definition line\n :return: TableId (table_name, table_description,)\n '
m = self._table_id_rx.match(table_id_line)
if (m is not None):
return self._TableId(m.group('table_name'), m.group('table_description'))
else:
return None | Get the name and description of a v1.x GATK table.
:param table_id_line: table id definition line
:return: TableId (table_name, table_description,) | gsalib.py | _get_table_id | myourshaw/gsalib | 0 | python | def _get_table_id(self, table_id_line):
'\n Get the name and description of a v1.x GATK table.\n :param table_id_line: table id definition line\n :return: TableId (table_name, table_description,)\n '
m = self._table_id_rx.match(table_id_line)
if (m is not None):
return self._TableId(m.group('table_name'), m.group('table_description'))
else:
return None | def _get_table_id(self, table_id_line):
'\n Get the name and description of a v1.x GATK table.\n :param table_id_line: table id definition line\n :return: TableId (table_name, table_description,)\n '
m = self._table_id_rx.match(table_id_line)
if (m is not None):
return self._TableId(m.group('table_name'), m.group('table_description'))
else:
return None<|docstring|>Get the name and description of a v1.x GATK table.
:param table_id_line: table id definition line
:return: TableId (table_name, table_description,)<|endoftext|> |
6bc72d8f38e5df98f172c93bf68bf98c31cda7d13330071d3d37d4e927bbb0b3 | def _read_gatkreportv0(self, lines):
'\n Reads a v0.x GATK report into a GATKReport object\n :param lines: list of lines from report file\n :return: None\n '
n_tables = 0
table_id = None
table_data = []
for line in lines:
if ((line.strip() == '') or (line.strip().startswith('#') and (not line.lower().startswith('##:gatkreport')))):
continue
elif line.lower().startswith('##:gatkreport'):
if table_data:
table_str = StringIO('\n'.join(table_data))
if (self.version == (0, 1)):
df = pd.read_table(table_str, delim_whitespace=True)
else:
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
table_id = None
table_data = []
if (table_id is None):
table_id = self._get_table_v0_id(line)
n_tables += 1
continue
elif (table_id is not None):
table_data.append(line)
if table_data:
table_str = StringIO('\n'.join(table_data))
if (self.version == (0, 1)):
df = pd.read_table(table_str, delim_whitespace=True)
else:
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df | Reads a v0.x GATK report into a GATKReport object
:param lines: list of lines from report file
:return: None | gsalib.py | _read_gatkreportv0 | myourshaw/gsalib | 0 | python | def _read_gatkreportv0(self, lines):
'\n Reads a v0.x GATK report into a GATKReport object\n :param lines: list of lines from report file\n :return: None\n '
n_tables = 0
table_id = None
table_data = []
for line in lines:
if ((line.strip() == '') or (line.strip().startswith('#') and (not line.lower().startswith('##:gatkreport')))):
continue
elif line.lower().startswith('##:gatkreport'):
if table_data:
table_str = StringIO('\n'.join(table_data))
if (self.version == (0, 1)):
df = pd.read_table(table_str, delim_whitespace=True)
else:
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
table_id = None
table_data = []
if (table_id is None):
table_id = self._get_table_v0_id(line)
n_tables += 1
continue
elif (table_id is not None):
table_data.append(line)
if table_data:
table_str = StringIO('\n'.join(table_data))
if (self.version == (0, 1)):
df = pd.read_table(table_str, delim_whitespace=True)
else:
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df | def _read_gatkreportv0(self, lines):
'\n Reads a v0.x GATK report into a GATKReport object\n :param lines: list of lines from report file\n :return: None\n '
n_tables = 0
table_id = None
table_data = []
for line in lines:
if ((line.strip() == '') or (line.strip().startswith('#') and (not line.lower().startswith('##:gatkreport')))):
continue
elif line.lower().startswith('##:gatkreport'):
if table_data:
table_str = StringIO('\n'.join(table_data))
if (self.version == (0, 1)):
df = pd.read_table(table_str, delim_whitespace=True)
else:
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
table_id = None
table_data = []
if (table_id is None):
table_id = self._get_table_v0_id(line)
n_tables += 1
continue
elif (table_id is not None):
table_data.append(line)
if table_data:
table_str = StringIO('\n'.join(table_data))
if (self.version == (0, 1)):
df = pd.read_table(table_str, delim_whitespace=True)
else:
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df<|docstring|>Reads a v0.x GATK report into a GATKReport object
:param lines: list of lines from report file
:return: None<|endoftext|> |
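Both readers funnel the accumulated lines through StringIO into pandas; a tiny end-to-end taste with one fixed-width GATK-style table (pandas required; the column values here are made up for illustration):

from io import StringIO
import pandas as pd

table_data = ['Sample   nReads   nMapped',
              'NA12878  1000000  987654 ']
df = pd.read_fwf(StringIO('\n'.join(table_data)))
df.name = 'ExampleTable'
print(df)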
5bf9162a1a3dac9ca061106d911c50663b38ca2659c6257152e301fb05d9d35c | def _read_gatkreportv1(self, lines):
'\n Reads a v1.x GATK report into a GATKReport object\n :param lines: list of lines from report file\n :return: None\n '
n_tables = 0
table_format = None
table_id = None
table_data = []
for line in lines:
if ((line.strip() == '') or line.lower().startswith('#:gatkreport') or (line.strip().startswith('#') and (not line.lower().startswith('#:gatktable')))):
continue
elif line.lower().startswith('#:gatktable'):
if table_data:
n_rows = (len(table_data) - 1)
if (n_rows != table_format.n_rows):
warn('Table {} should have {} rows, but actually has {}.'.format(table_id.table_name, table_format.n_rows, n_rows))
table_str = StringIO('\n'.join(table_data))
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
table_format = None
table_id = None
table_data = []
if (table_format is None):
table_format = self._get_table_format(line)
else:
table_id = self._get_table_id(line)
n_tables += 1
continue
elif ((table_format is not None) and (table_id is not None)):
table_data.append(line)
if table_data:
n_rows = (len(table_data) - 1)
if (n_rows != table_format.n_rows):
warn('Table {} should have {} rows, but actually has {}.'.format(table_id.table_name, table_format.n_rows, n_rows))
table_str = StringIO('\n'.join(table_data))
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
if (n_tables != self._n_tables):
warn('Report {} should have {} tables, but actually has {}.'.format(self.name, self._n_tables, n_tables)) | Reads a v1.x GATK report into a GATKReport object
:param lines: list of lines from report file
:return: None | gsalib.py | _read_gatkreportv1 | myourshaw/gsalib | 0 | python | def _read_gatkreportv1(self, lines):
'\n Reads a v1.x GATK report into a GATKReport object\n :param lines: list of lines from report file\n :return: None\n '
n_tables = 0
table_format = None
table_id = None
table_data = []
for line in lines:
if ((line.strip() == '') or line.lower().startswith('#:gatkreport') or (line.strip().startswith('#') and (not line.lower().startswith('#:gatktable')))):
continue
elif line.lower().startswith('#:gatktable'):
if table_data:
n_rows = (len(table_data) - 1)
if (n_rows != table_format.n_rows):
warn('Table {} should have {} rows, but actually has {}.'.format(table_id.table_name, table_format.n_rows, n_rows))
table_str = StringIO('\n'.join(table_data))
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
table_format = None
table_id = None
table_data = []
if (table_format is None):
table_format = self._get_table_format(line)
else:
table_id = self._get_table_id(line)
n_tables += 1
continue
elif ((table_format is not None) and (table_id is not None)):
table_data.append(line)
if table_data:
n_rows = (len(table_data) - 1)
if (n_rows != table_format.n_rows):
warn('Table {} should have {} rows, but actually has {}.'.format(table_id.table_name, table_format.n_rows, n_rows))
table_str = StringIO('\n'.join(table_data))
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
if (n_tables != self._n_tables):
warn('Report {} should have {} tables, but actually has {}.'.format(self.name, self._n_tables, n_tables)) | def _read_gatkreportv1(self, lines):
'\n Reads a v1.x GATK report into a GATKReport object\n :param lines: list of lines from report file\n :return: None\n '
n_tables = 0
table_format = None
table_id = None
table_data = []
for line in lines:
if ((line.strip() == '') or line.lower().startswith('#:gatkreport') or (line.strip().startswith('#') and (not line.lower().startswith('#:gatktable')))):
continue
elif line.lower().startswith('#:gatktable'):
if table_data:
n_rows = (len(table_data) - 1)
if (n_rows != table_format.n_rows):
warn('Table {} should have {} rows, but actually has {}.'.format(table_id.table_name, table_format.n_rows, n_rows))
table_str = StringIO('\n'.join(table_data))
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
table_format = None
table_id = None
table_data = []
if (table_format is None):
table_format = self._get_table_format(line)
else:
table_id = self._get_table_id(line)
n_tables += 1
continue
elif ((table_format is not None) and (table_id is not None)):
table_data.append(line)
if table_data:
n_rows = (len(table_data) - 1)
if (n_rows != table_format.n_rows):
warn('Table {} should have {} rows, but actually has {}.'.format(table_id.table_name, table_format.n_rows, n_rows))
table_str = StringIO('\n'.join(table_data))
df = pd.read_fwf(table_str)
df.name = table_id.table_name
self.tables[df.name] = df
if (n_tables != self._n_tables):
warn('Report {} should have {} tables, but actually has {}.'.format(self.name, self._n_tables, n_tables))<|docstring|>Reads a v1.x GATK report into a GATKReport object
:param lines: list of lines from report file
:return: None<|endoftext|> |
dccc74c2621802fc9c92630a774bf93cabddf05e3e44b5f5defdc51d56eb8493 | def on_save(self, *args):
'Events called when the "OK" dialog box button is clicked.'
self.dismiss() | Events called when the "OK" dialog box button is clicked. | modified_picker/picker.py | on_save | ShareASmile/car-locator | 21 | python | def on_save(self, *args):
self.dismiss() | def on_save(self, *args):
self.dismiss()<|docstring|>Events called when the "OK" dialog box button is clicked.<|endoftext|> |
0486ca9d7dc67b383755fd3d372c0f4d5555885e4bc76dce0619c1819e5e0847 | def on_cancel(self, *args):
'Events called when the "CANCEL" dialog box button is clicked.'
self.dismiss() | Events called when the "CANCEL" dialog box button is clicked. | modified_picker/picker.py | on_cancel | ShareASmile/car-locator | 21 | python | def on_cancel(self, *args):
self.dismiss() | def on_cancel(self, *args):
self.dismiss()<|docstring|>Events called when the "CANCEL" dialog box button is clicked.<|endoftext|> |
179895fe82b7e75916f309b1c8ba9f2807ead29c4796fb6061e2a6732140d7ac | def isnumeric(self, value):
'\n We are forced to create a custom method because if we set the ``int``\n value for the ``input_filter`` parameter of the text field, then the\n ``-`` character is still available for keyboard input. Apparently, this\n is a Kivy bug.\n '
try:
int(value)
return True
except ValueError:
return False | We are forced to create a custom method because if we set the ``int``
value for the ``input_filter`` parameter of the text field, then the
``-`` character is still available for keyboard input. Apparently, this
is a Kivy bug. | modified_picker/picker.py | isnumeric | ShareASmile/car-locator | 21 | python | def isnumeric(self, value):
'\n We are forced to create a custom method because if we set the ``int``\n value for the ``input_filter`` parameter of the text field, then the\n ``-`` character is still available for keyboard input. Apparently, this\n is a Kivy bug.\n '
try:
int(value)
return True
except ValueError:
return False | def isnumeric(self, value):
'\n We are forced to create a custom method because if we set the ``int``\n value for the ``input_filter`` parameter of the text field, then the\n ``-`` character is still available for keyboard input. Apparently, this\n is a Kivy bug.\n '
try:
int(value)
return True
except ValueError:
return False<|docstring|>We are forced to create a custom method because if we set the ``int``
value for the ``input_filter`` parameter of the text field, then the
``-`` character is still available for keyboard input. Apparently, this
is a Kivy bug.<|endoftext|> |
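The custom check in action: a lone '-' (which Kivy's int filter can let through) fails int() and is rejected, while signed and unsigned digits pass.

def isnumeric(value):
    try:
        int(value)
        return True
    except ValueError:
        return False

print(isnumeric('7'))    # True
print(isnumeric('-'))    # False - the stray character the filter misses
print(isnumeric('-5'))   # True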
d68c8c0443cff085338e6bef03a3a6334e78d3768dad9e4091eec8b9489fe578 | def do_backspace(self, *args):
'Prevent deleting text from the middle of a line of a text field.'
self._backspace = True
self.text = self.text[:(- 1)]
self._date = self.text
self._backspace = False | Prevent deleting text from the middle of a line of a text field. | modified_picker/picker.py | do_backspace | ShareASmile/car-locator | 21 | python | def do_backspace(self, *args):
self._backspace = True
self.text = self.text[:(- 1)]
self._date = self.text
self._backspace = False | def do_backspace(self, *args):
self._backspace = True
self.text = self.text[:(- 1)]
self._date = self.text
self._backspace = False<|docstring|>Prevent deleting text from the middle of a line of a text field.<|endoftext|> |
7b0b4f2a344a8f455f9ed7722f80a7dc10da89ceb3b29a47f28157b496b25511 | def input_filter(self, value, boolean):
'Date validity check in dd/mm/yyyy format.'
cursor = self.cursor[0]
if (len(self.text) == 10):
return
if self.isnumeric(value):
self._date += value
value = int(value)
if (cursor == 0):
if (self.owner.sel_month == 2):
valid_value = 2
else:
valid_value = 3
if (value > valid_value):
self._date = self._date[:(- 1)]
return
if (cursor == 1):
days_of_month = []
for _date in self.owner.calendar.itermonthdates(self.owner.sel_year, self.owner.sel_month):
if (_date.month == self.owner.sel_month):
days_of_month.append(_date.day)
if (not (int(self._date[:2]) in days_of_month)):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 2):
if (int(value) > 1):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 4):
if (int(self._date[(- 2):]) not in list(range(1, 13))):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 6):
if (not int(value)):
self._date = self._date[:(- 1)]
return
return str(value) | Date validity check in dd/mm/yyyy format. | modified_picker/picker.py | input_filter | ShareASmile/car-locator | 21 | python | def input_filter(self, value, boolean):
cursor = self.cursor[0]
if (len(self.text) == 10):
return
if self.isnumeric(value):
self._date += value
value = int(value)
if (cursor == 0):
if (self.owner.sel_month == 2):
valid_value = 2
else:
valid_value = 3
if (value > valid_value):
self._date = self._date[:(- 1)]
return
if (cursor == 1):
days_of_month = []
for _date in self.owner.calendar.itermonthdates(self.owner.sel_year, self.owner.sel_month):
if (_date.month == self.owner.sel_month):
days_of_month.append(_date.day)
if (not (int(self._date[:2]) in days_of_month)):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 2):
if (int(value) > 1):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 4):
if (int(self._date[(- 2):]) not in list(range(1, 13))):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 6):
if (not int(value)):
self._date = self._date[:(- 1)]
return
return str(value) | def input_filter(self, value, boolean):
cursor = self.cursor[0]
if (len(self.text) == 10):
return
if self.isnumeric(value):
self._date += value
value = int(value)
if (cursor == 0):
if (self.owner.sel_month == 2):
valid_value = 2
else:
valid_value = 3
if (value > valid_value):
self._date = self._date[:(- 1)]
return
if (cursor == 1):
days_of_month = []
for _date in self.owner.calendar.itermonthdates(self.owner.sel_year, self.owner.sel_month):
if (_date.month == self.owner.sel_month):
days_of_month.append(_date.day)
if (not (int(self._date[:2]) in days_of_month)):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 2):
if (int(value) > 1):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 4):
if (int(self._date[(- 2):]) not in list(range(1, 13))):
self._date = self._date[:(- 1)]
return
elif (self.cursor[0] == 6):
if (not int(value)):
self._date = self._date[:(- 1)]
return
return str(value)<|docstring|>Date validity check in dd/mm/yyyy format.<|endoftext|> |
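How the cursor==1 branch derives its valid day numbers: calendar's itermonthdates pads with neighbouring-month days when weeks overlap month boundaries, so they must be filtered by month, exactly as input_filter does.

import calendar

cal = calendar.Calendar()
sel_year, sel_month = 2021, 2
days_of_month = [d.day for d in cal.itermonthdates(sel_year, sel_month)
                 if d.month == sel_month]
print(days_of_month[0], days_of_month[-1])   # 1 28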
b5c15794eab34953fbdbf875d5b126dbe552ba069b4b029830ee3400ddfe718d | def _get_list_date(self):
'\n Returns a list as `[dd, mm, yyyy]` from a text field for entering a date.\n '
return [d for d in self.text.split('/') if d] | Returns a list as `[dd, mm, yyyy]` from a text field for entering a date. | modified_picker/picker.py | _get_list_date | ShareASmile/car-locator | 21 | python | def _get_list_date(self):
'\n \n '
return [d for d in self.text.split('/') if d] | def _get_list_date(self):
'\n \n '
return [d for d in self.text.split('/') if d]<|docstring|>Returns a list as `[dd, mm, yyyy]` from a text field for entering a date.<|endoftext|>
49474ea566c519db578326d23f60c6bba7ef3dc8c53c754d8d50dd441c4ef7c5 | def update_text_full_date(self, list_date):
'\n Updates the weekday, month and day number shown in the title\n of an open date input dialog.\n '
if ((len(list_date) == 1) and (len(list_date[0]) == 2)):
self.ids.label_full_date.text = self.set_text_full_date(self.sel_year, self.sel_month, list_date[0], self.theme_cls.device_orientation)
if ((len(list_date) == 2) and (len(list_date[1]) == 2)):
self.ids.label_full_date.text = self.set_text_full_date(self.sel_year, int(list_date[1]), int(list_date[0]), self.theme_cls.device_orientation)
if ((len(list_date) == 3) and (len(list_date[2]) == 4)):
self.ids.label_full_date.text = self.set_text_full_date(int(list_date[2]), int(list_date[1]), int(list_date[0]), self.theme_cls.device_orientation) | Updates the weekday, month and day number shown in the title
in an open date input dialog. | modified_picker/picker.py | update_text_full_date | ShareASmile/car-locator | 21 | python | def update_text_full_date(self, list_date):
'\n Updates the weekday, month and day number shown in the title\n of an open date input dialog.\n '
if ((len(list_date) == 1) and (len(list_date[0]) == 2)):
self.ids.label_full_date.text = self.set_text_full_date(self.sel_year, self.sel_month, list_date[0], self.theme_cls.device_orientation)
if ((len(list_date) == 2) and (len(list_date[1]) == 2)):
self.ids.label_full_date.text = self.set_text_full_date(self.sel_year, int(list_date[1]), int(list_date[0]), self.theme_cls.device_orientation)
if ((len(list_date) == 3) and (len(list_date[2]) == 4)):
self.ids.label_full_date.text = self.set_text_full_date(int(list_date[2]), int(list_date[1]), int(list_date[0]), self.theme_cls.device_orientation) | def update_text_full_date(self, list_date):
'\n Updates the weekday, month and day number shown in the title\n of an open date input dialog.\n '
if ((len(list_date) == 1) and (len(list_date[0]) == 2)):
self.ids.label_full_date.text = self.set_text_full_date(self.sel_year, self.sel_month, list_date[0], self.theme_cls.device_orientation)
if ((len(list_date) == 2) and (len(list_date[1]) == 2)):
self.ids.label_full_date.text = self.set_text_full_date(self.sel_year, int(list_date[1]), int(list_date[0]), self.theme_cls.device_orientation)
if ((len(list_date) == 3) and (len(list_date[2]) == 4)):
self.ids.label_full_date.text = self.set_text_full_date(int(list_date[2]), int(list_date[1]), int(list_date[0]), self.theme_cls.device_orientation)<|docstring|>Updates the weekday, month and day number shown in the title
in an open date input dialog.<|endoftext|>
19ae8fe4567b7c6edba1bbf4b9a3e27cbb44dafd1b6b3de3fc949a381785fce8 | def get_field(self):
'Creates and returns a text field object used to enter dates.'
field = DatePickerEnterDataField(owner=self)
field.color_mode = 'custom'
field.line_color_focus = (self.theme_cls.primary_color if (not self.input_field_text_color) else self.input_field_text_color)
field.current_hint_text_color = field.line_color_focus
field._current_hint_text_color = field.line_color_focus
return field | Creates and returns a text field object used to enter dates. | modified_picker/picker.py | get_field | ShareASmile/car-locator | 21 | python | def get_field(self):
field = DatePickerEnterDataField(owner=self)
field.color_mode = 'custom'
field.line_color_focus = (self.theme_cls.primary_color if (not self.input_field_text_color) else self.input_field_text_color)
field.current_hint_text_color = field.line_color_focus
field._current_hint_text_color = field.line_color_focus
return field | def get_field(self):
field = DatePickerEnterDataField(owner=self)
field.color_mode = 'custom'
field.line_color_focus = (self.theme_cls.primary_color if (not self.input_field_text_color) else self.input_field_text_color)
field.current_hint_text_color = field.line_color_focus
field._current_hint_text_color = field.line_color_focus
return field<|docstring|>Creates and returns a text field object used to enter dates.<|endoftext|> |
a31f9327a225b2ba2e5a2be167c154b9e71471e2902a4bfe26eb9bf46724ddbd | def set_text_full_date(self, year, month, day, orientation):
'\n Returns a string of type "Tue, Feb 2" or "Tue,\nFeb 2" for a date\n choice and a string like "Feb 15 - Mar 23" or "Feb 15,\nMar 23" for\n a date range.\n '
if (not (1 <= int(month) <= 12)):
raise ValueError(f'''set_text_full_date:
Month [{month}] out of range.''')
if (int(day) > calendar.monthrange(int(year), month)[1]):
raise ValueError(f'''set_text_full_date:
Day [{day}] out of range for the month {month}''')
date = datetime.date(int(year), int(month), int(day))
separator = ('\n' if ((orientation == 'landscape') and (not self._input_date_dialog_open)) else ' ')
if (self.mode == 'picker'):
if ((not self.min_date) and (not self.max_date)):
return (((date.strftime('%a,').capitalize() + separator) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
else:
return ((((self.min_date.strftime('%b ').capitalize() + str(self.min_date.day).lstrip('0')) + (' - ' if (orientation == 'portrait') else (',\n' if (not self._input_date_dialog_open) else ', '))) + self.max_date.strftime('%b ').capitalize()) + str(self.max_date.day).lstrip('0'))
elif (self.mode == 'range'):
if (self._start_range_date and self._end_range_date):
if ((orientation == 'landscape') and ('-' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split('-')[0].strip() + (',\n' if (not self._input_date_dialog_open) else ' - ')) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
else:
if ((orientation == 'landscape') and (',' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split(',')[0].strip() + (',\n' if (not self._input_date_dialog_open) else '-')) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
if ((orientation == 'portrait') and (',' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split(',')[0].strip() + '-') + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
if ((orientation == 'portrait') and ('-' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split('-')[0].strip() + ' - ') + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
elif (self._start_range_date and (not self._end_range_date)):
return (((date.strftime('%b ').capitalize() + str(day).lstrip('0')) + ' - End') if (orientation != 'landscape') else ((date.strftime('%b ').capitalize() + str(day).lstrip('0')) + '{}End'.format((',\n' if (not self._input_date_dialog_open) else ' - '))))
elif ((not self._start_range_date) and (not self._end_range_date)):
return ('Start - End' if (orientation != 'landscape') else 'Start{}End'.format((',\n' if (not self._input_date_dialog_open) else ' - '))) | Returns a string of type "Tue, Feb 2" or "Tue,
Feb 2" for a date
choice and a string like "Feb 15 - Mar 23" or "Feb 15,
Mar 23" for
a date range. | modified_picker/picker.py | set_text_full_date | ShareASmile/car-locator | 21 | python | def set_text_full_date(self, year, month, day, orientation):
'\n Returns a string of type "Tue, Feb 2" or "Tue,\nFeb 2" for a date\n choice and a string like "Feb 15 - Mar 23" or "Feb 15,\nMar 23" for\n a date range.\n '
if (12 < int(month) < 0):
raise ValueError(f'set_text_full_date:
Month [{month}] out of range.')
if (int(day) > calendar.monthrange(int(year), month)[1]):
raise ValueError(f'set_text_full_date:
Day [{day}] out of range for the month {month}')
date = datetime.date(int(year), int(month), int(day))
separator = ('\n' if ((orientation == 'landscape') and (not self._input_date_dialog_open)) else ' ')
if (self.mode == 'picker'):
if ((not self.min_date) and (not self.max_date)):
return (((date.strftime('%a,').capitalize() + separator) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
else:
return ((((self.min_date.strftime('%b ').capitalize() + str(self.min_date.day).lstrip('0')) + (' - ' if (orientation == 'portrait') else (',\n' if (not self._input_date_dialog_open) else ', '))) + self.max_date.strftime('%b ').capitalize()) + str(self.max_date.day).lstrip('0'))
elif (self.mode == 'range'):
if (self._start_range_date and self._end_range_date):
if ((orientation == 'landscape') and ('-' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split('-')[0].strip() + (',\n' if (not self._input_date_dialog_open) else ' - ')) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
else:
if ((orientation == 'landscape') and (',' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split(',')[0].strip() + (',\n' if (not self._input_date_dialog_open) else '-')) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
if ((orientation == 'portrait') and (',' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split(',')[0].strip() + '-') + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
if ((orientation == 'portrait') and ('-' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split('-')[0].strip() + ' - ') + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
elif (self._start_range_date and (not self._end_range_date)):
return (((date.strftime('%b ').capitalize() + str(day).lstrip('0')) + ' - End') if (orientation != 'landscape') else ((date.strftime('%b ').capitalize() + str(day).lstrip('0')) + '{}End'.format((',\n' if (not self._input_date_dialog_open) else ' - '))))
elif ((not self._start_range_date) and (not self._end_range_date)):
return ('Start - End' if (orientation != 'landscape') else 'Start{}End'.format((',\n' if (not self._input_date_dialog_open) else ' - '))) | def set_text_full_date(self, year, month, day, orientation):
        '\n Returns a string of type "Tue, Feb 2" or "Tue,\nFeb 2" for a chosen\n date and a string like "Feb 15 - Mar 23" or "Feb 15,\nMar 23" for\n a date range.\n '
        if ((int(month) < 1) or (int(month) > 12)):
            raise ValueError(f'set_text_full_date: Month [{month}] out of range.')
        if (int(day) > calendar.monthrange(int(year), int(month))[1]):
            raise ValueError(f'set_text_full_date: Day [{day}] out of range for the month {month}')
date = datetime.date(int(year), int(month), int(day))
separator = ('\n' if ((orientation == 'landscape') and (not self._input_date_dialog_open)) else ' ')
if (self.mode == 'picker'):
if ((not self.min_date) and (not self.max_date)):
return (((date.strftime('%a,').capitalize() + separator) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
else:
return ((((self.min_date.strftime('%b ').capitalize() + str(self.min_date.day).lstrip('0')) + (' - ' if (orientation == 'portrait') else (',\n' if (not self._input_date_dialog_open) else ', '))) + self.max_date.strftime('%b ').capitalize()) + str(self.max_date.day).lstrip('0'))
elif (self.mode == 'range'):
if (self._start_range_date and self._end_range_date):
if ((orientation == 'landscape') and ('-' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split('-')[0].strip() + (',\n' if (not self._input_date_dialog_open) else ' - ')) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
else:
if ((orientation == 'landscape') and (',' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split(',')[0].strip() + (',\n' if (not self._input_date_dialog_open) else '-')) + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
if ((orientation == 'portrait') and (',' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split(',')[0].strip() + '-') + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
if ((orientation == 'portrait') and ('-' in self.ids.label_full_date.text)):
return (((self.ids.label_full_date.text.split('-')[0].strip() + ' - ') + date.strftime('%b ').capitalize()) + str(day).lstrip('0'))
elif (self._start_range_date and (not self._end_range_date)):
return (((date.strftime('%b ').capitalize() + str(day).lstrip('0')) + ' - End') if (orientation != 'landscape') else ((date.strftime('%b ').capitalize() + str(day).lstrip('0')) + '{}End'.format((',\n' if (not self._input_date_dialog_open) else ' - '))))
elif ((not self._start_range_date) and (not self._end_range_date)):
                return ('Start - End' if (orientation != 'landscape') else 'Start{}End'.format((',\n' if (not self._input_date_dialog_open) else ' - ')))<|docstring|>Returns a string of type "Tue, Feb 2" or "Tue,
Feb 2" for a chosen
date and a string like "Feb 15 - Mar 23" or "Feb 15,
Mar 23" for
a date range.<|endoftext|>
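A minimal sketch of the strftime formatting used above, assuming only the standard library (no KivyMD is needed for the date math itself):

    import datetime

    date = datetime.date(2021, 2, 2)
    # Mirrors the picker-mode label: weekday abbreviation, month, unpadded day.
    label = date.strftime('%a,').capitalize() + ' ' + date.strftime('%b ').capitalize() + str(date.day).lstrip('0')
    print(label)  # Tue, Feb 2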
321db849309a79d0a053e7a3f0de762aca726fb49c84744687b853ca978db1db | def change_month(self, operation):
'\n Called when "chevron-left" and "chevron-right" buttons are pressed.\n Switches the calendar to the previous/next month.\n '
operation = (1 if (operation == 'next') else (- 1))
month = (12 if ((self.month + operation) == 0) else (1 if ((self.month + operation) == 13) else (self.month + operation)))
year = ((self.year - 1) if ((self.month + operation) == 0) else ((self.year + 1) if ((self.month + operation) == 13) else self.year))
self.update_calendar(year, month)
if self.sel_day:
x = calendar.monthrange(year, month)[1]
if (x < self.sel_day):
                self.sel_day = (x if ((year <= self.sel_year) and (month <= self.sel_month)) else 1) | Called when "chevron-left" and "chevron-right" buttons are pressed.
Switches the calendar to the previous/next month. | modified_picker/picker.py | change_month | ShareASmile/car-locator | 21 | python | def change_month(self, operation):
'\n Called when "chevron-left" and "chevron-right" buttons are pressed.\n Switches the calendar to the previous/next month.\n '
operation = (1 if (operation == 'next') else (- 1))
month = (12 if ((self.month + operation) == 0) else (1 if ((self.month + operation) == 13) else (self.month + operation)))
year = ((self.year - 1) if ((self.month + operation) == 0) else ((self.year + 1) if ((self.month + operation) == 13) else self.year))
self.update_calendar(year, month)
if self.sel_day:
x = calendar.monthrange(year, month)[1]
if (x < self.sel_day):
                self.sel_day = (x if ((year <= self.sel_year) and (month <= self.sel_month)) else 1) | def change_month(self, operation):
'\n Called when "chevron-left" and "chevron-right" buttons are pressed.\n Switches the calendar to the previous/next month.\n '
operation = (1 if (operation == 'next') else (- 1))
month = (12 if ((self.month + operation) == 0) else (1 if ((self.month + operation) == 13) else (self.month + operation)))
year = ((self.year - 1) if ((self.month + operation) == 0) else ((self.year + 1) if ((self.month + operation) == 13) else self.year))
self.update_calendar(year, month)
if self.sel_day:
x = calendar.monthrange(year, month)[1]
if (x < self.sel_day):
                self.sel_day = (x if ((year <= self.sel_year) and (month <= self.sel_month)) else 1)<|docstring|>Called when "chevron-left" and "chevron-right" buttons are pressed.
Switches the calendar to the previous/next month.<|endoftext|> |
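For illustration, the wrap-around arithmetic above can be checked in isolation; this standalone sketch assumes nothing beyond plain Python:

    def step_month(year, month, operation):
        op = 1 if operation == 'next' else -1
        new = month + op
        # December + 1 rolls to January of the next year, January - 1 to December of the previous one.
        month = 12 if new == 0 else (1 if new == 13 else new)
        year = year - 1 if new == 0 else (year + 1 if new == 13 else year)
        return year, month

    assert step_month(2021, 12, 'next') == (2022, 1)
    assert step_month(2021, 1, 'prev') == (2020, 12)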
80785c48a0c3bde9807826e9f8e53b8c0f417f43eac217a292a15c0860018561 | def on_text(self, *args):
        '\n Texts should be center aligned. Here we set the padding of the text\n to keep them aligned.\n '
if (not self.c):
            self.c = Clock.schedule_once(self._set_padding, 0) | Texts should be center aligned. Here we set the padding of the text
to keep them aligned. | modified_picker/picker.py | on_text | ShareASmile/car-locator | 21 | python | def on_text(self, *args):
        '\n Texts should be center aligned. Here we set the padding of the text\n to keep them aligned.\n '
if (not self.c):
self.c = Clock.schedule_once(self._set_padding, 0) | def on_text(self, *args):
        '\n Texts should be center aligned. Here we set the padding of the text\n to keep them aligned.\n '
if (not self.c):
            self.c = Clock.schedule_once(self._set_padding, 0)<|docstring|>Texts should be center aligned. Here we set the padding of the text
to keep them aligned.<|endoftext|>
f4d2d2e96a9493cd96c125c38e644acda9b87f9304621feddff5060bd824ffae | def _update_labels(self, animate=True, *args):
        '\n This method builds the selector based on the current mode, which can be\n hour, minute, or military.\n '
if (self.mode == 'hour'):
param = (1, 12)
self.degree_spacing = 30
self.start_from = 60
elif (self.mode == 'minute'):
param = (0, 59, 5)
self.degree_spacing = 6
self.start_from = 90
elif (self.mode == 'military'):
param = (1, 24)
self.degree_spacing = 30
self.start_from = 90
if animate:
anim = Animation(content_scale=0, t=self.t, d=self.d)
anim.bind(on_complete=(lambda *args: self._add_items(*param)))
anim.start(self)
else:
            self._add_items(*param) | This method builds the selector based on the current mode, which can be
hour, minute, or military. | modified_picker/picker.py | _update_labels | ShareASmile/car-locator | 21 | python | def _update_labels(self, animate=True, *args):
        '\n This method builds the selector based on the current mode, which can be\n hour, minute, or military.\n '
if (self.mode == 'hour'):
param = (1, 12)
self.degree_spacing = 30
self.start_from = 60
elif (self.mode == 'minute'):
param = (0, 59, 5)
self.degree_spacing = 6
self.start_from = 90
elif (self.mode == 'military'):
param = (1, 24)
self.degree_spacing = 30
self.start_from = 90
if animate:
anim = Animation(content_scale=0, t=self.t, d=self.d)
anim.bind(on_complete=(lambda *args: self._add_items(*param)))
anim.start(self)
else:
self._add_items(*param) | def _update_labels(self, animate=True, *args):
        '\n This method builds the selector based on the current mode, which can be\n hour, minute, or military.\n '
if (self.mode == 'hour'):
param = (1, 12)
self.degree_spacing = 30
self.start_from = 60
elif (self.mode == 'minute'):
param = (0, 59, 5)
self.degree_spacing = 6
self.start_from = 90
elif (self.mode == 'military'):
param = (1, 24)
self.degree_spacing = 30
self.start_from = 90
if animate:
anim = Animation(content_scale=0, t=self.t, d=self.d)
anim.bind(on_complete=(lambda *args: self._add_items(*param)))
anim.start(self)
else:
            self._add_items(*param)<|docstring|>This method builds the selector based on the current mode, which can be
hour, minute, or military.<|endoftext|>
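As a rough illustration only (the actual placement is done by the circular Kivy layout, so the formula below is an assumption), `start_from` and `degree_spacing` can be read as polar coordinates for the dial labels:

    import math

    def dial_pos(index, degree_spacing, start_from, radius=1.0):
        # Each successive label sits `degree_spacing` degrees clockwise from `start_from`.
        angle = math.radians(start_from - index * degree_spacing)
        return (radius * math.cos(angle), radius * math.sin(angle))

    hour_positions = [dial_pos(i, 30, 60) for i in range(12)]  # 12 labels, 30 degrees apart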
f26dfca3de22b29e5ecfffdaff43a679ecca43e0fbac46667e0125df49a63f5f | def _add_items(self, start, end, step=1):
"\n Adds all number in range `[start, end + 1]` to the circular layout with\n the specified step. Step means that all widgets will be added to layout\n but sets the opacity for skipped widgets to `0` because we are using\n the label's text as a reference to the selected number so we have to\n add these to layout.\n "
self.clear_widgets()
i = 0
for x in range(start, (end + 1)):
label = SelectorLabel(text=f'{x}')
if ((i % step) != 0):
label.opacity = 0
self.bind(text_color=label.setter('text_color'), font_name=label.setter('font_name'))
self.add_widget(label)
i += 1
Clock.schedule_once(self.update_time)
Clock.schedule_once(self._get_centers, 0.1)
anim = Animation(content_scale=1, t=self.t, d=self.d)
        anim.start(self) | Adds all numbers in the range `[start, end + 1]` to the circular layout
with the specified step. All widgets are still added to the layout, but
skipped widgets get opacity `0`, because the label's text is used as a
reference to the selected number, so they have to be in the layout. | modified_picker/picker.py | _add_items | ShareASmile/car-locator | 21 | python | def _add_items(self, start, end, step=1):
"\n Adds all number in range `[start, end + 1]` to the circular layout with\n the specified step. Step means that all widgets will be added to layout\n but sets the opacity for skipped widgets to `0` because we are using\n the label's text as a reference to the selected number so we have to\n add these to layout.\n "
self.clear_widgets()
i = 0
for x in range(start, (end + 1)):
label = SelectorLabel(text=f'{x}')
if ((i % step) != 0):
label.opacity = 0
self.bind(text_color=label.setter('text_color'), font_name=label.setter('font_name'))
self.add_widget(label)
i += 1
Clock.schedule_once(self.update_time)
Clock.schedule_once(self._get_centers, 0.1)
anim = Animation(content_scale=1, t=self.t, d=self.d)
anim.start(self) | def _add_items(self, start, end, step=1):
"\n Adds all number in range `[start, end + 1]` to the circular layout with\n the specified step. Step means that all widgets will be added to layout\n but sets the opacity for skipped widgets to `0` because we are using\n the label's text as a reference to the selected number so we have to\n add these to layout.\n "
self.clear_widgets()
i = 0
for x in range(start, (end + 1)):
label = SelectorLabel(text=f'{x}')
if ((i % step) != 0):
label.opacity = 0
self.bind(text_color=label.setter('text_color'), font_name=label.setter('font_name'))
self.add_widget(label)
i += 1
Clock.schedule_once(self.update_time)
Clock.schedule_once(self._get_centers, 0.1)
anim = Animation(content_scale=1, t=self.t, d=self.d)
        anim.start(self)<|docstring|>Adds all numbers in the range `[start, end + 1]` to the circular layout
with the specified step. All widgets are still added to the layout, but
skipped widgets get opacity `0`, because the label's text is used as a
reference to the selected number, so they have to be in the layout.<|endoftext|>
081d5be0ab7c3334a27a097ab32bc3a173ca24a0145fa84a4948e0ed0d7f5069 | def _get_centers(self, *args):
        '\n Returns a list of all centers. We use this for positioning the selector\n indicator.\n '
self._centers_pos = []
for child in self.children:
            self._centers_pos.append(child.center) | Returns a list of all centers. We use this for positioning the selector
indicator. | modified_picker/picker.py | _get_centers | ShareASmile/car-locator | 21 | python | def _get_centers(self, *args):
        '\n Returns a list of all centers. We use this for positioning the selector\n indicator.\n '
self._centers_pos = []
for child in self.children:
self._centers_pos.append(child.center) | def _get_centers(self, *args):
        '\n Returns a list of all centers. We use this for positioning the selector\n indicator.\n '
self._centers_pos = []
for child in self.children:
            self._centers_pos.append(child.center)<|docstring|>Returns a list of all centers. We use this for positioning the selector
indicator.<|endoftext|>
ecc4098b106cc9abe0536563fb8f1c613dbdda09b6105823741026ca716c777c | def _get_closest_widget(self, pos):
        '\n Returns the nearest widget to the given position. We use this to create\n the magnetic effect.\n '
distance = [Vector(pos).distance(point) for point in self._centers_pos]
if (not distance):
return False
index = distance.index(min(distance))
        return self.children[index] | Returns the nearest widget to the given position. We use this to create
the magnetic effect. | modified_picker/picker.py | _get_closest_widget | ShareASmile/car-locator | 21 | python | def _get_closest_widget(self, pos):
        '\n Returns the nearest widget to the given position. We use this to create\n the magnetic effect.\n '
distance = [Vector(pos).distance(point) for point in self._centers_pos]
if (not distance):
return False
index = distance.index(min(distance))
return self.children[index] | def _get_closest_widget(self, pos):
        '\n Returns the nearest widget to the given position. We use this to create\n the magnetic effect.\n '
distance = [Vector(pos).distance(point) for point in self._centers_pos]
if (not distance):
return False
index = distance.index(min(distance))
        return self.children[index]<|docstring|>Returns the nearest widget to the given position. We use this to create
the magnetic effect.<|endoftext|>
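The same nearest-point search, restated without Kivy's `Vector` for clarity; plain tuples and `math.dist` (Python 3.8+) behave identically here:

    import math

    def closest(pos, centers):
        if not centers:
            return None
        # Index of the smallest distance picks the nearest center.
        distances = [math.dist(pos, c) for c in centers]
        return centers[distances.index(min(distances))]

    print(closest((0, 0), [(3, 4), (1, 1), (10, 0)]))  # (1, 1)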
92e9795b9781eef257adcf475d00088eee93155e878ac16655e205f1252fd1de | def set_selector(self, selected):
"\n Sets the selector's position towards the given text.\n "
widget = None
for wid in self.children:
wid.text_color = self.text_color
if (wid.text == selected):
widget = wid
if (not widget):
return False
self.selector_pos = widget.center
widget.text_color = [1, 1, 1, 1]
self.dispatch('on_selector_change')
return True | Sets the selector's position towards the given text. | modified_picker/picker.py | set_selector | ShareASmile/car-locator | 21 | python | def set_selector(self, selected):
"\n \n "
widget = None
for wid in self.children:
wid.text_color = self.text_color
if (wid.text == selected):
widget = wid
if (not widget):
return False
self.selector_pos = widget.center
widget.text_color = [1, 1, 1, 1]
self.dispatch('on_selector_change')
return True | def set_selector(self, selected):
"\n \n "
widget = None
for wid in self.children:
wid.text_color = self.text_color
if (wid.text == selected):
widget = wid
if (not widget):
return False
self.selector_pos = widget.center
widget.text_color = [1, 1, 1, 1]
self.dispatch('on_selector_change')
return True<|docstring|>Sets the selector's position towards the given text.<|endoftext|> |
f2e36d314ab3569de066f354152a8e791bd2b8e61cb78d5e0c6bb25e47cd2aca | def set_time(self, time_obj):
'\n Manually set time dialog with the specified time.\n '
hour = time_obj.hour
minute = time_obj.minute
        mode = ('pm' if (hour >= 12) else 'am')
        if (hour > 12):
            hour -= 12
        if (hour == 0):
            hour = 12
hour = str(hour)
minute = str(minute)
self._set_time_input(hour, minute)
self._set_dial_time(hour, minute)
self._set_am_pm(mode) | Manually set time dialog with the specified time. | modified_picker/picker.py | set_time | ShareASmile/car-locator | 21 | python | def set_time(self, time_obj):
'\n \n '
hour = time_obj.hour
minute = time_obj.minute
        mode = ('pm' if (hour >= 12) else 'am')
        if (hour > 12):
            hour -= 12
        if (hour == 0):
            hour = 12
hour = str(hour)
minute = str(minute)
self._set_time_input(hour, minute)
self._set_dial_time(hour, minute)
self._set_am_pm(mode) | def set_time(self, time_obj):
'\n \n '
hour = time_obj.hour
minute = time_obj.minute
        mode = ('pm' if (hour >= 12) else 'am')
        if (hour > 12):
            hour -= 12
        if (hour == 0):
            hour = 12
hour = str(hour)
minute = str(minute)
self._set_time_input(hour, minute)
self._set_dial_time(hour, minute)
self._set_am_pm(mode)<|docstring|>Manually set time dialog with the specified time.<|endoftext|> |
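A sketch of the 24-hour to 12-hour conversion performed above, using only `datetime.time`; `hour % 12 or 12` maps 0 to 12 am and keeps 12 as 12 pm:

    import datetime

    def to_12_hour(t):
        mode = 'pm' if t.hour >= 12 else 'am'
        return (t.hour % 12 or 12), t.minute, mode

    assert to_12_hour(datetime.time(12, 30)) == (12, 30, 'pm')  # noon is pm
    assert to_12_hour(datetime.time(0, 5)) == (12, 5, 'am')     # midnight is 12 am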
1b63d0c5589578011c2c8851f7102a14fe3ca8ca93b7956e36cabc522d4641a1 | def get_state(self):
'\n Returns the current state of TimePicker.\n Can be one of `portrait`, `landscape` or `input`.\n '
return self._state | Returns the current state of TimePicker.
Can be one of `portrait`, `landscape` or `input`. | modified_picker/picker.py | get_state | ShareASmile/car-locator | 21 | python | def get_state(self):
'\n Returns the current state of TimePicker.\n Can be one of `portrait`, `landscape` or `input`.\n '
return self._state | def get_state(self):
'\n Returns the current state of TimePicker.\n Can be one of `portrait`, `landscape` or `input`.\n '
return self._state<|docstring|>Returns the current state of TimePicker.
Can be one of `portrait`, `landscape` or `input`.<|endoftext|> |
ac181a61f8f8749b28b9121e87dc31a97a1acbefbfcb42d2021c8f7359ed68e0 | def load_data(train_path, val_path, test_path):
'\n Load data from csvs into a dictionary for the different splits.\n Args:\n train_path (str): path to csv with training data\n val_path (str): path to csv with validation data\n test_path (str): path to csv with test data\n Returns:\n data (dict): dictionary of the form {split: sub_dic} for each\n split, where sub_dic contains SMILES strings and values for\n each property.\n\n '
data = {}
paths = [train_path, val_path, test_path]
names = ['train', 'val', 'test']
for (name, path) in zip(names, paths):
data[name] = read_csv(path)
return data | Load data from csvs into a dictionary for the different splits.
Args:
train_path (str): path to csv with training data
val_path (str): path to csv with validation data
test_path (str): path to csv with test data
Returns:
data (dict): dictionary of the form {split: sub_dic} for each
split, where sub_dic contains SMILES strings and values for
each property. | scripts/cp3d/sklearn/run.py | load_data | jkaraguesian/NeuralForceField | 0 | python | def load_data(train_path, val_path, test_path):
'\n Load data from csvs into a dictionary for the different splits.\n Args:\n train_path (str): path to csv with training data\n val_path (str): path to csv with validation data\n test_path (str): path to csv with test data\n Returns:\n data (dict): dictionary of the form {split: sub_dic} for each\n split, where sub_dic contains SMILES strings and values for\n each property.\n\n '
data = {}
paths = [train_path, val_path, test_path]
names = ['train', 'val', 'test']
for (name, path) in zip(names, paths):
data[name] = read_csv(path)
return data | def load_data(train_path, val_path, test_path):
'\n Load data from csvs into a dictionary for the different splits.\n Args:\n train_path (str): path to csv with training data\n val_path (str): path to csv with validation data\n test_path (str): path to csv with test data\n Returns:\n data (dict): dictionary of the form {split: sub_dic} for each\n split, where sub_dic contains SMILES strings and values for\n each property.\n\n '
data = {}
paths = [train_path, val_path, test_path]
names = ['train', 'val', 'test']
for (name, path) in zip(names, paths):
data[name] = read_csv(path)
return data<|docstring|>Load data from csvs into a dictionary for the different splits.
Args:
train_path (str): path to csv with training data
val_path (str): path to csv with validation data
test_path (str): path to csv with test data
Returns:
data (dict): dictionary of the form {split: sub_dic} for each
split, where sub_dic contains SMILES strings and values for
each property.<|endoftext|> |
18d8963c8022a3e91c5850b7fa591d34e12e4036823e7bc05696aab7ab4aae87 | def make_mol_rep(data, splits, props, fp_type, fp_kwargs):
"\n Make representations for each molecule through Morgan fingerprints,\n and combine all the labels into an array.\n Args:\n data (dict): dictionary with data for each split\n splits (list[str]): name of the splits to use (e.g. train, val, test)\n props (list[str]): properties you'll want to predict with the model.\n fp_type (str): type of fingerprint\n fp_kwargs (dict): kwargs for making the fingerprint\n Returns:\n fps (np.array): fingerprints\n vals (np.array): values to predict\n "
fps = []
vals = []
for split in splits:
smiles_list = data[split]['smiles']
for (i, smiles) in enumerate(smiles_list):
mol = Chem.MolFromSmiles(smiles)
if (fp_type == 'morgan'):
fp = AllChem.GetMorganFingerprintAsBitVect(mol, fp_kwargs['radius'], nBits=fp_kwargs['fp_len'])
elif (fp_type == 'atom_pair'):
fp = atom_pair_fp(mol, nBits=fp_kwargs['fp_len'])
else:
raise NotImplementedError
val_list = [data[split][prop][i] for prop in props]
vals.append(np.array(val_list))
fps.append(fp)
vals = np.stack(vals)
if (vals.shape[(- 1)] == 1):
vals = vals.reshape((- 1))
fps = np.array(fps)
return (fps, vals) | Make representations for each molecule through Morgan fingerprints,
and combine all the labels into an array.
Args:
data (dict): dictionary with data for each split
splits (list[str]): name of the splits to use (e.g. train, val, test)
props (list[str]): properties you'll want to predict with the model.
fp_type (str): type of fingerprint
fp_kwargs (dict): kwargs for making the fingerprint
Returns:
fps (np.array): fingerprints
vals (np.array): values to predict | scripts/cp3d/sklearn/run.py | make_mol_rep | jkaraguesian/NeuralForceField | 0 | python | def make_mol_rep(data, splits, props, fp_type, fp_kwargs):
"\n Make representations for each molecule through Morgan fingerprints,\n and combine all the labels into an array.\n Args:\n data (dict): dictionary with data for each split\n splits (list[str]): name of the splits to use (e.g. train, val, test)\n props (list[str]): properties you'll want to predict with the model.\n fp_type (str): type of fingerprint\n fp_kwargs (dict): kwargs for making the fingerprint\n Returns:\n fps (np.array): fingerprints\n vals (np.array): values to predict\n "
fps = []
vals = []
for split in splits:
smiles_list = data[split]['smiles']
for (i, smiles) in enumerate(smiles_list):
mol = Chem.MolFromSmiles(smiles)
if (fp_type == 'morgan'):
fp = AllChem.GetMorganFingerprintAsBitVect(mol, fp_kwargs['radius'], nBits=fp_kwargs['fp_len'])
elif (fp_type == 'atom_pair'):
fp = atom_pair_fp(mol, nBits=fp_kwargs['fp_len'])
else:
raise NotImplementedError
val_list = [data[split][prop][i] for prop in props]
vals.append(np.array(val_list))
fps.append(fp)
vals = np.stack(vals)
if (vals.shape[(- 1)] == 1):
vals = vals.reshape((- 1))
fps = np.array(fps)
return (fps, vals) | def make_mol_rep(data, splits, props, fp_type, fp_kwargs):
"\n Make representations for each molecule through Morgan fingerprints,\n and combine all the labels into an array.\n Args:\n data (dict): dictionary with data for each split\n splits (list[str]): name of the splits to use (e.g. train, val, test)\n props (list[str]): properties you'll want to predict with the model.\n fp_type (str): type of fingerprint\n fp_kwargs (dict): kwargs for making the fingerprint\n Returns:\n fps (np.array): fingerprints\n vals (np.array): values to predict\n "
fps = []
vals = []
for split in splits:
smiles_list = data[split]['smiles']
for (i, smiles) in enumerate(smiles_list):
mol = Chem.MolFromSmiles(smiles)
if (fp_type == 'morgan'):
fp = AllChem.GetMorganFingerprintAsBitVect(mol, fp_kwargs['radius'], nBits=fp_kwargs['fp_len'])
elif (fp_type == 'atom_pair'):
fp = atom_pair_fp(mol, nBits=fp_kwargs['fp_len'])
else:
raise NotImplementedError
val_list = [data[split][prop][i] for prop in props]
vals.append(np.array(val_list))
fps.append(fp)
vals = np.stack(vals)
if (vals.shape[(- 1)] == 1):
vals = vals.reshape((- 1))
fps = np.array(fps)
return (fps, vals)<|docstring|>Make representations for each molecule through Morgan fingerprints,
and combine all the labels into an array.
Args:
data (dict): dictionary with data for each split
splits (list[str]): name of the splits to use (e.g. train, val, test)
props (list[str]): properties you'll want to predict with the model.
fp_type (str): type of fingerprint
fp_kwargs (dict): kwargs for making the fingerprint
Returns:
fps (np.array): fingerprints
vals (np.array): values to predict<|endoftext|> |
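A minimal, self-contained version of the fingerprinting step, assuming RDKit is installed; the SMILES string 'CCO' (ethanol) is just a placeholder for a dataset entry:

    import numpy as np
    from rdkit import Chem
    from rdkit.Chem import AllChem

    mol = Chem.MolFromSmiles('CCO')
    fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)  # radius 2
    x = np.array(fp)  # 0/1 feature vector of length 2048
    print(x.shape, int(x.sum()))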
c82e73bf6947e578447c30d629ecd76269d1d38553f2c0a7e2f76dec483edcd1 | def get_hyperparams(model_type, classifier, custom_hyps=None, fp_type='morgan'):
"\n Get hyperparameters and ranges to be optimized for a\n given model type.\n Args:\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n\n Returns:\n hyperparams (dict): dictionary with hyperparameters, their\n types, and their ranges.\n "
class_or_reg = ('classification' if classifier else 'regression')
hyperparams = HYPERPARAMS[class_or_reg][model_type]
hyperparams.update(HYPERPARAMS[fp_type])
if (custom_hyps is not None):
for (key, vals) in custom_hyps.items():
if (key in hyperparams):
hyperparams[key]['vals'] = vals
return hyperparams | Get hyperparameters and ranges to be optimized for a
given model type.
Args:
model_type (str): name of model (e.g. random_forest)
classifier (bool): whether or not it's a classifier
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
hyperparams (dict): dictionary with hyperparameters, their
types, and their ranges. | scripts/cp3d/sklearn/run.py | get_hyperparams | jkaraguesian/NeuralForceField | 0 | python | def get_hyperparams(model_type, classifier, custom_hyps=None, fp_type='morgan'):
"\n Get hyperparameters and ranges to be optimized for a\n given model type.\n Args:\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n\n Returns:\n hyperparams (dict): dictionary with hyperparameters, their\n types, and their ranges.\n "
class_or_reg = ('classification' if classifier else 'regression')
hyperparams = HYPERPARAMS[class_or_reg][model_type]
hyperparams.update(HYPERPARAMS[fp_type])
if (custom_hyps is not None):
for (key, vals) in custom_hyps.items():
if (key in hyperparams):
hyperparams[key]['vals'] = vals
return hyperparams | def get_hyperparams(model_type, classifier, custom_hyps=None, fp_type='morgan'):
"\n Get hyperparameters and ranges to be optimized for a\n given model type.\n Args:\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n\n Returns:\n hyperparams (dict): dictionary with hyperparameters, their\n types, and their ranges.\n "
class_or_reg = ('classification' if classifier else 'regression')
hyperparams = HYPERPARAMS[class_or_reg][model_type]
hyperparams.update(HYPERPARAMS[fp_type])
if (custom_hyps is not None):
for (key, vals) in custom_hyps.items():
if (key in hyperparams):
hyperparams[key]['vals'] = vals
return hyperparams<|docstring|>Get hyperparameters and ranges to be optimized for a
given model type.
Args:
model_type (str): name of model (e.g. random_forest)
classifier (bool): whether or not it's a classifier
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
hyperparams (dict): dictionary with hyperparameters, their
types, and their ranges.<|endoftext|> |
abc39c03f1eb98fbdbc05d6e1df45dfec25063ea9cc35917fe725f0f48f035d8 | def make_space(model_type, classifier, fp_type='morgan'):
"\n Make `hyperopt` space of hyperparameters.\n Args:\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n fp_type (str, optional): type of fingerprint to use\n Returns:\n space (dict): hyperopt` space of hyperparameters\n "
space = {}
hyperparams = get_hyperparams(model_type, classifier, fp_type=fp_type)
for (name, sub_dic) in hyperparams.items():
val_type = sub_dic['type']
vals = sub_dic['vals']
if (val_type == 'categorical'):
sample = hp.choice(name, vals)
elif (val_type == 'float'):
sample = hp.uniform(name, low=float(min(vals)), high=float(max(vals)))
elif (val_type == 'int'):
sample = hp.quniform(name, low=min(vals), high=max(vals), q=1)
space[name] = sample
return space | Make `hyperopt` space of hyperparameters.
Args:
model_type (str): name of model (e.g. random_forest)
classifier (bool): whether or not it's a classifier
fp_type (str, optional): type of fingerprint to use
Returns:
space (dict): hyperopt` space of hyperparameters | scripts/cp3d/sklearn/run.py | make_space | jkaraguesian/NeuralForceField | 0 | python | def make_space(model_type, classifier, fp_type='morgan'):
"\n Make `hyperopt` space of hyperparameters.\n Args:\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n fp_type (str, optional): type of fingerprint to use\n Returns:\n space (dict): hyperopt` space of hyperparameters\n "
space = {}
hyperparams = get_hyperparams(model_type, classifier, fp_type=fp_type)
for (name, sub_dic) in hyperparams.items():
val_type = sub_dic['type']
vals = sub_dic['vals']
if (val_type == 'categorical'):
sample = hp.choice(name, vals)
elif (val_type == 'float'):
sample = hp.uniform(name, low=float(min(vals)), high=float(max(vals)))
elif (val_type == 'int'):
sample = hp.quniform(name, low=min(vals), high=max(vals), q=1)
space[name] = sample
return space | def make_space(model_type, classifier, fp_type='morgan'):
"\n Make `hyperopt` space of hyperparameters.\n Args:\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n fp_type (str, optional): type of fingerprint to use\n Returns:\n space (dict): hyperopt` space of hyperparameters\n "
space = {}
hyperparams = get_hyperparams(model_type, classifier, fp_type=fp_type)
for (name, sub_dic) in hyperparams.items():
val_type = sub_dic['type']
vals = sub_dic['vals']
if (val_type == 'categorical'):
sample = hp.choice(name, vals)
elif (val_type == 'float'):
sample = hp.uniform(name, low=float(min(vals)), high=float(max(vals)))
elif (val_type == 'int'):
sample = hp.quniform(name, low=min(vals), high=max(vals), q=1)
space[name] = sample
return space<|docstring|>Make `hyperopt` space of hyperparameters.
Args:
model_type (str): name of model (e.g. random_forest)
classifier (bool): whether or not it's a classifier
fp_type (str, optional): type of fingerprint to use
Returns:
space (dict): hyperopt` space of hyperparameters<|endoftext|> |
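A toy `hyperopt` space in the same style (the hyperparameter names here are made up for illustration); `sample` draws one random configuration from it:

    from hyperopt import hp
    from hyperopt.pyll.stochastic import sample

    space = {
        'n_estimators': hp.quniform('n_estimators', 50, 500, 1),  # int-valued
        'max_features': hp.uniform('max_features', 0.1, 1.0),     # float
        'bootstrap': hp.choice('bootstrap', [True, False]),       # categorical
    }
    print(sample(space))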
7225d54376f19f89aa6a93c0811f8f36a5fd1508627b1d00016461bcac571cf4 | def make_sample_data(max_specs, data, props, seed):
"\n Get a sample of the data for hyperopt.\n Args:\n max_specs (int, optional): maximum number of species to use in hyperopt\n data (dict): dictionary with data for each split\n props (list[str]): properties you'll want to predict with the model.\n seed (int, optional): seed to use if we take a subsample of the data\n Returns:\n sample_data (dict): sub sample of `data`\n "
if (max_specs is None):
sample_data = data
return sample_data
sample_data = {}
sample_key = list(data['train'].keys())[0]
old_num_per_split = {split: len(sub_dic[sample_key]) for (split, sub_dic) in data.items()}
total_num = sum(old_num_per_split.values())
new_num_per_split = {split: int(((num / total_num) * max_specs)) for (split, num) in old_num_per_split.items()}
for (split, num) in new_num_per_split.items():
sample_dic = {}
for (i, smiles) in enumerate(data[split]['smiles']):
sub_dic = {prop: data[split][prop][i] for prop in props}
sample_dic.update({smiles: sub_dic})
if all([(i in [0, 1]) for i in data[split][props[0]]]):
dataset_type = 'classification'
else:
dataset_type = 'regression'
smiles_idx = {smiles: i for (i, smiles) in enumerate(data[split]['smiles'])}
keep_smiles = prop_split(max_specs=num, dataset_type=dataset_type, props=props, sample_dic=sample_dic, seed=seed)
sample_data[split] = {key: [] for key in data[split].keys()}
for smiles in keep_smiles:
sample_data[split]['smiles'].append(smiles)
idx = smiles_idx[smiles]
for prop in props:
val = data[split][prop][idx]
sample_data[split][prop].append(val)
return sample_data | Get a sample of the data for hyperopt.
Args:
max_specs (int, optional): maximum number of species to use in hyperopt
data (dict): dictionary with data for each split
props (list[str]): properties you'll want to predict with the model.
seed (int, optional): seed to use if we take a subsample of the data
Returns:
sample_data (dict): sub sample of `data` | scripts/cp3d/sklearn/run.py | make_sample_data | jkaraguesian/NeuralForceField | 0 | python | def make_sample_data(max_specs, data, props, seed):
"\n Get a sample of the data for hyperopt.\n Args:\n max_specs (int, optional): maximum number of species to use in hyperopt\n data (dict): dictionary with data for each split\n props (list[str]): properties you'll want to predict with the model.\n seed (int, optional): seed to use if we take a subsample of the data\n Returns:\n sample_data (dict): sub sample of `data`\n "
if (max_specs is None):
sample_data = data
return sample_data
sample_data = {}
sample_key = list(data['train'].keys())[0]
old_num_per_split = {split: len(sub_dic[sample_key]) for (split, sub_dic) in data.items()}
total_num = sum(old_num_per_split.values())
new_num_per_split = {split: int(((num / total_num) * max_specs)) for (split, num) in old_num_per_split.items()}
for (split, num) in new_num_per_split.items():
sample_dic = {}
for (i, smiles) in enumerate(data[split]['smiles']):
sub_dic = {prop: data[split][prop][i] for prop in props}
sample_dic.update({smiles: sub_dic})
if all([(i in [0, 1]) for i in data[split][props[0]]]):
dataset_type = 'classification'
else:
dataset_type = 'regression'
smiles_idx = {smiles: i for (i, smiles) in enumerate(data[split]['smiles'])}
keep_smiles = prop_split(max_specs=num, dataset_type=dataset_type, props=props, sample_dic=sample_dic, seed=seed)
sample_data[split] = {key: [] for key in data[split].keys()}
for smiles in keep_smiles:
sample_data[split]['smiles'].append(smiles)
idx = smiles_idx[smiles]
for prop in props:
val = data[split][prop][idx]
sample_data[split][prop].append(val)
return sample_data | def make_sample_data(max_specs, data, props, seed):
"\n Get a sample of the data for hyperopt.\n Args:\n max_specs (int, optional): maximum number of species to use in hyperopt\n data (dict): dictionary with data for each split\n props (list[str]): properties you'll want to predict with the model.\n seed (int, optional): seed to use if we take a subsample of the data\n Returns:\n sample_data (dict): sub sample of `data`\n "
if (max_specs is None):
sample_data = data
return sample_data
sample_data = {}
sample_key = list(data['train'].keys())[0]
old_num_per_split = {split: len(sub_dic[sample_key]) for (split, sub_dic) in data.items()}
total_num = sum(old_num_per_split.values())
new_num_per_split = {split: int(((num / total_num) * max_specs)) for (split, num) in old_num_per_split.items()}
for (split, num) in new_num_per_split.items():
sample_dic = {}
for (i, smiles) in enumerate(data[split]['smiles']):
sub_dic = {prop: data[split][prop][i] for prop in props}
sample_dic.update({smiles: sub_dic})
if all([(i in [0, 1]) for i in data[split][props[0]]]):
dataset_type = 'classification'
else:
dataset_type = 'regression'
smiles_idx = {smiles: i for (i, smiles) in enumerate(data[split]['smiles'])}
keep_smiles = prop_split(max_specs=num, dataset_type=dataset_type, props=props, sample_dic=sample_dic, seed=seed)
sample_data[split] = {key: [] for key in data[split].keys()}
for smiles in keep_smiles:
sample_data[split]['smiles'].append(smiles)
idx = smiles_idx[smiles]
for prop in props:
val = data[split][prop][idx]
sample_data[split][prop].append(val)
return sample_data<|docstring|>Get a sample of the data for hyperopt.
Args:
max_specs (int, optional): maximum number of species to use in hyperopt
data (dict): dictionary with data for each split
props (list[str]): properties you'll want to predict with the model.
seed (int, optional): seed to use if we take a subsample of the data
Returns:
sample_data (dict): sub sample of `data`<|endoftext|> |
6daf15b3793efcc30d89241f3ef699835659653cd119bda0529190548c42a3c7 | def get_splits(space, data, props, max_specs=None, seed=None, fp_type='morgan'):
"\n Get representations and values of the data given a certain\n set of Morgan hyperparameters.\n Args:\n space (dict): hyperopt` space of hyperparameters\n data (dict): dictionary with data for each split\n props (list[str]): properties you'll want to predict with the model.\n max_specs (int, optional): maximum number of species to use in hyperopt\n seed (int, optional): seed to use if we take a subsample of the data\n fp_type (str, optional): type of fingerprint to use\n Returns:\n xy_dic (dict): dictionary of the form {split: [x, y]} for each split,\n where x and y are arrays of the input and output.\n "
if (fp_type == 'morgan'):
fp_hyper_keys = MORGAN_HYPER_KEYS
elif (fp_type == 'atom_pair'):
fp_hyper_keys = PAIR_FP_HYPER_KEYS
fp_kwargs = {key: val for (key, val) in space.items() if (key in fp_hyper_keys)}
sample_data = make_sample_data(max_specs=max_specs, data=data, props=props, seed=seed)
xy_dic = {}
for name in ['train', 'val', 'test']:
(x, y) = make_mol_rep(data=sample_data, splits=[name], props=props, fp_type=fp_type, fp_kwargs=fp_kwargs)
xy_dic[name] = [x, y]
return xy_dic | Get representations and values of the data given a certain
set of Morgan hyperparameters.
Args:
space (dict): hyperopt` space of hyperparameters
data (dict): dictionary with data for each split
props (list[str]): properties you'll want to predict with the model.
max_specs (int, optional): maximum number of species to use in hyperopt
seed (int, optional): seed to use if we take a subsample of the data
fp_type (str, optional): type of fingerprint to use
Returns:
xy_dic (dict): dictionary of the form {split: [x, y]} for each split,
where x and y are arrays of the input and output. | scripts/cp3d/sklearn/run.py | get_splits | jkaraguesian/NeuralForceField | 0 | python | def get_splits(space, data, props, max_specs=None, seed=None, fp_type='morgan'):
"\n Get representations and values of the data given a certain\n set of Morgan hyperparameters.\n Args:\n space (dict): hyperopt` space of hyperparameters\n data (dict): dictionary with data for each split\n props (list[str]): properties you'll want to predict with the model.\n max_specs (int, optional): maximum number of species to use in hyperopt\n seed (int, optional): seed to use if we take a subsample of the data\n fp_type (str, optional): type of fingerprint to use\n Returns:\n xy_dic (dict): dictionary of the form {split: [x, y]} for each split,\n where x and y are arrays of the input and output.\n "
if (fp_type == 'morgan'):
fp_hyper_keys = MORGAN_HYPER_KEYS
elif (fp_type == 'atom_pair'):
fp_hyper_keys = PAIR_FP_HYPER_KEYS
fp_kwargs = {key: val for (key, val) in space.items() if (key in fp_hyper_keys)}
sample_data = make_sample_data(max_specs=max_specs, data=data, props=props, seed=seed)
xy_dic = {}
for name in ['train', 'val', 'test']:
(x, y) = make_mol_rep(data=sample_data, splits=[name], props=props, fp_type=fp_type, fp_kwargs=fp_kwargs)
xy_dic[name] = [x, y]
return xy_dic | def get_splits(space, data, props, max_specs=None, seed=None, fp_type='morgan'):
"\n Get representations and values of the data given a certain\n set of Morgan hyperparameters.\n Args:\n space (dict): hyperopt` space of hyperparameters\n data (dict): dictionary with data for each split\n props (list[str]): properties you'll want to predict with the model.\n max_specs (int, optional): maximum number of species to use in hyperopt\n seed (int, optional): seed to use if we take a subsample of the data\n fp_type (str, optional): type of fingerprint to use\n Returns:\n xy_dic (dict): dictionary of the form {split: [x, y]} for each split,\n where x and y are arrays of the input and output.\n "
if (fp_type == 'morgan'):
fp_hyper_keys = MORGAN_HYPER_KEYS
elif (fp_type == 'atom_pair'):
fp_hyper_keys = PAIR_FP_HYPER_KEYS
fp_kwargs = {key: val for (key, val) in space.items() if (key in fp_hyper_keys)}
sample_data = make_sample_data(max_specs=max_specs, data=data, props=props, seed=seed)
xy_dic = {}
for name in ['train', 'val', 'test']:
(x, y) = make_mol_rep(data=sample_data, splits=[name], props=props, fp_type=fp_type, fp_kwargs=fp_kwargs)
xy_dic[name] = [x, y]
return xy_dic<|docstring|>Get representations and values of the data given a certain
set of Morgan hyperparameters.
Args:
space (dict): hyperopt` space of hyperparameters
data (dict): dictionary with data for each split
props (list[str]): properties you'll want to predict with the model.
max_specs (int, optional): maximum number of species to use in hyperopt
seed (int, optional): seed to use if we take a subsample of the data
fp_type (str, optional): type of fingerprint to use
Returns:
xy_dic (dict): dictionary of the form {split: [x, y]} for each split,
where x and y are arrays of the input and output.<|endoftext|> |
156b60ae28cbe8d9e0a9f76a3548f8a45e3cccb58f78fcd6bba66ee7f6303fd3 | def balance_weights(y_train):
'\n Make balanced weights. This can apply to a classification\n model being fit by a classifier or by a regressor.\n Args:\n y_train (np.array): training labels\n Returns:\n sample_weight (np.array): weights for each \n item.\n '
pos_idx = (y_train == 1).nonzero()[0]
neg_idx = (y_train == 0).nonzero()[0]
num_pos = pos_idx.shape[0]
num_neg = neg_idx.shape[0]
sample_weight = np.ones_like(y_train).astype('float')
sample_weight[pos_idx] = (1 / (2 * num_pos))
sample_weight[neg_idx] = (1 / (2 * num_neg))
return sample_weight | Make balanced weights. This can apply to a classification
model being fit by a classifier or by a regressor.
Args:
y_train (np.array): training labels
Returns:
sample_weight (np.array): weights for each
item. | scripts/cp3d/sklearn/run.py | balance_weights | jkaraguesian/NeuralForceField | 0 | python | def balance_weights(y_train):
'\n Make balanced weights. This can apply to a classification\n model being fit by a classifier or by a regressor.\n Args:\n y_train (np.array): training labels\n Returns:\n sample_weight (np.array): weights for each \n item.\n '
pos_idx = (y_train == 1).nonzero()[0]
neg_idx = (y_train == 0).nonzero()[0]
num_pos = pos_idx.shape[0]
num_neg = neg_idx.shape[0]
sample_weight = np.ones_like(y_train).astype('float')
sample_weight[pos_idx] = (1 / (2 * num_pos))
sample_weight[neg_idx] = (1 / (2 * num_neg))
return sample_weight | def balance_weights(y_train):
'\n Make balanced weights. This can apply to a classification\n model being fit by a classifier or by a regressor.\n Args:\n y_train (np.array): training labels\n Returns:\n sample_weight (np.array): weights for each \n item.\n '
pos_idx = (y_train == 1).nonzero()[0]
neg_idx = (y_train == 0).nonzero()[0]
num_pos = pos_idx.shape[0]
num_neg = neg_idx.shape[0]
sample_weight = np.ones_like(y_train).astype('float')
sample_weight[pos_idx] = (1 / (2 * num_pos))
sample_weight[neg_idx] = (1 / (2 * num_neg))
return sample_weight<|docstring|>Make balanced weights. This can apply to a classification
model being fit by a classifier or by a regressor.
Args:
y_train (np.array): training labels
Returns:
sample_weight (np.array): weights for each
item.<|endoftext|> |
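Quick numerical check of the scheme above: each class ends up carrying half of the total weight, so the weights always sum to 1.

    import numpy as np

    y = np.array([1, 0, 0, 0])
    w = np.ones_like(y).astype('float')
    w[y == 1] = 1 / (2 * (y == 1).sum())  # single positive -> 0.5
    w[y == 0] = 1 / (2 * (y == 0).sum())  # three negatives -> 1/6 each
    print(w, w.sum())  # [0.5 0.1667 0.1667 0.1667] 1.0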
241131676c26d50acea34016c3671d52599c2aecfe6b950b73143d835329f63a | def run_sklearn(space, seed, model_type, classifier, x_train, y_train, x_test, y_test):
"\n Train an sklearn model.\n Args:\n space (dict): hyperopt` space of hyperparameters\n seed (int): random seed\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n x_train (np.array): input in training set\n y_train (np.array): output in training set\n x_test (np.array): input in test set\n y_test (np.array): output in test set\n Returns:\n pred_test (np.array): predicted test set values\n y_test (np.array): output in test set\n pred_fn (callable): trained regressor or classifier\n "
sk_hyperparams = {key: val for (key, val) in space.items() if (key not in MORGAN_HYPER_KEYS)}
    if classifier:
        if (model_type == 'random_forest'):
            pred_fn = RandomForestClassifier(class_weight='balanced', random_state=seed, **sk_hyperparams)
        else:
            raise NotImplementedError
    elif (model_type == 'random_forest'):
        pred_fn = RandomForestRegressor(random_state=seed, **sk_hyperparams)
    elif (model_type == 'kernel_ridge'):
        pred_fn = KernelRidge(**sk_hyperparams)
    else:
        raise NotImplementedError
    sample_weight = None
    if all([(i in [0, 1]) for i in y_train]):
        sample_weight = balance_weights(y_train)
    pred_fn.fit(x_train, y_train, sample_weight=sample_weight)
    pred_test = pred_fn.predict(x_test)
    return (pred_test, y_test, pred_fn) | Train an sklearn model.
Args:
space (dict): hyperopt` space of hyperparameters
seed (int): random seed
model_type (str): name of model (e.g. random_forest)
classifier (bool): whether or not it's a classifier
x_train (np.array): input in training set
y_train (np.array): output in training set
x_test (np.array): input in test set
y_test (np.array): output in test set
Returns:
pred_test (np.array): predicted test set values
y_test (np.array): output in test set
pred_fn (callable): trained regressor or classifier | scripts/cp3d/sklearn/run.py | run_sklearn | jkaraguesian/NeuralForceField | 0 | python | def run_sklearn(space, seed, model_type, classifier, x_train, y_train, x_test, y_test):
"\n Train an sklearn model.\n Args:\n space (dict): hyperopt` space of hyperparameters\n seed (int): random seed\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n x_train (np.array): input in training set\n y_train (np.array): output in training set\n x_test (np.array): input in test set\n y_test (np.array): output in test set\n Returns:\n pred_test (np.array): predicted test set values\n y_test (np.array): output in test set\n pred_fn (callable): trained regressor or classifier\n "
sk_hyperparams = {key: val for (key, val) in space.items() if (key not in MORGAN_HYPER_KEYS)}
    if classifier:
        if (model_type == 'random_forest'):
            pred_fn = RandomForestClassifier(class_weight='balanced', random_state=seed, **sk_hyperparams)
        else:
            raise NotImplementedError
    elif (model_type == 'random_forest'):
        pred_fn = RandomForestRegressor(random_state=seed, **sk_hyperparams)
    elif (model_type == 'kernel_ridge'):
        pred_fn = KernelRidge(**sk_hyperparams)
    else:
        raise NotImplementedError
    sample_weight = None
    if all([(i in [0, 1]) for i in y_train]):
        sample_weight = balance_weights(y_train)
    pred_fn.fit(x_train, y_train, sample_weight=sample_weight)
    pred_test = pred_fn.predict(x_test)
    return (pred_test, y_test, pred_fn) | def run_sklearn(space, seed, model_type, classifier, x_train, y_train, x_test, y_test):
"\n Train an sklearn model.\n Args:\n space (dict): hyperopt` space of hyperparameters\n seed (int): random seed\n model_type (str): name of model (e.g. random_forest)\n classifier (bool): whether or not it's a classifier\n x_train (np.array): input in training set\n y_train (np.array): output in training set\n x_test (np.array): input in test set\n y_test (np.array): output in test set\n Returns:\n pred_test (np.array): predicted test set values\n y_test (np.array): output in test set\n pred_fn (callable): trained regressor or classifier\n "
sk_hyperparams = {key: val for (key, val) in space.items() if (key not in MORGAN_HYPER_KEYS)}
    if classifier:
        if (model_type == 'random_forest'):
            pred_fn = RandomForestClassifier(class_weight='balanced', random_state=seed, **sk_hyperparams)
        else:
            raise NotImplementedError
    elif (model_type == 'random_forest'):
        pred_fn = RandomForestRegressor(random_state=seed, **sk_hyperparams)
    elif (model_type == 'kernel_ridge'):
        pred_fn = KernelRidge(**sk_hyperparams)
    else:
        raise NotImplementedError
    sample_weight = None
    if all([(i in [0, 1]) for i in y_train]):
        sample_weight = balance_weights(y_train)
    pred_fn.fit(x_train, y_train, sample_weight=sample_weight)
    pred_test = pred_fn.predict(x_test)
    return (pred_test, y_test, pred_fn)<|docstring|>Train an sklearn model.
Args:
space (dict): hyperopt` space of hyperparameters
seed (int): random seed
model_type (str): name of model (e.g. random_forest)
classifier (bool): whether or not it's a classifier
x_train (np.array): input in training set
y_train (np.array): output in training set
x_test (np.array): input in test set
y_test (np.array): output in test set
Returns:
pred_test (np.array): predicted test set values
y_test (np.array): output in test set
pred_fn (callable): trained regressor or classifier<|endoftext|> |
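A minimal end-to-end sketch of the regressor branch, with random arrays standing in for the fingerprints (assumes scikit-learn is installed):

    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.RandomState(0)
    x_train, y_train = rng.rand(50, 16), rng.rand(50)
    x_test = rng.rand(10, 16)

    model = RandomForestRegressor(n_estimators=20, random_state=0)
    model.fit(x_train, y_train)
    print(model.predict(x_test).shape)  # (10,)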
dc2a284acbc243a7306d09de989e3e296b8877649b513f2d36432e95e8f5cd90 | def get_metrics(pred, real, score_metrics, props):
'\n Get scores on various metrics.\n Args:\n pred (np.array): predicted values\n real (np.array): real values\n score_metrics (list[str]): metrics to use\n props (list[str]): properties being predicted.\n Returns:\n metric_scores (dict): dictionary of the form\n {prop: sub_dic} for each property, where sub_dic\n has the form {metric: score} for each metric.\n '
if (len(props) == 1):
pred = pred.reshape((- 1), 1)
real = real.reshape((- 1), 1)
metric_scores = {}
for (i, prop) in enumerate(props):
metric_scores[prop] = {}
for metric in score_metrics:
            this_pred = pred[:, i]
            this_real = real[:, i]
score = apply_metric(metric=metric, pred=this_pred, actual=this_real)
metric_scores[prop][metric] = float(score)
return metric_scores | Get scores on various metrics.
Args:
pred (np.array): predicted values
real (np.array): real values
score_metrics (list[str]): metrics to use
props (list[str]): properties being predicted.
Returns:
metric_scores (dict): dictionary of the form
{prop: sub_dic} for each property, where sub_dic
has the form {metric: score} for each metric. | scripts/cp3d/sklearn/run.py | get_metrics | jkaraguesian/NeuralForceField | 0 | python | def get_metrics(pred, real, score_metrics, props):
'\n Get scores on various metrics.\n Args:\n pred (np.array): predicted values\n real (np.array): real values\n score_metrics (list[str]): metrics to use\n props (list[str]): properties being predicted.\n Returns:\n metric_scores (dict): dictionary of the form\n {prop: sub_dic} for each property, where sub_dic\n has the form {metric: score} for each metric.\n '
if (len(props) == 1):
pred = pred.reshape((- 1), 1)
real = real.reshape((- 1), 1)
metric_scores = {}
for (i, prop) in enumerate(props):
metric_scores[prop] = {}
for metric in score_metrics:
            this_pred = pred[:, i]
            this_real = real[:, i]
score = apply_metric(metric=metric, pred=this_pred, actual=this_real)
metric_scores[prop][metric] = float(score)
return metric_scores | def get_metrics(pred, real, score_metrics, props):
'\n Get scores on various metrics.\n Args:\n pred (np.array): predicted values\n real (np.array): real values\n score_metrics (list[str]): metrics to use\n props (list[str]): properties being predicted.\n Returns:\n metric_scores (dict): dictionary of the form\n {prop: sub_dic} for each property, where sub_dic\n has the form {metric: score} for each metric.\n '
if (len(props) == 1):
pred = pred.reshape((- 1), 1)
real = real.reshape((- 1), 1)
metric_scores = {}
for (i, prop) in enumerate(props):
metric_scores[prop] = {}
for metric in score_metrics:
            this_pred = pred[:, i]
            this_real = real[:, i]
score = apply_metric(metric=metric, pred=this_pred, actual=this_real)
metric_scores[prop][metric] = float(score)
return metric_scores<|docstring|>Get scores on various metrics.
Args:
pred (np.array): predicted values
real (np.array): real values
score_metrics (list[str]): metrics to use
props (list[str]): properties being predicted.
Returns:
metric_scores (dict): dictionary of the form
{prop: sub_dic} for each property, where sub_dic
has the form {metric: score} for each metric.<|endoftext|> |
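Tiny illustration of the per-property slicing restored above: column i of an (n_samples, n_props) array holds the values for property i.

    import numpy as np

    pred = np.array([[0.1, 1.0], [0.4, 2.0]])
    for i, prop in enumerate(['prop_a', 'prop_b']):
        print(prop, pred[:, i])  # prop_a [0.1 0.4], prop_b [1. 2.]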
7d6e462b7ea3691a97e968c319a6ea585e6d3395dbf4c0cfbaa36395008b87a9 | def update_saved_scores(score_path, space, metrics):
'\n Update saved hyperparameter scores with new results.\n Args:\n score_path (str): path to JSON file with scores\n space (dict): hyperopt` space of hyperparameters\n metrics (dict): scores on various metrics.\n Returns:\n None\n '
if os.path.isfile(score_path):
with open(score_path, 'r') as f:
scores = json.load(f)
else:
scores = []
scores.append({**space, **metrics})
with open(score_path, 'w') as f:
json.dump(scores, f, indent=4, sort_keys=True) | Update saved hyperparameter scores with new results.
Args:
score_path (str): path to JSON file with scores
space (dict): hyperopt` space of hyperparameters
metrics (dict): scores on various metrics.
Returns:
None | scripts/cp3d/sklearn/run.py | update_saved_scores | jkaraguesian/NeuralForceField | 0 | python | def update_saved_scores(score_path, space, metrics):
'\n Update saved hyperparameter scores with new results.\n Args:\n score_path (str): path to JSON file with scores\n space (dict): hyperopt` space of hyperparameters\n metrics (dict): scores on various metrics.\n Returns:\n None\n '
if os.path.isfile(score_path):
with open(score_path, 'r') as f:
scores = json.load(f)
else:
scores = []
scores.append({**space, **metrics})
with open(score_path, 'w') as f:
json.dump(scores, f, indent=4, sort_keys=True) | def update_saved_scores(score_path, space, metrics):
'\n Update saved hyperparameter scores with new results.\n Args:\n score_path (str): path to JSON file with scores\n space (dict): hyperopt` space of hyperparameters\n metrics (dict): scores on various metrics.\n Returns:\n None\n '
if os.path.isfile(score_path):
with open(score_path, 'r') as f:
scores = json.load(f)
else:
scores = []
scores.append({**space, **metrics})
with open(score_path, 'w') as f:
json.dump(scores, f, indent=4, sort_keys=True)<|docstring|>Update saved hyperparameter scores with new results.
Args:
score_path (str): path to JSON file with scores
space (dict): `hyperopt` space of hyperparameters
metrics (dict): scores on various metrics.
Returns:
None<|endoftext|> |
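Illustrative use of `update_saved_scores` — each trial appends its hyperparameters and scores to one growing JSON log (the file name and keys below are hypothetical):

space = {'n_estimators': 300, 'max_depth': 8}     # hypothetical hyperparameters
metrics = {'sol': {'mae': 0.42}}                  # hypothetical per-property scores
update_saved_scores('hyper_scores.json', space, metrics)
# 'hyper_scores.json' now holds a list of {**space, **metrics} records, one per trial.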
82f68e7593f20c01ef089e6056fb86627aae2ca1d88b53952b2cf9736c0dedc1 | def make_objective(data, metric_name, seed, classifier, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type='morgan'):
"\n Make objective function for `hyperopt`.\n Args:\n data (dict): dictionary with data for each split\n metric_name (str): metric to optimize\n seed (int): random seed\n classifier (bool): whether the model is a classifier\n hyper_score_path (str): path to JSON file to save hyperparameter\n scores.\n model_type (str): name of model type to be trained.\n props (list[str]): properties you'll want to predict with themodel.\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n Returns:\n objective (callable): objective function for use in `hyperopt`.\n "
hyperparams = get_hyperparams(model_type, classifier, custom_hyps, fp_type=fp_type)
param_type_dic = {name: sub_dic['type'] for (name, sub_dic) in hyperparams.items()}
def objective(space):
for (key, typ) in param_type_dic.items():
if (typ == 'int'):
space[key] = int(space[key])
if isinstance(hyperparams[key]['vals'][0], bool):
space[key] = bool(space[key])
xy_dic = get_splits(space=space, data=data, props=props, max_specs=max_specs, seed=seed, fp_type=fp_type)
(x_val, y_val) = xy_dic['val']
(x_train, y_train) = xy_dic['train']
(pred, real, _) = run_sklearn(space=space, seed=seed, model_type=model_type, classifier=classifier, x_train=x_train, y_train=y_train, x_test=x_val, y_test=y_val)
metrics = get_metrics(pred, real, [metric_name], props=props)
score = np.mean([metrics[prop][metric_name] for prop in props])
metric_obj = METRIC_DIC[convert_metric(metric_name)]
if (metric_obj == 'maximize'):
score *= (- 1)
update_saved_scores(hyper_score_path, space, metrics)
return score
return objective | Make objective function for `hyperopt`.
Args:
data (dict): dictionary with data for each split
metric_name (str): metric to optimize
seed (int): random seed
classifier (bool): whether the model is a classifier
hyper_score_path (str): path to JSON file to save hyperparameter
scores.
model_type (str): name of model type to be trained.
props (list[str]): properties you'll want to predict with the model.
max_specs (int): maximum number of species to use in hyperopt
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
objective (callable): objective function for use in `hyperopt`. | scripts/cp3d/sklearn/run.py | make_objective | jkaraguesian/NeuralForceField | 0 | python | def make_objective(data, metric_name, seed, classifier, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type='morgan'):
"\n Make objective function for `hyperopt`.\n Args:\n data (dict): dictionary with data for each split\n metric_name (str): metric to optimize\n seed (int): random seed\n classifier (bool): whether the model is a classifier\n hyper_score_path (str): path to JSON file to save hyperparameter\n scores.\n model_type (str): name of model type to be trained.\n props (list[str]): properties you'll want to predict with themodel.\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n Returns:\n objective (callable): objective function for use in `hyperopt`.\n "
hyperparams = get_hyperparams(model_type, classifier, custom_hyps, fp_type=fp_type)
param_type_dic = {name: sub_dic['type'] for (name, sub_dic) in hyperparams.items()}
def objective(space):
for (key, typ) in param_type_dic.items():
if (typ == 'int'):
space[key] = int(space[key])
if isinstance(hyperparams[key]['vals'][0], bool):
space[key] = bool(space[key])
xy_dic = get_splits(space=space, data=data, props=props, max_specs=max_specs, seed=seed, fp_type=fp_type)
(x_val, y_val) = xy_dic['val']
(x_train, y_train) = xy_dic['train']
(pred, real, _) = run_sklearn(space=space, seed=seed, model_type=model_type, classifier=classifier, x_train=x_train, y_train=y_train, x_test=x_val, y_test=y_val)
metrics = get_metrics(pred, real, [metric_name], props=props)
score = np.mean([metrics[prop][metric_name] for prop in props])
metric_obj = METRIC_DIC[convert_metric(metric_name)]
if (metric_obj == 'maximize'):
score *= (- 1)
update_saved_scores(hyper_score_path, space, metrics)
return score
return objective | def make_objective(data, metric_name, seed, classifier, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type='morgan'):
"\n Make objective function for `hyperopt`.\n Args:\n data (dict): dictionary with data for each split\n metric_name (str): metric to optimize\n seed (int): random seed\n classifier (bool): whether the model is a classifier\n hyper_score_path (str): path to JSON file to save hyperparameter\n scores.\n model_type (str): name of model type to be trained.\n props (list[str]): properties you'll want to predict with themodel.\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n Returns:\n objective (callable): objective function for use in `hyperopt`.\n "
hyperparams = get_hyperparams(model_type, classifier, custom_hyps, fp_type=fp_type)
param_type_dic = {name: sub_dic['type'] for (name, sub_dic) in hyperparams.items()}
def objective(space):
for (key, typ) in param_type_dic.items():
if (typ == 'int'):
space[key] = int(space[key])
if isinstance(hyperparams[key]['vals'][0], bool):
space[key] = bool(space[key])
xy_dic = get_splits(space=space, data=data, props=props, max_specs=max_specs, seed=seed, fp_type=fp_type)
(x_val, y_val) = xy_dic['val']
(x_train, y_train) = xy_dic['train']
(pred, real, _) = run_sklearn(space=space, seed=seed, model_type=model_type, classifier=classifier, x_train=x_train, y_train=y_train, x_test=x_val, y_test=y_val)
metrics = get_metrics(pred, real, [metric_name], props=props)
score = np.mean([metrics[prop][metric_name] for prop in props])
metric_obj = METRIC_DIC[convert_metric(metric_name)]
if (metric_obj == 'maximize'):
score *= (- 1)
update_saved_scores(hyper_score_path, space, metrics)
return score
return objective<|docstring|>Make objective function for `hyperopt`.
Args:
data (dict): dictionary with data for each split
metric_name (str): metric to optimize
seed (int): random seed
classifier (bool): whether the model is a classifier
hyper_score_path (str): path to JSON file to save hyperparameter
scores.
model_type (str): name of model type to be trained.
props (list[str]): properties you'll want to predict with the model.
max_specs (int): maximum number of species to use in hyperopt
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
objective (callable): objective function for use in `hyperopt`.<|endoftext|> |
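A sketch of how the returned objective plugs into hyperopt, mirroring the `fmin`/`tpe` call used in `get_or_load_hypers` further down; `data`, the model-type string, and the argument values are assumptions:

from hyperopt import fmin, tpe
import numpy as np

objective = make_objective(data=data, metric_name='mae', seed=0, classifier=False,
                           hyper_score_path='hyper_scores.json',
                           model_type='random_forest',  # assumed model-type name
                           props=['sol'], max_specs=None,  # None assumed to mean "use all species"
                           custom_hyps={})
space = make_space('random_forest', False, 'morgan')
best = fmin(objective, space, algo=tpe.suggest, max_evals=50,
            rstate=np.random.RandomState(0))
# hyperopt minimizes, so make_objective flips the sign of metrics it should maximize.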
9ee4e61acefa133034794f17a5b703c299f0f3c47512eec54b929391e2a2ecfa | def translate_best_params(best_params, model_type, classifier, fp_type='morgan'):
'\n Translate the hyperparameters outputted by hyperopt.\n Args:\n best_params (dict): parameters outputted by hyperopt\n model_type (str): name of model type to be trained.\n classifier (bool): whether the model is a classifier\n fp_type (str, optional): type of fingerprint to use\n Returns:\n translate_params (dict): translated parameters\n '
hyperparams = get_hyperparams(model_type, classifier, fp_type=fp_type)
param_type_dic = {name: sub_dic['type'] for (name, sub_dic) in hyperparams.items()}
translate_params = copy.deepcopy(best_params)
for (key, typ) in param_type_dic.items():
if (typ == 'int'):
translate_params[key] = int(best_params[key])
if (typ == 'categorical'):
translate_params[key] = hyperparams[key]['vals'][best_params[key]]
if (type(hyperparams[key]['vals'][0]) is bool):
translate_params[key] = bool(best_params[key])
return translate_params | Translate the hyperparameters outputted by hyperopt.
Args:
best_params (dict): parameters outputted by hyperopt
model_type (str): name of model type to be trained.
classifier (bool): whether the model is a classifier
fp_type (str, optional): type of fingerprint to use
Returns:
translate_params (dict): translated parameters | scripts/cp3d/sklearn/run.py | translate_best_params | jkaraguesian/NeuralForceField | 0 | python | def translate_best_params(best_params, model_type, classifier, fp_type='morgan'):
'\n Translate the hyperparameters outputted by hyperopt.\n Args:\n best_params (dict): parameters outputted by hyperopt\n model_type (str): name of model type to be trained.\n classifier (bool): whether the model is a classifier\n fp_type (str, optional): type of fingerprint to use\n Returns:\n translate_params (dict): translated parameters\n '
hyperparams = get_hyperparams(model_type, classifier, fp_type=fp_type)
param_type_dic = {name: sub_dic['type'] for (name, sub_dic) in hyperparams.items()}
translate_params = copy.deepcopy(best_params)
for (key, typ) in param_type_dic.items():
if (typ == 'int'):
translate_params[key] = int(best_params[key])
if (typ == 'categorical'):
translate_params[key] = hyperparams[key]['vals'][best_params[key]]
if (type(hyperparams[key]['vals'][0]) is bool):
translate_params[key] = bool(best_params[key])
return translate_params | def translate_best_params(best_params, model_type, classifier, fp_type='morgan'):
'\n Translate the hyperparameters outputted by hyperopt.\n Args:\n best_params (dict): parameters outputted by hyperopt\n model_type (str): name of model type to be trained.\n classifier (bool): whether the model is a classifier\n fp_type (str, optional): type of fingerprint to use\n Returns:\n translate_params (dict): translated parameters\n '
hyperparams = get_hyperparams(model_type, classifier, fp_type=fp_type)
param_type_dic = {name: sub_dic['type'] for (name, sub_dic) in hyperparams.items()}
translate_params = copy.deepcopy(best_params)
for (key, typ) in param_type_dic.items():
if (typ == 'int'):
translate_params[key] = int(best_params[key])
if (typ == 'categorical'):
translate_params[key] = hyperparams[key]['vals'][best_params[key]]
if (type(hyperparams[key]['vals'][0]) is bool):
translate_params[key] = bool(best_params[key])
return translate_params<|docstring|>Translate the hyperparameters outputted by hyperopt.
Args:
best_params (dict): parameters outputted by hyperopt
model_type (str): name of model type to be trained.
classifier (bool): whether the model is a classifier
fp_type (str, optional): type of fingerprint to use
Returns:
translate_params (dict): translated parameters<|endoftext|> |
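A hypothetical before/after: `fmin` reports categorical choices as indices and integer-typed values as floats, and `translate_best_params` maps them back onto the hyperparameter definitions:

best_params = {'n_estimators': 212.0, 'criterion': 1}  # raw hyperopt output (hypothetical)
# If get_hyperparams(...)['criterion']['vals'] == ['gini', 'entropy'], then
translate_best_params(best_params, model_type='random_forest', classifier=True)
# -> {'n_estimators': 212, 'criterion': 'entropy'}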
ef7d517ec4f2d8b7acca824e3e3e23944d0a6140a3173faca4d4fde54d0391bd | def get_preds(pred_fn, score_metrics, xy_dic, props):
'\n Get predictions and scores from a model.\n Args:\n pred_fn (callable): trained model\n score_metrics (list[str]): metrics to evaluate\n xy_dic (dict): dictionary of inputs and outputs for\n each split\n props (list[str]): properties to predict\n Returns:\n results (dict): dictionary of the form {prop: sub_dic}\n for each prop, where sub_dic has the form {split: \n metric_scores} for each split of the dataset.\n '
results = {prop: {} for prop in props}
for name in ['train', 'val', 'test']:
(x, real) = xy_dic[name]
pred = pred_fn.predict(x)
metrics = get_metrics(pred=pred, real=real, score_metrics=score_metrics, props=props)
for prop in props:
results[prop][name] = {'true': real.tolist(), 'pred': pred.tolist(), **metrics[prop]}
return results | Get predictions and scores from a model.
Args:
pred_fn (callable): trained model
score_metrics (list[str]): metrics to evaluate
xy_dic (dict): dictionary of inputs and outputs for
each split
props (list[str]): properties to predict
Returns:
results (dict): dictionary of the form {prop: sub_dic}
for each prop, where sub_dic has the form {split:
metric_scores} for each split of the dataset. | scripts/cp3d/sklearn/run.py | get_preds | jkaraguesian/NeuralForceField | 0 | python | def get_preds(pred_fn, score_metrics, xy_dic, props):
'\n Get predictions and scores from a model.\n Args:\n pred_fn (callable): trained model\n score_metrics (list[str]): metrics to evaluate\n xy_dic (dict): dictionary of inputs and outputs for\n each split\n props (list[str]): properties to predict\n Returns:\n results (dict): dictionary of the form {prop: sub_dic}\n for each prop, where sub_dic has the form {split: \n metric_scores} for each split of the dataset.\n '
results = {prop: {} for prop in props}
for name in ['train', 'val', 'test']:
(x, real) = xy_dic[name]
pred = pred_fn.predict(x)
metrics = get_metrics(pred=pred, real=real, score_metrics=score_metrics, props=props)
for prop in props:
results[prop][name] = {'true': real.tolist(), 'pred': pred.tolist(), **metrics[prop]}
return results | def get_preds(pred_fn, score_metrics, xy_dic, props):
'\n Get predictions and scores from a model.\n Args:\n pred_fn (callable): trained model\n score_metrics (list[str]): metrics to evaluate\n xy_dic (dict): dictionary of inputs and outputs for\n each split\n props (list[str]): properties to predict\n Returns:\n results (dict): dictionary of the form {prop: sub_dic}\n for each prop, where sub_dic has the form {split: \n metric_scores} for each split of the dataset.\n '
results = {prop: {} for prop in props}
for name in ['train', 'val', 'test']:
(x, real) = xy_dic[name]
pred = pred_fn.predict(x)
metrics = get_metrics(pred=pred, real=real, score_metrics=score_metrics, props=props)
for prop in props:
results[prop][name] = {'true': real.tolist(), 'pred': pred.tolist(), **metrics[prop]}
return results<|docstring|>Get predictions and scores from a model.
Args:
pred_fn (callable): trained model
score_metrics (list[str]): metrics to evaluate
xy_dic (dict): dictionary of inputs and outputs for
each split
props (list[str]): properties to predict
Returns:
results (dict): dictionary of the form {prop: sub_dic}
for each prop, where sub_dic has the form {split:
metric_scores} for each split of the dataset.<|endoftext|> |
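The nested dictionary can then be indexed as results[prop][split]; the names below are illustrative:

results = get_preds(pred_fn=model, score_metrics=['mae', 'r2'],
                    xy_dic=xy_dic, props=['sol'])
results['sol']['test']['mae']        # scalar test-set score
len(results['sol']['test']['pred'])  # one prediction per test example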
efc2c53da2f5a8c9dff274bdd9714faca2d0f2a47611e62e8e3e93284c537b5b | def save_preds(ensemble_preds, ensemble_scores, pred_save_path, score_save_path, pred_fns):
'\n Save predictions and models.\n Args:\n ensemble_preds (dict): predictions\n ensemble_scores (dict): scores\n pred_save_path (str): path to JSON file in which to save\n predictions.\n score_save_path (str): path to JSON file in which to save\n scores.\n pred_fns (dict): Dictionary of fitted models for each seed\n\n Returns:\n None\n '
with open(score_save_path, 'w') as f:
json.dump(ensemble_scores, f, indent=4, sort_keys=True)
with open(pred_save_path, 'w') as f:
json.dump(ensemble_preds, f, indent=4, sort_keys=True)
model_save_path = pred_save_path.replace('.json', '_models.pickle')
with open(model_save_path, 'wb') as f:
pickle.dump(pred_fns, f)
print(f'Predictions saved to {pred_save_path}')
print(f'Scores saved to {score_save_path}')
print(f'Models saved to {model_save_path}') | Save predictions and models.
Args:
ensemble_preds (dict): predictions
ensemble_scores (dict): scores
pred_save_path (str): path to JSON file in which to save
predictions.
score_save_path (str): path to JSON file in which to save
scores.
pred_fns (dict): Dictionary of fitted models for each seed
Returns:
None | scripts/cp3d/sklearn/run.py | save_preds | jkaraguesian/NeuralForceField | 0 | python | def save_preds(ensemble_preds, ensemble_scores, pred_save_path, score_save_path, pred_fns):
'\n Save predictions and models.\n Args:\n ensemble_preds (dict): predictions\n ensemble_scores (dict): scores\n pred_save_path (str): path to JSON file in which to save\n predictions.\n score_save_path (str): path to JSON file in which to save\n scores.\n pred_fns (dict): Dictionary of fitted models for each seed\n\n Returns:\n None\n '
with open(score_save_path, 'w') as f:
json.dump(ensemble_scores, f, indent=4, sort_keys=True)
with open(pred_save_path, 'w') as f:
json.dump(ensemble_preds, f, indent=4, sort_keys=True)
model_save_path = pred_save_path.replace('.json', '_models.pickle')
with open(model_save_path, 'wb') as f:
pickle.dump(pred_fns, f)
print(f'Predictions saved to {pred_save_path}')
print(f'Scores saved to {score_save_path}')
print(f'Models saved to {model_save_path}') | def save_preds(ensemble_preds, ensemble_scores, pred_save_path, score_save_path, pred_fns):
'\n Save predictions and models.\n Args:\n ensemble_preds (dict): predictions\n ensemble_scores (dict): scores\n pred_save_path (str): path to JSON file in which to save\n predictions.\n score_save_path (str): path to JSON file in which to save\n scores.\n pred_fns (dict): Dictionary of fitted models for each seed\n\n Returns:\n None\n '
with open(score_save_path, 'w') as f:
json.dump(ensemble_scores, f, indent=4, sort_keys=True)
with open(pred_save_path, 'w') as f:
json.dump(ensemble_preds, f, indent=4, sort_keys=True)
model_save_path = pred_save_path.replace('.json', '_models.pickle')
with open(model_save_path, 'wb') as f:
pickle.dump(pred_fns, f)
print(f'Predictions saved to {pred_save_path}')
print(f'Scores saved to {score_save_path}')
print(f'Models saved to {model_save_path}')<|docstring|>Save predictions and models.
Args:
ensemble_preds (dict): predictions
ensemble_scores (dict): scores
pred_save_path (str): path to JSON file in which to save
predictions.
score_save_path (str): path to JSON file in which to save
scores.
pred_fns (dict): Dictionary of fitted models for each seed
Returns:
None<|endoftext|> |
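Because the model path is derived from `pred_save_path`, the fitted estimators can be reloaded later; a minimal sketch with hypothetical paths:

import pickle

with open('preds_models.pickle', 'rb') as f:  # if pred_save_path was 'preds.json'
    models = pickle.load(f)                   # {seed: fitted sklearn model}
new_preds = models[0].predict(x_test)         # x_test assumed to exist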
d71ac2f0072aa63e6ac641537ba1875a30211cda1e5fb3054381f82101f3e98d | def get_or_load_hypers(hyper_save_path, rerun_hyper, data, hyper_metric, seed, classifier, num_samples, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type='morgan'):
"\n Optimize hyperparameters or load hyperparameters if\n they've already been otpimized.\n Args:\n hyper_save_path (str): path to best hyperparameters\n rerun_hyper (bool): rerun the hyperparameter optimization\n even if `hyper_save_path` exists.\n data (dict): dictionary with data for each split\n hyper_metric (str): metric to use for optimizing hyperparameters\n seed (int): random seed\n classifier (bool): whether the model is a classifier\n num_samples (int): number of hyperparameter combinations to try\n hyper_score_path (str): path to scores of different\n hyperparameter combinations.\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n\n Returns:\n translate_params (dict): translated version of the best hyperparameters\n "
if (os.path.isfile(hyper_save_path) and (not rerun_hyper)):
with open(hyper_save_path, 'r') as f:
translate_params = json.load(f)
else:
objective = make_objective(data=data, metric_name=hyper_metric, seed=seed, classifier=classifier, hyper_score_path=hyper_score_path, model_type=model_type, props=props, max_specs=max_specs, custom_hyps=custom_hyps, fp_type=fp_type)
space = make_space(model_type, classifier, fp_type)
best_params = fmin(objective, space, algo=tpe.suggest, max_evals=num_samples, rstate=np.random.RandomState(seed))
translate_params = translate_best_params(best_params=best_params, model_type=model_type, classifier=classifier, fp_type=fp_type)
with open(hyper_save_path, 'w') as f:
json.dump(translate_params, f, indent=4, sort_keys=True)
print('\n')
print(f'Best parameters: {translate_params}')
return translate_params | Optimize hyperparameters or load hyperparameters if
they've already been optimized.
Args:
hyper_save_path (str): path to best hyperparameters
rerun_hyper (bool): rerun the hyperparameter optimization
even if `hyper_save_path` exists.
data (dict): dictionary with data for each split
hyper_metric (str): metric to use for optimizing hyperparameters
seed (int): random seed
classifier (bool): whether the model is a classifier
num_samples (int): number of hyperparameter combinations to try
hyper_score_path (str): path to scores of different
hyperparameter combinations.
model_type (str): name of model type to be trained
props (list[str]): properties you'll want to predict with the model
max_specs (int): maximum number of species to use in hyperopt
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
translate_params (dict): translated version of the best hyperparameters | scripts/cp3d/sklearn/run.py | get_or_load_hypers | jkaraguesian/NeuralForceField | 0 | python | def get_or_load_hypers(hyper_save_path, rerun_hyper, data, hyper_metric, seed, classifier, num_samples, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type='morgan'):
"\n Optimize hyperparameters or load hyperparameters if\n they've already been otpimized.\n Args:\n hyper_save_path (str): path to best hyperparameters\n rerun_hyper (bool): rerun the hyperparameter optimization\n even if `hyper_save_path` exists.\n data (dict): dictionary with data for each split\n hyper_metric (str): metric to use for optimizing hyperparameters\n seed (int): random seed\n classifier (bool): whether the model is a classifier\n num_samples (int): number of hyperparameter combinations to try\n hyper_score_path (str): path to scores of different\n hyperparameter combinations.\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n\n Returns:\n translate_params (dict): translated version of the best hyperparameters\n "
if (os.path.isfile(hyper_save_path) and (not rerun_hyper)):
with open(hyper_save_path, 'r') as f:
translate_params = json.load(f)
else:
objective = make_objective(data=data, metric_name=hyper_metric, seed=seed, classifier=classifier, hyper_score_path=hyper_score_path, model_type=model_type, props=props, max_specs=max_specs, custom_hyps=custom_hyps, fp_type=fp_type)
space = make_space(model_type, classifier, fp_type)
best_params = fmin(objective, space, algo=tpe.suggest, max_evals=num_samples, rstate=np.random.RandomState(seed))
translate_params = translate_best_params(best_params=best_params, model_type=model_type, classifier=classifier, fp_type=fp_type)
with open(hyper_save_path, 'w') as f:
json.dump(translate_params, f, indent=4, sort_keys=True)
print('\n')
print(f'Best parameters: {translate_params}')
return translate_params | def get_or_load_hypers(hyper_save_path, rerun_hyper, data, hyper_metric, seed, classifier, num_samples, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type='morgan'):
"\n Optimize hyperparameters or load hyperparameters if\n they've already been otpimized.\n Args:\n hyper_save_path (str): path to best hyperparameters\n rerun_hyper (bool): rerun the hyperparameter optimization\n even if `hyper_save_path` exists.\n data (dict): dictionary with data for each split\n hyper_metric (str): metric to use for optimizing hyperparameters\n seed (int): random seed\n classifier (bool): whether the model is a classifier\n num_samples (int): number of hyperparameter combinations to try\n hyper_score_path (str): path to scores of different\n hyperparameter combinations.\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n\n Returns:\n translate_params (dict): translated version of the best hyperparameters\n "
if (os.path.isfile(hyper_save_path) and (not rerun_hyper)):
with open(hyper_save_path, 'r') as f:
translate_params = json.load(f)
else:
objective = make_objective(data=data, metric_name=hyper_metric, seed=seed, classifier=classifier, hyper_score_path=hyper_score_path, model_type=model_type, props=props, max_specs=max_specs, custom_hyps=custom_hyps, fp_type=fp_type)
space = make_space(model_type, classifier, fp_type)
best_params = fmin(objective, space, algo=tpe.suggest, max_evals=num_samples, rstate=np.random.RandomState(seed))
translate_params = translate_best_params(best_params=best_params, model_type=model_type, classifier=classifier, fp_type=fp_type)
with open(hyper_save_path, 'w') as f:
json.dump(translate_params, f, indent=4, sort_keys=True)
print('\n')
print(f'Best parameters: {translate_params}')
return translate_params<|docstring|>Optimize hyperparameters or load hyperparameters if
they've already been optimized.
Args:
hyper_save_path (str): path to best hyperparameters
rerun_hyper (bool): rerun the hyperparameter optimization
even if `hyper_save_path` exists.
data (dict): dictionary with data for each split
hyper_metric (str): metric to use for optimizing hyperparameters
seed (int): random seed
classifier (bool): whether the model is a classifier
num_samples (int): number of hyperparameter combinations to try
hyper_score_path (str): path to scores of different
hyperparameter combinations.
model_type (str): name of model type to be trained
props (list[str]): properties you'll want to predict with the model
max_specs (int): maximum number of species to use in hyperopt
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
translate_params (dict): translated version of the best hyperparameters<|endoftext|> |
c119f0338cc1eca69687b684a666c1d3793cf81d9926cc5c0bc75a59f2a30a87 | def get_ensemble_preds(test_folds, translate_params, data, classifier, score_metrics, model_type, props, fp_type='morgan'):
"\n Get ensemble-averaged predictions from a model.\n Args:\n test_folds (int): number of different models to train\n and evaluate on the test set\n translate_params (dict): best hyperparameters\n data (dict): dictionary with data for each split\n classifier (bool): whether the model is a classifier\n score_metrics (list[str]): metrics to apply to the test set\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n fp_type (str, optional): type of fingerprint to use\n Returns:\n ensemble_preds (dict): predictions\n ensemble_scores (dict): scores\n pred_fns (dict): Dictionary of fitted models for each seed\n "
ensemble_preds = {}
ensemble_scores = {}
splits = ['train', 'val', 'test']
xy_dic = get_splits(space=translate_params, data=data, props=props, fp_type=fp_type)
(x_train, y_train) = xy_dic['train']
(x_test, y_test) = xy_dic['test']
pred_fns = {}
for seed in range(test_folds):
(pred, real, pred_fn) = run_sklearn(translate_params, seed=seed, model_type=model_type, classifier=classifier, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test)
pred_fns[seed] = pred_fn
metrics = get_metrics(pred=pred, real=real, score_metrics=score_metrics, props=props)
print(f'Fold {seed} test scores: {metrics}')
results = get_preds(pred_fn=pred_fn, score_metrics=score_metrics, xy_dic=xy_dic, props=props)
these_preds = {prop: {} for prop in props}
these_scores = {prop: {} for prop in props}
for prop in props:
for split in splits:
these_results = results[prop][split]
these_scores[prop].update({split: {key: val for (key, val) in these_results.items() if (key not in ['true', 'pred'])}})
these_preds[prop].update({split: {key: val for (key, val) in these_results.items() if (key in ['true', 'pred'])}})
ensemble_preds[str(seed)] = these_preds
ensemble_scores[str(seed)] = these_scores
avg = {prop: {split: {} for split in splits} for prop in props}
for prop in props:
for split in splits:
score_dics = [sub_dic[prop][split] for sub_dic in ensemble_scores.values()]
for key in score_metrics:
all_vals = [score_dic[key] for score_dic in score_dics]
mean = np.mean(all_vals)
std = np.std(all_vals)
avg[prop][split][key] = {'mean': mean, 'std': std}
ensemble_scores['average'] = avg
return (ensemble_preds, ensemble_scores, pred_fns) | Get ensemble-averaged predictions from a model.
Args:
test_folds (int): number of different models to train
and evaluate on the test set
translate_params (dict): best hyperparameters
data (dict): dictionary with data for each split
classifier (bool): whether the model is a classifier
score_metrics (list[str]): metrics to apply to the test set
model_type (str): name of model type to be trained
props (list[str]): properties you'll want to predict with the model
fp_type (str, optional): type of fingerprint to use
Returns:
ensemble_preds (dict): predictions
ensemble_scores (dict): scores
pred_fns (dict): Dictionary of fitted models for each seed | scripts/cp3d/sklearn/run.py | get_ensemble_preds | jkaraguesian/NeuralForceField | 0 | python | def get_ensemble_preds(test_folds, translate_params, data, classifier, score_metrics, model_type, props, fp_type='morgan'):
"\n Get ensemble-averaged predictions from a model.\n Args:\n test_folds (int): number of different models to train\n and evaluate on the test set\n translate_params (dict): best hyperparameters\n data (dict): dictionary with data for each split\n classifier (bool): whether the model is a classifier\n score_metrics (list[str]): metrics to apply to the test set\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n fp_type (str, optional): type of fingerprint to use\n Returns:\n ensemble_preds (dict): predictions\n ensemble_scores (dict): scores\n pred_fns (dict): Dictionary of fitted models for each seed\n "
ensemble_preds = {}
ensemble_scores = {}
splits = ['train', 'val', 'test']
xy_dic = get_splits(space=translate_params, data=data, props=props, fp_type=fp_type)
(x_train, y_train) = xy_dic['train']
(x_test, y_test) = xy_dic['test']
pred_fns = {}
for seed in range(test_folds):
(pred, real, pred_fn) = run_sklearn(translate_params, seed=seed, model_type=model_type, classifier=classifier, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test)
pred_fns[seed] = pred_fn
metrics = get_metrics(pred=pred, real=real, score_metrics=score_metrics, props=props)
print(f'Fold {seed} test scores: {metrics}')
results = get_preds(pred_fn=pred_fn, score_metrics=score_metrics, xy_dic=xy_dic, props=props)
these_preds = {prop: {} for prop in props}
these_scores = {prop: {} for prop in props}
for prop in props:
for split in splits:
these_results = results[prop][split]
these_scores[prop].update({split: {key: val for (key, val) in these_results.items() if (key not in ['true', 'pred'])}})
these_preds[prop].update({split: {key: val for (key, val) in these_results.items() if (key in ['true', 'pred'])}})
ensemble_preds[str(seed)] = these_preds
ensemble_scores[str(seed)] = these_scores
avg = {prop: {split: {} for split in splits} for prop in props}
for prop in props:
for split in splits:
score_dics = [sub_dic[prop][split] for sub_dic in ensemble_scores.values()]
for key in score_metrics:
all_vals = [score_dic[key] for score_dic in score_dics]
mean = np.mean(all_vals)
std = np.std(all_vals)
avg[prop][split][key] = {'mean': mean, 'std': std}
ensemble_scores['average'] = avg
return (ensemble_preds, ensemble_scores, pred_fns) | def get_ensemble_preds(test_folds, translate_params, data, classifier, score_metrics, model_type, props, fp_type='morgan'):
"\n Get ensemble-averaged predictions from a model.\n Args:\n test_folds (int): number of different models to train\n and evaluate on the test set\n translate_params (dict): best hyperparameters\n data (dict): dictionary with data for each split\n classifier (bool): whether the model is a classifier\n score_metrics (list[str]): metrics to apply to the test set\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n fp_type (str, optional): type of fingerprint to use\n Returns:\n ensemble_preds (dict): predictions\n ensemble_scores (dict): scores\n pred_fns (dict): Dictionary of fitted models for each seed\n "
ensemble_preds = {}
ensemble_scores = {}
splits = ['train', 'val', 'test']
xy_dic = get_splits(space=translate_params, data=data, props=props, fp_type=fp_type)
(x_train, y_train) = xy_dic['train']
(x_test, y_test) = xy_dic['test']
pred_fns = {}
for seed in range(test_folds):
(pred, real, pred_fn) = run_sklearn(translate_params, seed=seed, model_type=model_type, classifier=classifier, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test)
pred_fns[seed] = pred_fn
metrics = get_metrics(pred=pred, real=real, score_metrics=score_metrics, props=props)
print(f'Fold {seed} test scores: {metrics}')
results = get_preds(pred_fn=pred_fn, score_metrics=score_metrics, xy_dic=xy_dic, props=props)
these_preds = {prop: {} for prop in props}
these_scores = {prop: {} for prop in props}
for prop in props:
for split in splits:
these_results = results[prop][split]
these_scores[prop].update({split: {key: val for (key, val) in these_results.items() if (key not in ['true', 'pred'])}})
these_preds[prop].update({split: {key: val for (key, val) in these_results.items() if (key in ['true', 'pred'])}})
ensemble_preds[str(seed)] = these_preds
ensemble_scores[str(seed)] = these_scores
avg = {prop: {split: {} for split in splits} for prop in props}
for prop in props:
for split in splits:
score_dics = [sub_dic[prop][split] for sub_dic in ensemble_scores.values()]
for key in score_metrics:
all_vals = [score_dic[key] for score_dic in score_dics]
mean = np.mean(all_vals)
std = np.std(all_vals)
avg[prop][split][key] = {'mean': mean, 'std': std}
ensemble_scores['average'] = avg
return (ensemble_preds, ensemble_scores, pred_fns)<|docstring|>Get ensemble-averaged predictions from a model.
Args:
test_folds (int): number of different models to train
and evaluate on the test set
translate_params (dict): best hyperparameters
data (dict): dictionary with data for each split
classifier (bool): whether the model is a classifier
score_metrics (list[str]): metrics to apply to the test set
model_type (str): name of model type to be trained
props (list[str]): properties you'll want to predict with the model
fp_type (str, optional): type of fingerprint to use
Returns:
ensemble_preds (dict): predictions
ensemble_scores (dict): scores
pred_fns (dict): Dictionary of fitted models for each seed<|endoftext|> |
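For a single property, an ensemble-averaged prediction can be formed from the per-seed entries; the property name 'sol' is an assumption:

import numpy as np

seed_preds = [np.array(ensemble_preds[str(seed)]['sol']['test']['pred'])
              for seed in range(test_folds)]
ensemble_mean = np.mean(seed_preds, axis=0)  # one averaged prediction per test example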
6a2328a305b8b0de48c5152429ab09c90e64ee6883df604a44a403ea663caf96 | def hyper_and_train(train_path, val_path, test_path, pred_save_path, score_save_path, num_samples, hyper_metric, seed, score_metrics, hyper_save_path, rerun_hyper, classifier, test_folds, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type, **kwargs):
"\n Run hyperparameter optimization and train an ensemble of models.\n Args:\n train_path (str): path to csv with training data\n val_path (str): path to csv with validation data\n test_path (str): path to csv with test data\n pred_save_path (str): path to JSON file in which to save\n predictions.\n score_save_path (str): path to JSON file in which to save\n scores.\n num_samples (int): number of hyperparameter combinations to try\n hyper_metric (str): metric to use for optimizing hyperparameters\n seed (int): random seed\n score_metrics (list[str]): metrics to apply to the test set\n hyper_save_path (str): path to best hyperparameters\n rerun_hyper (bool): rerun the hyperparameter optimization\n even if `hyper_save_path` exists.\n classifier (bool): whether the model is a classifier\n test_folds (int): number of different models to train\n and evaluate on the test set\n hyper_score_path (str): path to scores of different\n hyperparameter combinations.\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n Returns:\n None\n\n "
data = load_data(train_path, val_path, test_path)
translate_params = get_or_load_hypers(hyper_save_path=hyper_save_path, rerun_hyper=rerun_hyper, data=data, hyper_metric=hyper_metric, seed=seed, classifier=classifier, num_samples=num_samples, hyper_score_path=hyper_score_path, model_type=model_type, props=props, max_specs=max_specs, custom_hyps=custom_hyps, fp_type=fp_type)
(ensemble_preds, ensemble_scores, pred_fns) = get_ensemble_preds(test_folds=test_folds, translate_params=translate_params, data=data, classifier=classifier, score_metrics=score_metrics, model_type=model_type, props=props, fp_type=fp_type)
save_preds(ensemble_preds=ensemble_preds, ensemble_scores=ensemble_scores, pred_save_path=pred_save_path, score_save_path=score_save_path, pred_fns=pred_fns) | Run hyperparameter optimization and train an ensemble of models.
Args:
train_path (str): path to csv with training data
val_path (str): path to csv with validation data
test_path (str): path to csv with test data
pred_save_path (str): path to JSON file in which to save
predictions.
score_save_path (str): path to JSON file in which to save
scores.
num_samples (int): number of hyperparameter combinations to try
hyper_metric (str): metric to use for optimizing hyperparameters
seed (int): random seed
score_metrics (list[str]): metrics to apply to the test set
hyper_save_path (str): path to best hyperparameters
rerun_hyper (bool): rerun the hyperparameter optimization
even if `hyper_save_path` exists.
classifier (bool): whether the model is a classifier
test_folds (int): number of different models to train
and evaluate on the test set
hyper_score_path (str): path to scores of different
hyperparameter combinations.
model_type (str): name of model type to be trained
props (list[str]): properties you'll want to predict with the model
max_specs (int): maximum number of species to use in hyperopt
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
None | scripts/cp3d/sklearn/run.py | hyper_and_train | jkaraguesian/NeuralForceField | 0 | python | def hyper_and_train(train_path, val_path, test_path, pred_save_path, score_save_path, num_samples, hyper_metric, seed, score_metrics, hyper_save_path, rerun_hyper, classifier, test_folds, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type, **kwargs):
"\n Run hyperparameter optimization and train an ensemble of models.\n Args:\n train_path (str): path to csv with training data\n val_path (str): path to csv with validation data\n test_path (str): path to csv with test data\n pred_save_path (str): path to JSON file in which to save\n predictions.\n score_save_path (str): path to JSON file in which to save\n scores.\n num_samples (int): number of hyperparameter combinations to try\n hyper_metric (str): metric to use for optimizing hyperparameters\n seed (int): random seed\n score_metrics (list[str]): metrics to apply to the test set\n hyper_save_path (str): path to best hyperparameters\n rerun_hyper (bool): rerun the hyperparameter optimization\n even if `hyper_save_path` exists.\n classifier (bool): whether the model is a classifier\n test_folds (int): number of different models to train\n and evaluate on the test set\n hyper_score_path (str): path to scores of different\n hyperparameter combinations.\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n Returns:\n None\n\n "
data = load_data(train_path, val_path, test_path)
translate_params = get_or_load_hypers(hyper_save_path=hyper_save_path, rerun_hyper=rerun_hyper, data=data, hyper_metric=hyper_metric, seed=seed, classifier=classifier, num_samples=num_samples, hyper_score_path=hyper_score_path, model_type=model_type, props=props, max_specs=max_specs, custom_hyps=custom_hyps, fp_type=fp_type)
(ensemble_preds, ensemble_scores, pred_fns) = get_ensemble_preds(test_folds=test_folds, translate_params=translate_params, data=data, classifier=classifier, score_metrics=score_metrics, model_type=model_type, props=props, fp_type=fp_type)
save_preds(ensemble_preds=ensemble_preds, ensemble_scores=ensemble_scores, pred_save_path=pred_save_path, score_save_path=score_save_path, pred_fns=pred_fns) | def hyper_and_train(train_path, val_path, test_path, pred_save_path, score_save_path, num_samples, hyper_metric, seed, score_metrics, hyper_save_path, rerun_hyper, classifier, test_folds, hyper_score_path, model_type, props, max_specs, custom_hyps, fp_type, **kwargs):
"\n Run hyperparameter optimization and train an ensemble of models.\n Args:\n train_path (str): path to csv with training data\n val_path (str): path to csv with validation data\n test_path (str): path to csv with test data\n pred_save_path (str): path to JSON file in which to save\n predictions.\n score_save_path (str): path to JSON file in which to save\n scores.\n num_samples (int): number of hyperparameter combinations to try\n hyper_metric (str): metric to use for optimizing hyperparameters\n seed (int): random seed\n score_metrics (list[str]): metrics to apply to the test set\n hyper_save_path (str): path to best hyperparameters\n rerun_hyper (bool): rerun the hyperparameter optimization\n even if `hyper_save_path` exists.\n classifier (bool): whether the model is a classifier\n test_folds (int): number of different models to train\n and evaluate on the test set\n hyper_score_path (str): path to scores of different\n hyperparameter combinations.\n model_type (str): name of model type to be trained\n props (list[str]): properties you'll want to predict with the model\n max_specs (int): maximum number of species to use in hyperopt\n custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}\n for each hyperparameter, where `new_vals` is the range you want\n for each.\n fp_type (str, optional): type of fingerprint to use\n Returns:\n None\n\n "
data = load_data(train_path, val_path, test_path)
translate_params = get_or_load_hypers(hyper_save_path=hyper_save_path, rerun_hyper=rerun_hyper, data=data, hyper_metric=hyper_metric, seed=seed, classifier=classifier, num_samples=num_samples, hyper_score_path=hyper_score_path, model_type=model_type, props=props, max_specs=max_specs, custom_hyps=custom_hyps, fp_type=fp_type)
(ensemble_preds, ensemble_scores, pred_fns) = get_ensemble_preds(test_folds=test_folds, translate_params=translate_params, data=data, classifier=classifier, score_metrics=score_metrics, model_type=model_type, props=props, fp_type=fp_type)
save_preds(ensemble_preds=ensemble_preds, ensemble_scores=ensemble_scores, pred_save_path=pred_save_path, score_save_path=score_save_path, pred_fns=pred_fns)<|docstring|>Run hyperparameter optimization and train an ensemble of models.
Args:
train_path (str): path to csv with training data
val_path (str): path to csv with validation data
test_path (str): path to csv with test data
pred_save_path (str): path to JSON file in which to save
predictions.
score_save_path (str): path to JSON file in which to save
scores.
num_samples (int): number of hyperparameter combinations to try
hyper_metric (str): metric to use for optimizing hyperparameters
seed (int): random seed
score_metrics (list[str]): metrics to apply to the test set
hyper_save_path (str): path to best hyperparameters
rerun_hyper (bool): rerun the hyperparameter optimization
even if `hyper_save_path` exists.
classifier (bool): whether the model is a classifier
test_folds (int): number of different models to train
and evaluate on the test set
hyper_score_path (str): path to scores of different
hyperparameter combinations.
model_type (str): name of model type to be trained
props (list[str]): properties you'll want to predict with the model
max_specs (int): maximum number of species to use in hyperopt
custom_hyps (dict): Dictionary of the form {hyperparam: new_vals}
for each hyperparameter, where `new_vals` is the range you want
for each.
fp_type (str, optional): type of fingerprint to use
Returns:
None<|endoftext|> |
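An end-to-end call might look like the sketch below; every path and value is a placeholder:

hyper_and_train(train_path='train.csv', val_path='val.csv', test_path='test.csv',
                pred_save_path='preds.json', score_save_path='scores.json',
                num_samples=50, hyper_metric='mae', seed=0,
                score_metrics=['mae', 'r2'], hyper_save_path='best_hypers.json',
                rerun_hyper=False, classifier=False, test_folds=5,
                hyper_score_path='hyper_scores.json', model_type='random_forest',
                props=['sol'], max_specs=None, custom_hyps={}, fp_type='morgan')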
1c57fa41d0ba515ab1c106c889ecf9e2babcc620a167ed7b13b891f5b36ec854 | def firstUniqChar(s):
'\n :type s: str\n :rtype: int\n '
mapping = {}
for x in s:
if (x not in mapping):
mapping[x] = 1
else:
mapping[x] += 1
for i in range(len(s)):
x = s[i]
if (mapping[x] == 1):
return i
return (- 1) | :type s: str
:rtype: int | Amazon/FirstUniqueCharacter.py | firstUniqChar | roeiherz/CodingInterviews | 0 | python | def firstUniqChar(s):
'\n :type s: str\n :rtype: int\n '
mapping = {}
for x in s:
if (x not in mapping):
mapping[x] = 1
else:
mapping[x] += 1
for i in range(len(s)):
x = s[i]
if (mapping[x] == 1):
return i
return (- 1) | def firstUniqChar(s):
'\n :type s: str\n :rtype: int\n '
mapping = {}
for x in s:
if (x not in mapping):
mapping[x] = 1
else:
mapping[x] += 1
for i in range(len(s)):
x = s[i]
if (mapping[x] == 1):
return i
return (- 1)<|docstring|>:type s: str
:rtype: int<|endoftext|> |
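Expected behaviour on the usual LeetCode examples, plus an equivalent variant using collections.Counter for the counting pass:

firstUniqChar('leetcode')      # -> 0  ('l' never repeats)
firstUniqChar('loveleetcode')  # -> 2  ('v')
firstUniqChar('aabb')          # -> -1 (every character repeats)

from collections import Counter

def first_uniq_char(s):
    counts = Counter(s)  # same O(n) two-pass idea, with Counter doing the tally
    return next((i for i, c in enumerate(s) if counts[c] == 1), -1)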
b5cb8072a8d34ec62fbb6d7c601421bc26e6b3502cdb93c2b75769a04150bb09 | def parse_name(name, from_i, to_i, mapping=None):
'Source: https://audeering.github.io/audformat/emodb-example.html'
key = name[from_i:to_i]
return (mapping[key] if mapping else key) | Source: https://audeering.github.io/audformat/emodb-example.html | tensorflow_datasets/aesdd/aesdd.py | parse_name | Neclow/SERAB | 10 | python | def parse_name(name, from_i, to_i, mapping=None):
key = name[from_i:to_i]
return (mapping[key] if mapping else key) | def parse_name(name, from_i, to_i, mapping=None):
key = name[from_i:to_i]
return (mapping[key] if mapping else key)<|docstring|>Source: https://audeering.github.io/audformat/emodb-example.html<|endoftext|> |
6fbc0beaecb972fbc4575a52ceae1b1c40ef4432593915e3b95ee54d03f2a4f3 | def _compute_split_boundaries(split_probs, n_items):
"Computes boundary indices for each of the splits in split_probs.\n Args:\n split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),\n ('test', 0.2)]\n n_items: Number of items we want to split.\n Returns:\n The item indices of boundaries between different splits. For the above\n example and n_items=100, these will be\n [('train', 0, 60), ('dev', 60, 80), ('test', 80, 100)].\n "
if (len(split_probs) > n_items):
raise ValueError('Not enough items for the splits. There are {splits} splits while there are only {items} items'.format(splits=len(split_probs), items=n_items))
total_probs = sum((p for (name, p) in split_probs))
if (abs((1 - total_probs)) > 1e-08):
raise ValueError('Probs should sum up to 1. probs={}'.format(split_probs))
split_boundaries = []
sum_p = 0.0
for (name, p) in split_probs:
prev = sum_p
sum_p += p
split_boundaries.append((name, int((prev * n_items)), int((sum_p * n_items))))
split_boundaries[(- 1)] = (split_boundaries[(- 1)][0], split_boundaries[(- 1)][1], n_items)
return split_boundaries | Computes boundary indices for each of the splits in split_probs.
Args:
split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),
('test', 0.2)]
n_items: Number of items we want to split.
Returns:
The item indices of boundaries between different splits. For the above
example and n_items=100, these will be
[('train', 0, 60), ('dev', 60, 80), ('test', 80, 100)]. | tensorflow_datasets/aesdd/aesdd.py | _compute_split_boundaries | Neclow/SERAB | 10 | python | def _compute_split_boundaries(split_probs, n_items):
"Computes boundary indices for each of the splits in split_probs.\n Args:\n split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),\n ('test', 0.2)]\n n_items: Number of items we want to split.\n Returns:\n The item indices of boundaries between different splits. For the above\n example and n_items=100, these will be\n [('train', 0, 60), ('dev', 60, 80), ('test', 80, 100)].\n "
if (len(split_probs) > n_items):
raise ValueError('Not enough items for the splits. There are {splits} splits while there are only {items} items'.format(splits=len(split_probs), items=n_items))
total_probs = sum((p for (name, p) in split_probs))
if (abs((1 - total_probs)) > 1e-08):
raise ValueError('Probs should sum up to 1. probs={}'.format(split_probs))
split_boundaries = []
sum_p = 0.0
for (name, p) in split_probs:
prev = sum_p
sum_p += p
split_boundaries.append((name, int((prev * n_items)), int((sum_p * n_items))))
split_boundaries[(- 1)] = (split_boundaries[(- 1)][0], split_boundaries[(- 1)][1], n_items)
return split_boundaries | def _compute_split_boundaries(split_probs, n_items):
"Computes boundary indices for each of the splits in split_probs.\n Args:\n split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),\n ('test', 0.2)]\n n_items: Number of items we want to split.\n Returns:\n The item indices of boundaries between different splits. For the above\n example and n_items=100, these will be\n [('train', 0, 60), ('dev', 60, 80), ('test', 80, 100)].\n "
if (len(split_probs) > n_items):
raise ValueError('Not enough items for the splits. There are {splits} splits while there are only {items} items'.format(splits=len(split_probs), items=n_items))
total_probs = sum((p for (name, p) in split_probs))
if (abs((1 - total_probs)) > 1e-08):
raise ValueError('Probs should sum up to 1. probs={}'.format(split_probs))
split_boundaries = []
sum_p = 0.0
for (name, p) in split_probs:
prev = sum_p
sum_p += p
split_boundaries.append((name, int((prev * n_items)), int((sum_p * n_items))))
split_boundaries[(- 1)] = (split_boundaries[(- 1)][0], split_boundaries[(- 1)][1], n_items)
return split_boundaries<|docstring|>Computes boundary indices for each of the splits in split_probs.
Args:
split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),
('test', 0.2)]
n_items: Number of items we want to split.
Returns:
The item indices of boundaries between different splits. For the above
example and n_items=100, these will be
[('train', 0, 60), ('dev', 60, 80), ('test', 80, 100)].<|endoftext|> |
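The docstring example can be checked directly; note the final boundary is clamped to n_items so rounding never drops an item:

_compute_split_boundaries([('train', 0.6), ('dev', 0.2), ('test', 0.2)], n_items=100)
# -> [('train', 0, 60), ('dev', 60, 80), ('test', 80, 100)]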
41f7db59d75db22c8a4b6f153a5692b878609250e6ce8c295a64988331df2cbc | def _get_inter_splits_by_group(items_and_groups, split_probs, split_number):
"Split items to train/dev/test, so all items in group go into same split.\n Each group contains all the samples from the same speaker ID. The samples are\n splitted so that all each speaker belongs to exactly one split.\n Args:\n items_and_groups: Sequence of (item_id, group_id) pairs.\n split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),\n ('test', 0.2)]\n split_number: Generated splits should change with split_number.\n Returns:\n Dictionary that looks like {split name -> set(ids)}.\n "
groups = sorted(set((group_id for (item_id, group_id) in items_and_groups)))
rng = np.random.RandomState(split_number)
rng.shuffle(groups)
split_boundaries = _compute_split_boundaries(split_probs, len(groups))
group_id_to_split = {}
for (split_name, i_start, i_end) in split_boundaries:
for i in range(i_start, i_end):
group_id_to_split[groups[i]] = split_name
split_to_ids = collections.defaultdict(set)
for (item_id, group_id) in items_and_groups:
split = group_id_to_split[group_id]
split_to_ids[split].add(item_id)
return split_to_ids | Split items to train/dev/test, so all items in group go into same split.
Each group contains all the samples from the same speaker ID. The samples are
split so that each speaker belongs to exactly one split.
Args:
items_and_groups: Sequence of (item_id, group_id) pairs.
split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),
('test', 0.2)]
split_number: Generated splits should change with split_number.
Returns:
Dictionary that looks like {split name -> set(ids)}. | tensorflow_datasets/aesdd/aesdd.py | _get_inter_splits_by_group | Neclow/SERAB | 10 | python | def _get_inter_splits_by_group(items_and_groups, split_probs, split_number):
"Split items to train/dev/test, so all items in group go into same split.\n Each group contains all the samples from the same speaker ID. The samples are\n splitted so that all each speaker belongs to exactly one split.\n Args:\n items_and_groups: Sequence of (item_id, group_id) pairs.\n split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),\n ('test', 0.2)]\n split_number: Generated splits should change with split_number.\n Returns:\n Dictionary that looks like {split name -> set(ids)}.\n "
groups = sorted(set((group_id for (item_id, group_id) in items_and_groups)))
rng = np.random.RandomState(split_number)
rng.shuffle(groups)
split_boundaries = _compute_split_boundaries(split_probs, len(groups))
group_id_to_split = {}
for (split_name, i_start, i_end) in split_boundaries:
for i in range(i_start, i_end):
group_id_to_split[groups[i]] = split_name
split_to_ids = collections.defaultdict(set)
for (item_id, group_id) in items_and_groups:
split = group_id_to_split[group_id]
split_to_ids[split].add(item_id)
return split_to_ids | def _get_inter_splits_by_group(items_and_groups, split_probs, split_number):
"Split items to train/dev/test, so all items in group go into same split.\n Each group contains all the samples from the same speaker ID. The samples are\n splitted so that all each speaker belongs to exactly one split.\n Args:\n items_and_groups: Sequence of (item_id, group_id) pairs.\n split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),\n ('test', 0.2)]\n split_number: Generated splits should change with split_number.\n Returns:\n Dictionary that looks like {split name -> set(ids)}.\n "
groups = sorted(set((group_id for (item_id, group_id) in items_and_groups)))
rng = np.random.RandomState(split_number)
rng.shuffle(groups)
split_boundaries = _compute_split_boundaries(split_probs, len(groups))
group_id_to_split = {}
for (split_name, i_start, i_end) in split_boundaries:
for i in range(i_start, i_end):
group_id_to_split[groups[i]] = split_name
split_to_ids = collections.defaultdict(set)
for (item_id, group_id) in items_and_groups:
split = group_id_to_split[group_id]
split_to_ids[split].add(item_id)
return split_to_ids<|docstring|>Split items to train/dev/test, so all items in group go into same split.
Each group contains all the samples from the same speaker ID. The samples are
split so that each speaker belongs to exactly one split.
Args:
items_and_groups: Sequence of (item_id, group_id) pairs.
split_probs: List of (split_name, prob), e.g. [('train', 0.6), ('dev', 0.2),
('test', 0.2)]
split_number: Generated splits should change with split_number.
Returns:
Dictionary that looks like {split name -> set(ids)}.<|endoftext|> |
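A small illustration with hypothetical file/speaker pairs — the guarantee is that one speaker's files never straddle two splits:

items = [('a.wav', 'spk1'), ('b.wav', 'spk1'), ('c.wav', 'spk2'),
         ('d.wav', 'spk3'), ('e.wav', 'spk4'), ('f.wav', 'spk5')]
splits = _get_inter_splits_by_group(items, [('train', 0.6), ('dev', 0.2), ('test', 0.2)], 0)
# 'a.wav' and 'b.wav' always land in the same split because both belong to 'spk1'.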
0919d45dd14f7dadccd4533ad9830233fa1739871cd978a2411cca4431cb7d75 | def _info(self) -> tfds.core.DatasetInfo:
'Returns the dataset metadata.'
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'audio': tfds.features.Audio(file_format='wav', sample_rate=_SAMPLE_RATE), 'label': tfds.features.ClassLabel(names=_LABEL_MAP.values()), 'speaker_id': tf.string}), supervised_keys=('audio', 'label'), homepage=_HOMEPAGE, citation=_CITATION) | Returns the dataset metadata. | tensorflow_datasets/aesdd/aesdd.py | _info | Neclow/SERAB | 10 | python | def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'audio': tfds.features.Audio(file_format='wav', sample_rate=_SAMPLE_RATE), 'label': tfds.features.ClassLabel(names=_LABEL_MAP.values()), 'speaker_id': tf.string}), supervised_keys=('audio', 'label'), homepage=_HOMEPAGE, citation=_CITATION) | def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'audio': tfds.features.Audio(file_format='wav', sample_rate=_SAMPLE_RATE), 'label': tfds.features.ClassLabel(names=_LABEL_MAP.values()), 'speaker_id': tf.string}), supervised_keys=('audio', 'label'), homepage=_HOMEPAGE, citation=_CITATION)<|docstring|>Returns the dataset metadata.<|endoftext|> |
4c664c4b03d6208c8ddb78ee6609c3fa1d923b243cbb80820ee31f1288a2e933 | def _split_generators(self, dl_manager: tfds.download.DownloadManager):
'Returns SplitGenerators.'
zip_path = os.path.join(dl_manager.manual_dir, 'Acted Emotional Speech Dynamic Database.zip')
if (not tf.io.gfile.exists(zip_path)):
raise AssertionError('AESDD requires manual download of the data. Please download the audio data and place it into: {}'.format(zip_path))
extract_path = dl_manager.extract(zip_path)
items_and_groups = []
for fname in tf.io.gfile.glob('{}/*/*/*.wav'.format(extract_path)):
if ('s05 (3).wav' in fname):
continue
else:
speaker_id = parse_speaker_id(os.path.basename(fname))
items_and_groups.append((fname, speaker_id))
split_probs = [('train', 0.6), ('validation', 0.2), ('test', 0.2)]
splits = _get_inter_splits_by_group(items_and_groups, split_probs, 0)
return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, gen_kwargs={'file_names': splits['train']}), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, gen_kwargs={'file_names': splits['validation']}), tfds.core.SplitGenerator(name=tfds.Split.TEST, gen_kwargs={'file_names': splits['test']})] | Returns SplitGenerators. | tensorflow_datasets/aesdd/aesdd.py | _split_generators | Neclow/SERAB | 10 | python | def _split_generators(self, dl_manager: tfds.download.DownloadManager):
zip_path = os.path.join(dl_manager.manual_dir, 'Acted Emotional Speech Dynamic Database.zip')
if (not tf.io.gfile.exists(zip_path)):
raise AssertionError('AESDD requires manual download of the data. Please download the audio data and place it into: {}'.format(zip_path))
extract_path = dl_manager.extract(zip_path)
items_and_groups = []
for fname in tf.io.gfile.glob('{}/*/*/*.wav'.format(extract_path)):
if ('s05 (3).wav' in fname):
continue
else:
speaker_id = parse_speaker_id(os.path.basename(fname))
items_and_groups.append((fname, speaker_id))
split_probs = [('train', 0.6), ('validation', 0.2), ('test', 0.2)]
splits = _get_inter_splits_by_group(items_and_groups, split_probs, 0)
return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, gen_kwargs={'file_names': splits['train']}), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, gen_kwargs={'file_names': splits['validation']}), tfds.core.SplitGenerator(name=tfds.Split.TEST, gen_kwargs={'file_names': splits['test']})] | def _split_generators(self, dl_manager: tfds.download.DownloadManager):
zip_path = os.path.join(dl_manager.manual_dir, 'Acted Emotional Speech Dynamic Database.zip')
if (not tf.io.gfile.exists(zip_path)):
raise AssertionError('AESDD requires manual download of the data. Please download the audio data and place it into: {}'.format(zip_path))
extract_path = dl_manager.extract(zip_path)
items_and_groups = []
for fname in tf.io.gfile.glob('{}/*/*/*.wav'.format(extract_path)):
if ('s05 (3).wav' in fname):
continue
else:
speaker_id = parse_speaker_id(os.path.basename(fname))
items_and_groups.append((fname, speaker_id))
split_probs = [('train', 0.6), ('validation', 0.2), ('test', 0.2)]
splits = _get_inter_splits_by_group(items_and_groups, split_probs, 0)
return [tfds.core.SplitGenerator(name=tfds.Split.TRAIN, gen_kwargs={'file_names': splits['train']}), tfds.core.SplitGenerator(name=tfds.Split.VALIDATION, gen_kwargs={'file_names': splits['validation']}), tfds.core.SplitGenerator(name=tfds.Split.TEST, gen_kwargs={'file_names': splits['test']})]<|docstring|>Returns SplitGenerators.<|endoftext|> |
0675f33c16b41a62faa9bcd382a1ee4dd46f495ec92cac0511a991c2989061c9 | def _generate_examples(self, file_names):
'Yields examples.'
for fname in file_names:
wavname = os.path.basename(fname)
speaker_id = parse_speaker_id(wavname)
label = parse_name(wavname, from_i=0, to_i=1, mapping=_LABEL_MAP)
example = {'audio': fname, 'label': label, 'speaker_id': speaker_id}
(yield (fname, example)) | Yields examples. | tensorflow_datasets/aesdd/aesdd.py | _generate_examples | Neclow/SERAB | 10 | python | def _generate_examples(self, file_names):
for fname in file_names:
wavname = os.path.basename(fname)
speaker_id = parse_speaker_id(wavname)
label = parse_name(wavname, from_i=0, to_i=1, mapping=_LABEL_MAP)
example = {'audio': fname, 'label': label, 'speaker_id': speaker_id}
(yield (fname, example)) | def _generate_examples(self, file_names):
for fname in file_names:
wavname = os.path.basename(fname)
speaker_id = parse_speaker_id(wavname)
label = parse_name(wavname, from_i=0, to_i=1, mapping=_LABEL_MAP)
example = {'audio': fname, 'label': label, 'speaker_id': speaker_id}
(yield (fname, example))<|docstring|>Yields examples.<|endoftext|> |
f21d718b2a0a439f72edd3fcf9dfb6dd196fcc043f518a6fa5b09d72f18c54d2 | def get(self, request, *args, **kwargs):
'\n Render media list\n\n :param request: The current request\n :type request: ~django.http.HttpRequest\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n '
documents = Document.objects.all()
return render(request, self.template_name, {**self.base_context, 'documents': documents}) | Render media list
:param request: The current request
:type request: ~django.http.HttpRequest
:param args: The supplied arguments
:type args: list
:param kwargs: The supplied keyword arguments
:type kwargs: dict
:return: The rendered template response
:rtype: ~django.template.response.TemplateResponse | src/cms/views/media/media_list_view.py | get | mckinly/cms-django | 0 | python | def get(self, request, *args, **kwargs):
'\n Render media list\n\n :param request: The current request\n :type request: ~django.http.HttpRequest\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n '
documents = Document.objects.all()
return render(request, self.template_name, {**self.base_context, 'documents': documents}) | def get(self, request, *args, **kwargs):
'\n Render media list\n\n :param request: The current request\n :type request: ~django.http.HttpRequest\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n '
documents = Document.objects.all()
return render(request, self.template_name, {**self.base_context, 'documents': documents})<|docstring|>Render media list
:param request: The current request
:type request: ~django.http.HttpRequest
:param args: The supplied arguments
:type args: list
:param kwargs: The supplied keyword arguments
:type kwargs: dict
:return: The rendered template response
:rtype: ~django.template.response.TemplateResponse<|endoftext|> |
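A minimal wiring sketch for a class-based view like this one; the view class name `MediaListView` and the import path are assumptions, since only the get() method appears in the record.

# urls.py (hypothetical)
from django.urls import path

from .media_list_view import MediaListView  # assumed class name in the record's module

urlpatterns = [
    # GET /media/ renders the template with all Document objects in context.
    path('media/', MediaListView.as_view(), name='media_list'),
]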
dc7899e02060ccd7ada54a510e1c30233ce3c66c040e51faf36ee00ca05b9840 | def get_tweets_from_screen_name(screen_name, credentials):
"\n Get the last 3240 tweets (maximum allowed by the API) from an user\n with given screen name.\n Adapted from https://gist.github.com/yanofsky/5436496\n\n Parameters:\n screen_name: str, the screen_name of the user (ex: @random_user becomes\n 'random_user')\n credentials: dic, contain the credentials for accessing twitter. Must\n include the keys 'consumer_API_key',\n 'consumer_API_secret_key', 'access_token', and\n 'access_secret_token'\n Returns:\n tweets: list, list of Status objects containing the last 3240 of given user\n "
auth = tweepy.OAuthHandler(credentials['consumer_API_key'], credentials['consumer_API_secret_key'])
auth.set_access_token(credentials['access_token'], credentials['access_secret_token'])
api = tweepy.API(auth)
tweets = []
while True:
try:
new_tweet = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode='extended')
except:
new_tweet = api.user_timeline(screen_name=screen_name, count=200, tweet_mode='extended')
if (len(new_tweet) == 0):
break
tweets.extend(new_tweet)
oldest = (tweets[(- 1)].id - 1)
return tweets | Get the last 3240 tweets (maximum allowed by the API) from a user
with the given screen name.
Adapted from https://gist.github.com/yanofsky/5436496
Parameters:
screen_name: str, the screen_name of the user (ex: @random_user becomes
'random_user')
credentials: dict, contains the credentials for accessing twitter. Must
include the keys 'consumer_API_key',
'consumer_API_secret_key', 'access_token', and
'access_secret_token'
Returns:
tweets: list, list of Status objects containing the last 3240 tweets of the given user | utils.py | get_tweets_from_screen_name | delpapa/TweetGen | 0 | python | def get_tweets_from_screen_name(screen_name, credentials):
"\n Get the last 3240 tweets (maximum allowed by the API) from an user\n with given screen name.\n Adapted from https://gist.github.com/yanofsky/5436496\n\n Parameters:\n screen_name: str, the screen_name of the user (ex: @random_user becomes\n 'random_user')\n credentials: dic, contain the credentials for accessing twitter. Must\n include the keys 'consumer_API_key',\n 'consumer_API_secret_key', 'access_token', and\n 'access_secret_token'\n Returns:\n tweets: list, list of Status objects containing the last 3240 of given user\n "
auth = tweepy.OAuthHandler(credentials['consumer_API_key'], credentials['consumer_API_secret_key'])
auth.set_access_token(credentials['access_token'], credentials['access_secret_token'])
api = tweepy.API(auth)
tweets = []
while True:
try:
new_tweet = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode='extended')
except:
new_tweet = api.user_timeline(screen_name=screen_name, count=200, tweet_mode='extended')
if (len(new_tweet) == 0):
break
tweets.extend(new_tweet)
oldest = (tweets[(- 1)].id - 1)
return tweets | def get_tweets_from_screen_name(screen_name, credentials):
"\n Get the last 3240 tweets (maximum allowed by the API) from an user\n with given screen name.\n Adapted from https://gist.github.com/yanofsky/5436496\n\n Parameters:\n screen_name: str, the screen_name of the user (ex: @random_user becomes\n 'random_user')\n credentials: dic, contain the credentials for accessing twitter. Must\n include the keys 'consumer_API_key',\n 'consumer_API_secret_key', 'access_token', and\n 'access_secret_token'\n Returns:\n tweets: list, list of Status objects containing the last 3240 of given user\n "
auth = tweepy.OAuthHandler(credentials['consumer_API_key'], credentials['consumer_API_secret_key'])
auth.set_access_token(credentials['access_token'], credentials['access_secret_token'])
api = tweepy.API(auth)
tweets = []
while True:
try:
new_tweet = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode='extended')
except:
new_tweet = api.user_timeline(screen_name=screen_name, count=200, tweet_mode='extended')
if (len(new_tweet) == 0):
break
tweets.extend(new_tweet)
oldest = (tweets[(- 1)].id - 1)
return tweets<|docstring|>Get the last 3240 tweets (maximum allowed by the API) from a user
with the given screen name.
Adapted from https://gist.github.com/yanofsky/5436496
Parameters:
screen_name: str, the screen_name of the user (ex: @random_user becomes
'random_user')
credentials: dic, contain the credentials for accessing twitter. Must
include the keys 'consumer_API_key',
'consumer_API_secret_key', 'access_token', and
'access_secret_token'
Returns:
tweets: list, list of Status objects containing the last 3240 tweets of the given user<|endoftext|>
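A usage sketch for the scraper above; the credential strings are placeholders that must be replaced with real Twitter API keys.

credentials = {
    'consumer_API_key': '<consumer key>',
    'consumer_API_secret_key': '<consumer secret>',
    'access_token': '<access token>',
    'access_secret_token': '<access token secret>',
}

tweets = get_tweets_from_screen_name('random_user', credentials)
print(len(tweets))                # up to ~3240 Status objects
print(tweets[0].full_text[:80])   # tweet_mode='extended' populates full_text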
4548e41171696ae5b9bbf678737cab157fbc2fdcd51c5bbed9c90bb9e8419b71 | def still_has_cards(self):
'\n Returns True if the player still has cards left\n '
return (len(self.hand.cards) != 0) | Returns True if the player still has cards left | oop_project.py | still_has_cards | profmcdan/War_Game | 0 | python | def still_has_cards(self):
'\n \n '
return (len(self.hand.cards) != 0) | def still_has_cards(self):
'\n \n '
return (len(self.hand.cards) != 0)<|docstring|>Returns True if the player still has cards left<|endoftext|>
cac00557fdf60ed2db4eed8621dd54186a58bfcfc98567278d01c48d7a1d432f | def load(self, device: str):
'\n Load user-selected task-specific model\n\n Args:\n device (str): device information\n\n Returns:\n object: User-selected task-specific model\n\n '
if ('brainbert' in self.config.n_model):
from pororo.models.brainbert import BrainRobertaModel
model = BrainRobertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('jaberta' in self.config.n_model):
from pororo.models.brainbert import JabertaModel
model = JabertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('zhberta' in self.config.n_model):
from pororo.models.brainbert import ZhbertaModel
model = ZhbertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('roberta' in self.config.n_model):
from pororo.models.brainbert import CustomRobertaModel
model = CustomRobertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config) | Load user-selected task-specific model
Args:
device (str): device information
Returns:
object: User-selected task-specific model | pororo/tasks/zero_shot_classification.py | load | jayten42/pororo | 1,137 | python | def load(self, device: str):
'\n Load user-selected task-specific model\n\n Args:\n device (str): device information\n\n Returns:\n object: User-selected task-specific model\n\n '
if ('brainbert' in self.config.n_model):
from pororo.models.brainbert import BrainRobertaModel
model = BrainRobertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('jaberta' in self.config.n_model):
from pororo.models.brainbert import JabertaModel
model = JabertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('zhberta' in self.config.n_model):
from pororo.models.brainbert import ZhbertaModel
model = ZhbertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('roberta' in self.config.n_model):
from pororo.models.brainbert import CustomRobertaModel
model = CustomRobertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config) | def load(self, device: str):
'\n Load user-selected task-specific model\n\n Args:\n device (str): device information\n\n Returns:\n object: User-selected task-specific model\n\n '
if ('brainbert' in self.config.n_model):
from pororo.models.brainbert import BrainRobertaModel
model = BrainRobertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('jaberta' in self.config.n_model):
from pororo.models.brainbert import JabertaModel
model = JabertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('zhberta' in self.config.n_model):
from pororo.models.brainbert import ZhbertaModel
model = ZhbertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)
if ('roberta' in self.config.n_model):
from pororo.models.brainbert import CustomRobertaModel
model = CustomRobertaModel.load_model(f'bert/{self.config.n_model}', self.config.lang).eval().to(device)
return PororoBertZeroShot(model, self.config)<|docstring|>Load user-selected task-specific model
Args:
device (str): device information
Returns:
object: User-selected task-specific model<|endoftext|> |
560b3e00b83da4fec99dbec8e7d47666a77e0ddd90e95303f7a70a9d5ad59e26 | def predict(self, sent: str, labels: List[str], **kwargs) -> Dict[(str, float)]:
'\n Conduct zero-shot classification\n\n Args:\n sent (str): sentence to be classified\n labels (List[str]): candidate labels\n\n Returns:\n Dict[str, float]: confidence scores corresponding to each input label\n\n '
cands = [self._template[self.config.lang].format(label=label) for label in labels]
result = dict()
for (label, cand) in zip(labels, cands):
if (self.config.lang == 'ko'):
tokens = self._model.encode(sent, cand, add_special_tokens=True, no_separator=False)
else:
tokens = self._model.encode(sent, cand, no_separator=False)
pred = self._model.predict('sentence_classification_head', tokens, return_logits=True)[:, [0, 2]]
prob = (pred.softmax(dim=1)[:, 1].item() * 100)
result[label] = round(prob, 2)
return result | Conduct zero-shot classification
Args:
sent (str): sentence to be classified
labels (List[str]): candidate labels
Returns:
Dict[str, float]: confidence scores corresponding to each input label | pororo/tasks/zero_shot_classification.py | predict | jayten42/pororo | 1,137 | python | def predict(self, sent: str, labels: List[str], **kwargs) -> Dict[(str, float)]:
'\n Conduct zero-shot classification\n\n Args:\n sent (str): sentence to be classified\n labels (List[str]): candidate labels\n\n Returns:\n Dict[str, float]: confidence scores corresponding to each input label\n\n '
cands = [self._template[self.config.lang].format(label=label) for label in labels]
result = dict()
for (label, cand) in zip(labels, cands):
if (self.config.lang == 'ko'):
tokens = self._model.encode(sent, cand, add_special_tokens=True, no_separator=False)
else:
tokens = self._model.encode(sent, cand, no_separator=False)
pred = self._model.predict('sentence_classification_head', tokens, return_logits=True)[:, [0, 2]]
prob = (pred.softmax(dim=1)[:, 1].item() * 100)
result[label] = round(prob, 2)
return result | def predict(self, sent: str, labels: List[str], **kwargs) -> Dict[(str, float)]:
'\n Conduct zero-shot classification\n\n Args:\n sent (str): sentence to be classified\n labels (List[str]): candidate labels\n\n Returns:\n Dict[str, float]: confidence scores corresponding to each input label\n\n '
cands = [self._template[self.config.lang].format(label=label) for label in labels]
result = dict()
for (label, cand) in zip(labels, cands):
if (self.config.lang == 'ko'):
tokens = self._model.encode(sent, cand, add_special_tokens=True, no_separator=False)
else:
tokens = self._model.encode(sent, cand, no_separator=False)
pred = self._model.predict('sentence_classification_head', tokens, return_logits=True)[:, [0, 2]]
prob = (pred.softmax(dim=1)[:, 1].item() * 100)
result[label] = round(prob, 2)
return result<|docstring|>Conduct zero-shot classification
Args:
sent (str): sentence to be classified
labels (List[str]): candidate labels
Returns:
Dict[str, float]: confidence scores corresponding to each input label<|endoftext|>
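A hedged sketch of driving this task through Pororo's public factory; the task alias 'zero-topic' follows the library's naming convention but should be treated as an assumption here, as should the example scores.

from pororo import Pororo

# Zero-shot topic classification over user-supplied candidate labels.
zsl = Pororo(task='zero-topic', lang='ko')
scores = zsl('손흥민이 리그 개막전에서 두 골을 넣었다.', ['스포츠', '경제', '정치'])
print(scores)  # e.g. {'스포츠': 94.2, '경제': 1.3, '정치': 0.9} -- per-label confidences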
6f386169b8b7abaef604a260539360af625a66c7d777fc3f8e720670660efcad | def pretty_print_POST(req):
'\n At this point it is completely built and ready\n to be fired; it is "prepared".\n\n However, pay attention to the formatting used in\n this function because it is programmed to be pretty\n printed and may differ from the actual request.\n\n https://stackoverflow.com/a/23816211\n '
print('{}\n{}\r\n{}\r\n\r\n{}'.format('-----------START-----------', ((req.method + ' ') + req.url), '\r\n'.join(('{}: {}'.format(k, v) for (k, v) in req.headers.items())), req.body)) | At this point it is completely built and ready
to be fired; it is "prepared".
However, pay attention to the formatting used in
this function because it is programmed to be pretty
printed and may differ from the actual request.
https://stackoverflow.com/a/23816211 | collaborator_api/client.py | pretty_print_POST | OpenUpSA/collaborator-api-client | 0 | python | def pretty_print_POST(req):
'\n At this point it is completely built and ready\n to be fired; it is "prepared".\n\n However, pay attention to the formatting used in\n this function because it is programmed to be pretty\n printed and may differ from the actual request.\n\n https://stackoverflow.com/a/23816211\n '
print('{}\n{}\r\n{}\r\n\r\n{}'.format('-----------START-----------', ((req.method + ' ') + req.url), '\r\n'.join(('{}: {}'.format(k, v) for (k, v) in req.headers.items())), req.body)) | def pretty_print_POST(req):
'\n At this point it is completely built and ready\n to be fired; it is "prepared".\n\n However, pay attention to the formatting used in\n this function because it is programmed to be pretty\n printed and may differ from the actual request.\n\n https://stackoverflow.com/a/23816211\n '
print('{}\n{}\r\n{}\r\n\r\n{}'.format('-----------START-----------', ((req.method + ' ') + req.url), '\r\n'.join(('{}: {}'.format(k, v) for (k, v) in req.headers.items())), req.body))<|docstring|>At this point it is completely built and ready
to be fired; it is "prepared".
However, pay attention to the formatting used in
this function because it is programmed to be pretty
printed and may differ from the actual request.
https://stackoverflow.com/a/23816211<|endoftext|> |
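A usage sketch, assuming the requests library this helper is written against: build a Request, prepare it, then pass the PreparedRequest to the helper before (optionally) sending it.

import requests

req = requests.Request(
    'POST', 'https://httpbin.org/post',
    headers={'X-Debug': '1'},
    data={'q': 'hello'},
)
prepared = req.prepare()        # the "prepared" request the docstring refers to
pretty_print_POST(prepared)
response = requests.Session().send(prepared)
print(response.status_code)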