body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
e6843de2d70bf38954a05a146ec782290ff0c7f638ea74335b172c31b6687602
def __init__(self, host='localhost', user=None, passwd='', database=None, port=3306, unix_socket=None, charset='', sql_mode=None, read_default_file=None, conv=decoders, use_unicode=None, client_flag=0, cursorclass=Cursor, init_command=None, connect_timeout=None, ssl=None, read_default_group=None, compress=None, named_pipe=None, no_delay=False, autocommit=False, db=None, io_loop=None): "\n Establish a connection to the MySQL database. Accepts several\n arguments:\n\n host: Host where the database server is located\n user: Username to log in as\n passwd: Password to use.\n database: Database to use, None to not use a particular one.\n port: MySQL port to use, default is usually OK.\n unix_socket: Optionally, you can use a unix socket rather than TCP/IP.\n charset: Charset you want to use.\n sql_mode: Default SQL_MODE to use.\n read_default_file: Specifies my.cnf file to read these parameters from under the [client] section.\n conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters.\n use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k.\n client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.\n cursorclass: Custom cursor class to use.\n init_command: Initial SQL statement to run when connection is established.\n connect_timeout: Timeout before throwing an exception when connecting.\n ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported.\n read_default_group: Group to read from in the configuration file.\n compress; Not supported\n named_pipe: Not supported\n no_delay: Disable Nagle's algorithm on the socket\n autocommit: Autocommit mode. None means use server default. (default: False)\n db: Alias for database. 
(for compatibility to MySQLdb)\n " if ((use_unicode is None) and (sys.version_info[0] > 2)): use_unicode = True if ((db is not None) and (database is None)): database = db if (compress or named_pipe): raise NotImplementedError('compress and named_pipe arguments are not supported') if (ssl and (('capath' in ssl) or ('cipher' in ssl))): raise NotImplementedError('ssl options capath and cipher are not supported') self.ssl = False if ssl: if (not SSL_ENABLED): raise NotImplementedError('ssl module not found') self.ssl = True client_flag |= SSL for k in ('key', 'cert', 'ca'): v = None if (k in ssl): v = ssl[k] setattr(self, k, v) if (read_default_group and (not read_default_file)): if sys.platform.startswith('win'): read_default_file = 'c:\\my.ini' else: read_default_file = '/etc/my.cnf' if read_default_file: if (not read_default_group): read_default_group = 'client' cfg = configparser.RawConfigParser() cfg.read(os.path.expanduser(read_default_file)) def _config(key, default): try: return cfg.get(read_default_group, key) except Exception: return default user = _config('user', user) passwd = _config('password', passwd) host = _config('host', host) database = _config('database', database) unix_socket = _config('socket', unix_socket) port = int(_config('port', port)) charset = _config('default-character-set', charset) self.host = host self.port = port self.user = (user or DEFAULT_USER) self.password = (passwd or '') self.db = database self.no_delay = no_delay self.unix_socket = unix_socket if charset: self.charset = charset self.use_unicode = True else: self.charset = DEFAULT_CHARSET self.use_unicode = False if (use_unicode is not None): self.use_unicode = use_unicode self.encoding = charset_by_name(self.charset).encoding client_flag |= CAPABILITIES client_flag |= MULTI_STATEMENTS if self.db: client_flag |= CONNECT_WITH_DB self.client_flag = client_flag self.cursorclass = cursorclass self.connect_timeout = connect_timeout self._result = None self._affected_rows = 0 
self.host_info = 'Not connected' self.autocommit_mode = autocommit self.encoders = encoders self.decoders = conv self.sql_mode = sql_mode self.init_command = init_command self.io_loop = (io_loop or tornado.ioloop.IOLoop.current()) self.stream = None
Establish a connection to the MySQL database. Accepts several arguments: host: Host where the database server is located user: Username to log in as passwd: Password to use. database: Database to use, None to not use a particular one. port: MySQL port to use, default is usually OK. unix_socket: Optionally, you can use a unix socket rather than TCP/IP. charset: Charset you want to use. sql_mode: Default SQL_MODE to use. read_default_file: Specifies my.cnf file to read these parameters from under the [client] section. conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters. use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k. client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT. cursorclass: Custom cursor class to use. init_command: Initial SQL statement to run when connection is established. connect_timeout: Timeout before throwing an exception when connecting. ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported. read_default_group: Group to read from in the configuration file. compress; Not supported named_pipe: Not supported no_delay: Disable Nagle's algorithm on the socket autocommit: Autocommit mode. None means use server default. (default: False) db: Alias for database. (for compatibility to MySQLdb)
asynctorndb/connection.py
__init__
mayflaver/AsyncTorndb
103
python
def __init__(self, host='localhost', user=None, passwd=, database=None, port=3306, unix_socket=None, charset=, sql_mode=None, read_default_file=None, conv=decoders, use_unicode=None, client_flag=0, cursorclass=Cursor, init_command=None, connect_timeout=None, ssl=None, read_default_group=None, compress=None, named_pipe=None, no_delay=False, autocommit=False, db=None, io_loop=None): "\n Establish a connection to the MySQL database. Accepts several\n arguments:\n\n host: Host where the database server is located\n user: Username to log in as\n passwd: Password to use.\n database: Database to use, None to not use a particular one.\n port: MySQL port to use, default is usually OK.\n unix_socket: Optionally, you can use a unix socket rather than TCP/IP.\n charset: Charset you want to use.\n sql_mode: Default SQL_MODE to use.\n read_default_file: Specifies my.cnf file to read these parameters from under the [client] section.\n conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters.\n use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k.\n client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.\n cursorclass: Custom cursor class to use.\n init_command: Initial SQL statement to run when connection is established.\n connect_timeout: Timeout before throwing an exception when connecting.\n ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported.\n read_default_group: Group to read from in the configuration file.\n compress; Not supported\n named_pipe: Not supported\n no_delay: Disable Nagle's algorithm on the socket\n autocommit: Autocommit mode. None means use server default. (default: False)\n db: Alias for database. 
(for compatibility to MySQLdb)\n " if ((use_unicode is None) and (sys.version_info[0] > 2)): use_unicode = True if ((db is not None) and (database is None)): database = db if (compress or named_pipe): raise NotImplementedError('compress and named_pipe arguments are not supported') if (ssl and (('capath' in ssl) or ('cipher' in ssl))): raise NotImplementedError('ssl options capath and cipher are not supported') self.ssl = False if ssl: if (not SSL_ENABLED): raise NotImplementedError('ssl module not found') self.ssl = True client_flag |= SSL for k in ('key', 'cert', 'ca'): v = None if (k in ssl): v = ssl[k] setattr(self, k, v) if (read_default_group and (not read_default_file)): if sys.platform.startswith('win'): read_default_file = 'c:\\my.ini' else: read_default_file = '/etc/my.cnf' if read_default_file: if (not read_default_group): read_default_group = 'client' cfg = configparser.RawConfigParser() cfg.read(os.path.expanduser(read_default_file)) def _config(key, default): try: return cfg.get(read_default_group, key) except Exception: return default user = _config('user', user) passwd = _config('password', passwd) host = _config('host', host) database = _config('database', database) unix_socket = _config('socket', unix_socket) port = int(_config('port', port)) charset = _config('default-character-set', charset) self.host = host self.port = port self.user = (user or DEFAULT_USER) self.password = (passwd or ) self.db = database self.no_delay = no_delay self.unix_socket = unix_socket if charset: self.charset = charset self.use_unicode = True else: self.charset = DEFAULT_CHARSET self.use_unicode = False if (use_unicode is not None): self.use_unicode = use_unicode self.encoding = charset_by_name(self.charset).encoding client_flag |= CAPABILITIES client_flag |= MULTI_STATEMENTS if self.db: client_flag |= CONNECT_WITH_DB self.client_flag = client_flag self.cursorclass = cursorclass self.connect_timeout = connect_timeout self._result = None self._affected_rows = 0 
self.host_info = 'Not connected' self.autocommit_mode = autocommit self.encoders = encoders self.decoders = conv self.sql_mode = sql_mode self.init_command = init_command self.io_loop = (io_loop or tornado.ioloop.IOLoop.current()) self.stream = None
def __init__(self, host='localhost', user=None, passwd=, database=None, port=3306, unix_socket=None, charset=, sql_mode=None, read_default_file=None, conv=decoders, use_unicode=None, client_flag=0, cursorclass=Cursor, init_command=None, connect_timeout=None, ssl=None, read_default_group=None, compress=None, named_pipe=None, no_delay=False, autocommit=False, db=None, io_loop=None): "\n Establish a connection to the MySQL database. Accepts several\n arguments:\n\n host: Host where the database server is located\n user: Username to log in as\n passwd: Password to use.\n database: Database to use, None to not use a particular one.\n port: MySQL port to use, default is usually OK.\n unix_socket: Optionally, you can use a unix socket rather than TCP/IP.\n charset: Charset you want to use.\n sql_mode: Default SQL_MODE to use.\n read_default_file: Specifies my.cnf file to read these parameters from under the [client] section.\n conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters.\n use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k.\n client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.\n cursorclass: Custom cursor class to use.\n init_command: Initial SQL statement to run when connection is established.\n connect_timeout: Timeout before throwing an exception when connecting.\n ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported.\n read_default_group: Group to read from in the configuration file.\n compress; Not supported\n named_pipe: Not supported\n no_delay: Disable Nagle's algorithm on the socket\n autocommit: Autocommit mode. None means use server default. (default: False)\n db: Alias for database. 
(for compatibility to MySQLdb)\n " if ((use_unicode is None) and (sys.version_info[0] > 2)): use_unicode = True if ((db is not None) and (database is None)): database = db if (compress or named_pipe): raise NotImplementedError('compress and named_pipe arguments are not supported') if (ssl and (('capath' in ssl) or ('cipher' in ssl))): raise NotImplementedError('ssl options capath and cipher are not supported') self.ssl = False if ssl: if (not SSL_ENABLED): raise NotImplementedError('ssl module not found') self.ssl = True client_flag |= SSL for k in ('key', 'cert', 'ca'): v = None if (k in ssl): v = ssl[k] setattr(self, k, v) if (read_default_group and (not read_default_file)): if sys.platform.startswith('win'): read_default_file = 'c:\\my.ini' else: read_default_file = '/etc/my.cnf' if read_default_file: if (not read_default_group): read_default_group = 'client' cfg = configparser.RawConfigParser() cfg.read(os.path.expanduser(read_default_file)) def _config(key, default): try: return cfg.get(read_default_group, key) except Exception: return default user = _config('user', user) passwd = _config('password', passwd) host = _config('host', host) database = _config('database', database) unix_socket = _config('socket', unix_socket) port = int(_config('port', port)) charset = _config('default-character-set', charset) self.host = host self.port = port self.user = (user or DEFAULT_USER) self.password = (passwd or ) self.db = database self.no_delay = no_delay self.unix_socket = unix_socket if charset: self.charset = charset self.use_unicode = True else: self.charset = DEFAULT_CHARSET self.use_unicode = False if (use_unicode is not None): self.use_unicode = use_unicode self.encoding = charset_by_name(self.charset).encoding client_flag |= CAPABILITIES client_flag |= MULTI_STATEMENTS if self.db: client_flag |= CONNECT_WITH_DB self.client_flag = client_flag self.cursorclass = cursorclass self.connect_timeout = connect_timeout self._result = None self._affected_rows = 0 
self.host_info = 'Not connected' self.autocommit_mode = autocommit self.encoders = encoders self.decoders = conv self.sql_mode = sql_mode self.init_command = init_command self.io_loop = (io_loop or tornado.ioloop.IOLoop.current()) self.stream = None<|docstring|>Establish a connection to the MySQL database. Accepts several arguments: host: Host where the database server is located user: Username to log in as passwd: Password to use. database: Database to use, None to not use a particular one. port: MySQL port to use, default is usually OK. unix_socket: Optionally, you can use a unix socket rather than TCP/IP. charset: Charset you want to use. sql_mode: Default SQL_MODE to use. read_default_file: Specifies my.cnf file to read these parameters from under the [client] section. conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters. use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k. client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT. cursorclass: Custom cursor class to use. init_command: Initial SQL statement to run when connection is established. connect_timeout: Timeout before throwing an exception when connecting. ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported. read_default_group: Group to read from in the configuration file. compress; Not supported named_pipe: Not supported no_delay: Disable Nagle's algorithm on the socket autocommit: Autocommit mode. None means use server default. (default: False) db: Alias for database. (for compatibility to MySQLdb)<|endoftext|>
77203e983723c9c3a26e0ef0250259aa610d8ecef9bbba42e28947b3d30b76e3
@coroutine def close(self): ' Send the quit message and close the socket ' if self.stream.closed(): raise Error('Already closed') send_data = (struct.pack('<i', 1) + int2byte(COM_QUIT)) try: (yield self.stream.write(send_data)) except Exception: pass finally: self.stream.close() self.stream = None
Send the quit message and close the socket
asynctorndb/connection.py
close
mayflaver/AsyncTorndb
103
python
@coroutine def close(self): ' ' if self.stream.closed(): raise Error('Already closed') send_data = (struct.pack('<i', 1) + int2byte(COM_QUIT)) try: (yield self.stream.write(send_data)) except Exception: pass finally: self.stream.close() self.stream = None
@coroutine def close(self): ' ' if self.stream.closed(): raise Error('Already closed') send_data = (struct.pack('<i', 1) + int2byte(COM_QUIT)) try: (yield self.stream.write(send_data)) except Exception: pass finally: self.stream.close() self.stream = None<|docstring|>Send the quit message and close the socket<|endoftext|>
f793d272d784f2100a08f4f709ac8bb5c05bf9432d4d0205d9e52415a23aefee
@coroutine def _send_autocommit_mode(self): ' Set whether or not to commit after every execute() ' self._execute_command(COM_QUERY, ('SET AUTOCOMMIT = %s' % self.escape(self.autocommit_mode))) (yield self._read_ok_packet())
Set whether or not to commit after every execute()
asynctorndb/connection.py
_send_autocommit_mode
mayflaver/AsyncTorndb
103
python
@coroutine def _send_autocommit_mode(self): ' ' self._execute_command(COM_QUERY, ('SET AUTOCOMMIT = %s' % self.escape(self.autocommit_mode))) (yield self._read_ok_packet())
@coroutine def _send_autocommit_mode(self): ' ' self._execute_command(COM_QUERY, ('SET AUTOCOMMIT = %s' % self.escape(self.autocommit_mode))) (yield self._read_ok_packet())<|docstring|>Set whether or not to commit after every execute()<|endoftext|>
4248b5cdbb9f9ba533c76318aa483a34421d7cdfec88bdc1b26bafc106735559
@coroutine def commit(self): ' Commit changes to stable storage ' self._execute_command(COM_QUERY, 'COMMIT') (yield self._read_ok_packet())
Commit changes to stable storage
asynctorndb/connection.py
commit
mayflaver/AsyncTorndb
103
python
@coroutine def commit(self): ' ' self._execute_command(COM_QUERY, 'COMMIT') (yield self._read_ok_packet())
@coroutine def commit(self): ' ' self._execute_command(COM_QUERY, 'COMMIT') (yield self._read_ok_packet())<|docstring|>Commit changes to stable storage<|endoftext|>
207f216fdd24e9a5042c1331ffafb8dd7861a3c64efc6164defc302c782b6305
@coroutine def rollback(self): ' Roll back the current transaction ' (yield self._execute_command(COM_QUERY, 'ROLLBACK')) (yield self._read_ok_packet())
Roll back the current transaction
asynctorndb/connection.py
rollback
mayflaver/AsyncTorndb
103
python
@coroutine def rollback(self): ' ' (yield self._execute_command(COM_QUERY, 'ROLLBACK')) (yield self._read_ok_packet())
@coroutine def rollback(self): ' ' (yield self._execute_command(COM_QUERY, 'ROLLBACK')) (yield self._read_ok_packet())<|docstring|>Roll back the current transaction<|endoftext|>
30da28a9ba880e0d9b98716ace96819103d6f60e2a86c002b44e4a73569b2ff7
@coroutine def select_db(self, db): 'Set current db' (yield self._execute_command(COM_INIT_DB, db)) (yield self._read_ok_packet())
Set current db
asynctorndb/connection.py
select_db
mayflaver/AsyncTorndb
103
python
@coroutine def select_db(self, db): (yield self._execute_command(COM_INIT_DB, db)) (yield self._read_ok_packet())
@coroutine def select_db(self, db): (yield self._execute_command(COM_INIT_DB, db)) (yield self._read_ok_packet())<|docstring|>Set current db<|endoftext|>
aa8a4838ff689668790e3f0c1bb93bb5f64a4ba8a57738f33201e97c76e94183
def escape(self, obj): ' Escape whatever value you pass to it ' if isinstance(obj, str_type): return (("'" + self.escape_string(obj)) + "'") return escape_item(obj, self.charset)
Escape whatever value you pass to it
asynctorndb/connection.py
escape
mayflaver/AsyncTorndb
103
python
def escape(self, obj): ' ' if isinstance(obj, str_type): return (("'" + self.escape_string(obj)) + "'") return escape_item(obj, self.charset)
def escape(self, obj): ' ' if isinstance(obj, str_type): return (("'" + self.escape_string(obj)) + "'") return escape_item(obj, self.charset)<|docstring|>Escape whatever value you pass to it<|endoftext|>
465011ceb4399dd42cf42227c3f2f407d4c45a5683ac6a0915d759f2d50dac7d
def literal(self, obj): 'Alias for escape()' return self.escape(obj)
Alias for escape()
asynctorndb/connection.py
literal
mayflaver/AsyncTorndb
103
python
def literal(self, obj): return self.escape(obj)
def literal(self, obj): return self.escape(obj)<|docstring|>Alias for escape()<|endoftext|>
d26a0f6f23bf2918ee6bc50638a66969e229a10255726bdbf871e3de755ff609
def cursor(self, cursor=None): ' Create a new cursor to execute queries with ' if cursor: return cursor(self) return self.cursorclass(self)
Create a new cursor to execute queries with
asynctorndb/connection.py
cursor
mayflaver/AsyncTorndb
103
python
def cursor(self, cursor=None): ' ' if cursor: return cursor(self) return self.cursorclass(self)
def cursor(self, cursor=None): ' ' if cursor: return cursor(self) return self.cursorclass(self)<|docstring|>Create a new cursor to execute queries with<|endoftext|>
3b92166da523960b4ca2411ab7c7f9186d061f806d1296ff1072816d8d0f3778
def __enter__(self): ' Context manager that returns a Cursor ' return self.cursor()
Context manager that returns a Cursor
asynctorndb/connection.py
__enter__
mayflaver/AsyncTorndb
103
python
def __enter__(self): ' ' return self.cursor()
def __enter__(self): ' ' return self.cursor()<|docstring|>Context manager that returns a Cursor<|endoftext|>
cb5fa828fb96b1eee737d252a1733d164859dcd29264fef9c7e1641483d698ce
@coroutine def __exit__(self, exc, value, traceback): ' On successful exit, commit. On exception, rollback. ' if exc: (yield self.rollback()) else: (yield self.commit())
On successful exit, commit. On exception, rollback.
asynctorndb/connection.py
__exit__
mayflaver/AsyncTorndb
103
python
@coroutine def __exit__(self, exc, value, traceback): ' ' if exc: (yield self.rollback()) else: (yield self.commit())
@coroutine def __exit__(self, exc, value, traceback): ' ' if exc: (yield self.rollback()) else: (yield self.commit())<|docstring|>On successful exit, commit. On exception, rollback.<|endoftext|>
dda212a01dfac34c4569c411938e9b3567375862a62bdbbf3186b836ce02491f
@coroutine def query(self, sql, *args): 'Returns a row list for the given query and parameters.' cur = self.cursor() try: (yield cur.execute(sql, *args)) column_names = [d[0] for d in cur.description] raise Return([Row(zip(column_names, row)) for row in cur]) finally: cur.close()
Returns a row list for the given query and parameters.
asynctorndb/connection.py
query
mayflaver/AsyncTorndb
103
python
@coroutine def query(self, sql, *args): cur = self.cursor() try: (yield cur.execute(sql, *args)) column_names = [d[0] for d in cur.description] raise Return([Row(zip(column_names, row)) for row in cur]) finally: cur.close()
@coroutine def query(self, sql, *args): cur = self.cursor() try: (yield cur.execute(sql, *args)) column_names = [d[0] for d in cur.description] raise Return([Row(zip(column_names, row)) for row in cur]) finally: cur.close()<|docstring|>Returns a row list for the given query and parameters.<|endoftext|>
0b4ad3a82cd800b4c1dba5b8595da05e7178c1e4b96299516a210faf226fae5d
@coroutine def get(self, sql, *args): 'Returns the (singular) row returned by the given query.\n \n If the query has no results, returns None. If it has\n more than one result, raises an exception.\n ' rows = (yield self.query(sql, *args)) if (not rows): raise Return(None) elif (len(rows) > 1): raise Exception('Multiple rows returned for Database.get() query') else: raise Return(rows[0])
Returns the (singular) row returned by the given query. If the query has no results, returns None. If it has more than one result, raises an exception.
asynctorndb/connection.py
get
mayflaver/AsyncTorndb
103
python
@coroutine def get(self, sql, *args): 'Returns the (singular) row returned by the given query.\n \n If the query has no results, returns None. If it has\n more than one result, raises an exception.\n ' rows = (yield self.query(sql, *args)) if (not rows): raise Return(None) elif (len(rows) > 1): raise Exception('Multiple rows returned for Database.get() query') else: raise Return(rows[0])
@coroutine def get(self, sql, *args): 'Returns the (singular) row returned by the given query.\n \n If the query has no results, returns None. If it has\n more than one result, raises an exception.\n ' rows = (yield self.query(sql, *args)) if (not rows): raise Return(None) elif (len(rows) > 1): raise Exception('Multiple rows returned for Database.get() query') else: raise Return(rows[0])<|docstring|>Returns the (singular) row returned by the given query. If the query has no results, returns None. If it has more than one result, raises an exception.<|endoftext|>
2a493e955a17cfb387aa184bca4742b513a81907f6e61e7dc9f12cdb9d9adadf
@coroutine def execute(self, sql, *args): 'Executes the given query, returning the lastrowid from the query.' raise Return((yield self.execute_lastrowid(sql, *args)))
Executes the given query, returning the lastrowid from the query.
asynctorndb/connection.py
execute
mayflaver/AsyncTorndb
103
python
@coroutine def execute(self, sql, *args): raise Return((yield self.execute_lastrowid(sql, *args)))
@coroutine def execute(self, sql, *args): raise Return((yield self.execute_lastrowid(sql, *args)))<|docstring|>Executes the given query, returning the lastrowid from the query.<|endoftext|>
4f2c61522a570876a64d2eef24f6d3cd04f3f47ffd5bcf4cb695fafde639a3c9
@coroutine def execute_lastrowid(self, sql, *args): 'Executes the given query, returning the lastrowid from the query.' cur = self.cursor() try: (yield self._execute(cur, sql, *args)) raise Return(cur.lastrowid) finally: cur.close()
Executes the given query, returning the lastrowid from the query.
asynctorndb/connection.py
execute_lastrowid
mayflaver/AsyncTorndb
103
python
@coroutine def execute_lastrowid(self, sql, *args): cur = self.cursor() try: (yield self._execute(cur, sql, *args)) raise Return(cur.lastrowid) finally: cur.close()
@coroutine def execute_lastrowid(self, sql, *args): cur = self.cursor() try: (yield self._execute(cur, sql, *args)) raise Return(cur.lastrowid) finally: cur.close()<|docstring|>Executes the given query, returning the lastrowid from the query.<|endoftext|>
3fccb9a6ffbe3e47ef83f7b670a9543963733e5544ca64f5f4e35fb598b30879
@coroutine def execute_rowcount(self, sql, *args): 'Executes the given query, returning the rowcount from the query.' cur = self.cursor() try: (yield self._execute(cur, sql, *args)) raise Return(cur.rowcount) finally: cur.close()
Executes the given query, returning the rowcount from the query.
asynctorndb/connection.py
execute_rowcount
mayflaver/AsyncTorndb
103
python
@coroutine def execute_rowcount(self, sql, *args): cur = self.cursor() try: (yield self._execute(cur, sql, *args)) raise Return(cur.rowcount) finally: cur.close()
@coroutine def execute_rowcount(self, sql, *args): cur = self.cursor() try: (yield self._execute(cur, sql, *args)) raise Return(cur.rowcount) finally: cur.close()<|docstring|>Executes the given query, returning the rowcount from the query.<|endoftext|>
d2d196e782c712454c108e683b043edb5dd592da3b69b8bda34446de5a81235c
@coroutine def executemany(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n ' raise Return((yield self.executemany_lastrowid(sql, args)))
Executes the given query against all the given param sequences. We return the lastrowid from the query.
asynctorndb/connection.py
executemany
mayflaver/AsyncTorndb
103
python
@coroutine def executemany(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n ' raise Return((yield self.executemany_lastrowid(sql, args)))
@coroutine def executemany(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n ' raise Return((yield self.executemany_lastrowid(sql, args)))<|docstring|>Executes the given query against all the given param sequences. We return the lastrowid from the query.<|endoftext|>
a375c1a40d92a541f11ec463ccb5d9c4e0c50c056c350a0605d4e7a1370ca33d
@coroutine def executemany_lastrowid(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n ' cur = self.cursor() try: (yield cur.executemany(sql, args)) raise Return(cur.lastrowid) finally: cur.close()
Executes the given query against all the given param sequences. We return the lastrowid from the query.
asynctorndb/connection.py
executemany_lastrowid
mayflaver/AsyncTorndb
103
python
@coroutine def executemany_lastrowid(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n ' cur = self.cursor() try: (yield cur.executemany(sql, args)) raise Return(cur.lastrowid) finally: cur.close()
@coroutine def executemany_lastrowid(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n ' cur = self.cursor() try: (yield cur.executemany(sql, args)) raise Return(cur.lastrowid) finally: cur.close()<|docstring|>Executes the given query against all the given param sequences. We return the lastrowid from the query.<|endoftext|>
a698318ad340832d5bc5225ae7e27fdd1edcf0dcff7df755c5c1229eeca21193
@coroutine def executemany_rowcount(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the rowcount from the query.\n ' cur = self.cursor() try: (yield cur.executemany(sql, args)) raise Return(cur.rowcount) finally: cur.close()
Executes the given query against all the given param sequences. We return the rowcount from the query.
asynctorndb/connection.py
executemany_rowcount
mayflaver/AsyncTorndb
103
python
@coroutine def executemany_rowcount(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the rowcount from the query.\n ' cur = self.cursor() try: (yield cur.executemany(sql, args)) raise Return(cur.rowcount) finally: cur.close()
@coroutine def executemany_rowcount(self, sql, args): 'Executes the given query against all the given param sequences.\n\n We return the rowcount from the query.\n ' cur = self.cursor() try: (yield cur.executemany(sql, args)) raise Return(cur.rowcount) finally: cur.close()<|docstring|>Executes the given query against all the given param sequences. We return the rowcount from the query.<|endoftext|>
abfec0e564be9158ea64461ec5db6841e63eee266e2cc8f739169dd06b1e3cec
def ping(self, reconnect=True): ' Check if the server is alive ' if (self.socket is None): if reconnect: self._connect() reconnect = False else: raise Error('Already closed') try: self._execute_command(COM_PING, '') return self._read_ok_packet() except Exception: if reconnect: self._connect() return self.ping(False) else: raise
Check if the server is alive
asynctorndb/connection.py
ping
mayflaver/AsyncTorndb
103
python
def ping(self, reconnect=True): ' ' if (self.socket is None): if reconnect: self._connect() reconnect = False else: raise Error('Already closed') try: self._execute_command(COM_PING, ) return self._read_ok_packet() except Exception: if reconnect: self._connect() return self.ping(False) else: raise
def ping(self, reconnect=True): ' ' if (self.socket is None): if reconnect: self._connect() reconnect = False else: raise Error('Already closed') try: self._execute_command(COM_PING, ) return self._read_ok_packet() except Exception: if reconnect: self._connect() return self.ping(False) else: raise<|docstring|>Check if the server is alive<|endoftext|>
b61757288896040b2e9625db22136fac3746c48bc9559b17902694d34b74553b
@coroutine def _read_packet(self, packet_type=MysqlPacket): 'Read an entire "mysql packet" in its entirety from the network\n and return a MysqlPacket type that represents the results.\n ' packet = packet_type(self) (yield packet.recv_packet()) packet.check_error() raise Return(packet)
Read an entire "mysql packet" in its entirety from the network and return a MysqlPacket type that represents the results.
asynctorndb/connection.py
_read_packet
mayflaver/AsyncTorndb
103
python
@coroutine def _read_packet(self, packet_type=MysqlPacket): 'Read an entire "mysql packet" in its entirety from the network\n and return a MysqlPacket type that represents the results.\n ' packet = packet_type(self) (yield packet.recv_packet()) packet.check_error() raise Return(packet)
@coroutine def _read_packet(self, packet_type=MysqlPacket): 'Read an entire "mysql packet" in its entirety from the network\n and return a MysqlPacket type that represents the results.\n ' packet = packet_type(self) (yield packet.recv_packet()) packet.check_error() raise Return(packet)<|docstring|>Read an entire "mysql packet" in its entirety from the network and return a MysqlPacket type that represents the results.<|endoftext|>
c570dc702a17c36f36b6914db926b76d7367bd0f167a20bd49a5f515b8f749a0
@coroutine def _read_rowdata_packet(self): 'Read a rowdata packet for each data row in the result set.' rows = [] while True: packet = MysqlPacket(self.connection) buff = b'' while True: packet_header = (yield self.connection.stream.read_bytes(4)) if DEBUG: dump_packet(packet_header) packet_length_bin = packet_header[:3] self._packet_number = byte2int(packet_header[3]) bin_length = (packet_length_bin + b'\x00') bytes_to_read = struct.unpack('<I', bin_length)[0] recv_data = (yield self.connection.stream.read_bytes(bytes_to_read)) if DEBUG: dump_packet(recv_data) buff += recv_data if (bytes_to_read < MAX_PACKET_LEN): break packet.set_data(buff) if self._check_packet_is_eof(packet): self.connection = None break rows.append(self._read_row_from_packet(packet)) self.affected_rows = len(rows) self.rows = tuple(rows)
Read a rowdata packet for each data row in the result set.
asynctorndb/connection.py
_read_rowdata_packet
mayflaver/AsyncTorndb
103
python
@coroutine def _read_rowdata_packet(self): rows = [] while True: packet = MysqlPacket(self.connection) buff = b while True: packet_header = (yield self.connection.stream.read_bytes(4)) if DEBUG: dump_packet(packet_header) packet_length_bin = packet_header[:3] self._packet_number = byte2int(packet_header[3]) bin_length = (packet_length_bin + b'\x00') bytes_to_read = struct.unpack('<I', bin_length)[0] recv_data = (yield self.connection.stream.read_bytes(bytes_to_read)) if DEBUG: dump_packet(recv_data) buff += recv_data if (bytes_to_read < MAX_PACKET_LEN): break packet.set_data(buff) if self._check_packet_is_eof(packet): self.connection = None break rows.append(self._read_row_from_packet(packet)) self.affected_rows = len(rows) self.rows = tuple(rows)
@coroutine def _read_rowdata_packet(self): rows = [] while True: packet = MysqlPacket(self.connection) buff = b while True: packet_header = (yield self.connection.stream.read_bytes(4)) if DEBUG: dump_packet(packet_header) packet_length_bin = packet_header[:3] self._packet_number = byte2int(packet_header[3]) bin_length = (packet_length_bin + b'\x00') bytes_to_read = struct.unpack('<I', bin_length)[0] recv_data = (yield self.connection.stream.read_bytes(bytes_to_read)) if DEBUG: dump_packet(recv_data) buff += recv_data if (bytes_to_read < MAX_PACKET_LEN): break packet.set_data(buff) if self._check_packet_is_eof(packet): self.connection = None break rows.append(self._read_row_from_packet(packet)) self.affected_rows = len(rows) self.rows = tuple(rows)<|docstring|>Read a rowdata packet for each data row in the result set.<|endoftext|>
e39fa8f7cefdd754379d1309b1ffde548363dcd13e5a38113385442a346639de
@coroutine def _get_descriptions(self): 'Read a column descriptor packet for each column in the result.' self.fields = [] description = [] for i in range_type(self.field_count): field = (yield self.connection._read_packet(FieldDescriptorPacket)) self.fields.append(field) description.append(field.description()) eof_packet = (yield self.connection._read_packet()) assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF' self.description = tuple(description)
Read a column descriptor packet for each column in the result.
asynctorndb/connection.py
_get_descriptions
mayflaver/AsyncTorndb
103
python
@coroutine def _get_descriptions(self): self.fields = [] description = [] for i in range_type(self.field_count): field = (yield self.connection._read_packet(FieldDescriptorPacket)) self.fields.append(field) description.append(field.description()) eof_packet = (yield self.connection._read_packet()) assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF' self.description = tuple(description)
@coroutine def _get_descriptions(self): self.fields = [] description = [] for i in range_type(self.field_count): field = (yield self.connection._read_packet(FieldDescriptorPacket)) self.fields.append(field) description.append(field.description()) eof_packet = (yield self.connection._read_packet()) assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF' self.description = tuple(description)<|docstring|>Read a column descriptor packet for each column in the result.<|endoftext|>
8e651b3718a8e1ed5acb36dda5944acabf2c55bc6120ca0643674f959bd0568f
@property def libs(self): "Export the libraries of SuiteSparse.\n Sample usage: spec['suite-sparse'].libs.ld_flags\n spec['suite-sparse:klu,btf'].libs.ld_flags\n " all_comps = ['klu', 'btf', 'umfpack', 'cholmod', 'colamd', 'amd', 'camd', 'ccolamd', 'cxsparse', 'ldl', 'rbio', 'spqr', 'suitesparseconfig'] query_parameters = self.spec.last_query.extra_parameters comps = (all_comps if (not query_parameters) else query_parameters) libs = find_libraries([('lib' + c) for c in comps], root=self.prefix.lib, shared=True, recursive=False) if (not libs): return None libs += find_system_libraries('librt') return libs
Export the libraries of SuiteSparse. Sample usage: spec['suite-sparse'].libs.ld_flags spec['suite-sparse:klu,btf'].libs.ld_flags
var/spack/repos/builtin/packages/suite-sparse/package.py
libs
kresan/spack
3
python
@property def libs(self): "Export the libraries of SuiteSparse.\n Sample usage: spec['suite-sparse'].libs.ld_flags\n spec['suite-sparse:klu,btf'].libs.ld_flags\n " all_comps = ['klu', 'btf', 'umfpack', 'cholmod', 'colamd', 'amd', 'camd', 'ccolamd', 'cxsparse', 'ldl', 'rbio', 'spqr', 'suitesparseconfig'] query_parameters = self.spec.last_query.extra_parameters comps = (all_comps if (not query_parameters) else query_parameters) libs = find_libraries([('lib' + c) for c in comps], root=self.prefix.lib, shared=True, recursive=False) if (not libs): return None libs += find_system_libraries('librt') return libs
@property def libs(self): "Export the libraries of SuiteSparse.\n Sample usage: spec['suite-sparse'].libs.ld_flags\n spec['suite-sparse:klu,btf'].libs.ld_flags\n " all_comps = ['klu', 'btf', 'umfpack', 'cholmod', 'colamd', 'amd', 'camd', 'ccolamd', 'cxsparse', 'ldl', 'rbio', 'spqr', 'suitesparseconfig'] query_parameters = self.spec.last_query.extra_parameters comps = (all_comps if (not query_parameters) else query_parameters) libs = find_libraries([('lib' + c) for c in comps], root=self.prefix.lib, shared=True, recursive=False) if (not libs): return None libs += find_system_libraries('librt') return libs<|docstring|>Export the libraries of SuiteSparse. Sample usage: spec['suite-sparse'].libs.ld_flags spec['suite-sparse:klu,btf'].libs.ld_flags<|endoftext|>
2381f372cd69d06a1d537f33efad7eb509bcb034362f2c65cf6f64f51a2547a1
def test_get_system_date_time(self): '\n Test we are able to get the correct time\n ' t1 = datetime.datetime.now() res = self.run_function('system.get_system_date_time') t2 = datetime.datetime.strptime(res, self.fmt_str) msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(t1, t2) self.assertTrue(self._same_times(t1, t2), msg=msg)
Test we are able to get the correct time
tests/integration/modules/system.py
test_get_system_date_time
ahammond/salt
0
python
def test_get_system_date_time(self): '\n \n ' t1 = datetime.datetime.now() res = self.run_function('system.get_system_date_time') t2 = datetime.datetime.strptime(res, self.fmt_str) msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(t1, t2) self.assertTrue(self._same_times(t1, t2), msg=msg)
def test_get_system_date_time(self): '\n \n ' t1 = datetime.datetime.now() res = self.run_function('system.get_system_date_time') t2 = datetime.datetime.strptime(res, self.fmt_str) msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(t1, t2) self.assertTrue(self._same_times(t1, t2), msg=msg)<|docstring|>Test we are able to get the correct time<|endoftext|>
763b095b9cdf7c1de8080271b283f9d7c2283c4b040ca3ca8cf62d5c84facc73
def test_get_system_date_time_utc(self): '\n Test we are able to get the correct time with utc\n ' t1 = datetime.datetime.utcnow() res = self.run_function('system.get_system_date_time', utc=True) t2 = datetime.datetime.strptime(res, self.fmt_str) msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(t1, t2) self.assertTrue(self._same_times(t1, t2), msg=msg)
Test we are able to get the correct time with utc
tests/integration/modules/system.py
test_get_system_date_time_utc
ahammond/salt
0
python
def test_get_system_date_time_utc(self): '\n \n ' t1 = datetime.datetime.utcnow() res = self.run_function('system.get_system_date_time', utc=True) t2 = datetime.datetime.strptime(res, self.fmt_str) msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(t1, t2) self.assertTrue(self._same_times(t1, t2), msg=msg)
def test_get_system_date_time_utc(self): '\n \n ' t1 = datetime.datetime.utcnow() res = self.run_function('system.get_system_date_time', utc=True) t2 = datetime.datetime.strptime(res, self.fmt_str) msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(t1, t2) self.assertTrue(self._same_times(t1, t2), msg=msg)<|docstring|>Test we are able to get the correct time with utc<|endoftext|>
f53e63ba71570e82fec718e93c9c95d40f5a1a2385fbe89ee8a538f777f098d2
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() self._set_time(self._fake_time) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(self._same_times(time_now, self._fake_time), msg=msg) self._restore_time()
Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.
tests/integration/modules/system.py
test_set_system_date_time
ahammond/salt
0
python
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() self._set_time(self._fake_time) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(self._same_times(time_now, self._fake_time), msg=msg) self._restore_time()
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() self._set_time(self._fake_time) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(self._same_times(time_now, self._fake_time), msg=msg) self._restore_time()<|docstring|>Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.<|endoftext|>
92020a1fed25a8a16207de286e32304305e8e48fb0473d55217bc60dc0425f41
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_utc(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, utc=True) time_now = datetime.datetime.utcnow() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time)), msg=msg) self._restore_time(utc=True)
Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.
tests/integration/modules/system.py
test_set_system_date_time_utc
ahammond/salt
0
python
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_utc(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, utc=True) time_now = datetime.datetime.utcnow() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time)), msg=msg) self._restore_time(utc=True)
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_utc(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, utc=True) time_now = datetime.datetime.utcnow() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time)), msg=msg) self._restore_time(utc=True)<|docstring|>Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.<|endoftext|>
06f7c5455a39f1143d72966058dc25288293b1f4fc431fd65150704e91aa12bd
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_posix(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, posix=True) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time, seconds_diff=60)), msg=msg) self._restore_time()
Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.
tests/integration/modules/system.py
test_set_system_date_time_posix
ahammond/salt
0
python
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_posix(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, posix=True) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time, seconds_diff=60)), msg=msg) self._restore_time()
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_posix(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, posix=True) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time, seconds_diff=60)), msg=msg) self._restore_time()<|docstring|>Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.<|endoftext|>
81948af8fc56b3bbe9898f60edc5c58a6d4200fd32e0a002e6d44b9ad16ecee7
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_posix_utc(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, posix=True, utc=True) time_now = datetime.datetime.utcnow() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time, seconds_diff=60)), msg=msg) self._restore_time(utc=True)
Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.
tests/integration/modules/system.py
test_set_system_date_time_posix_utc
ahammond/salt
0
python
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_posix_utc(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, posix=True, utc=True) time_now = datetime.datetime.utcnow() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time, seconds_diff=60)), msg=msg) self._restore_time(utc=True)
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date_time_posix_utc(self): '\n Test changing the system clock. We are only able to set it up to a\n resolution of a second so this test may appear to run in negative time.\n ' self._fake_time = datetime.datetime.strptime('1981-02-03 04:05:06', self.fmt_str) self._save_time() result = self._set_time(self._fake_time, posix=True, utc=True) time_now = datetime.datetime.utcnow() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue((result and self._same_times(time_now, self._fake_time, seconds_diff=60)), msg=msg) self._restore_time(utc=True)<|docstring|>Test changing the system clock. We are only able to set it up to a resolution of a second so this test may appear to run in negative time.<|endoftext|>
5aae2047c1b14f8bb0a7335eabd191b0120c173cca1767a66c76a4297fcedc31
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_time(self): '\n Test setting the system time without adjusting the date.\n ' self._fake_time = datetime.datetime.combine(datetime.date.today(), datetime.time(4, 5, 0)) self._save_time() result = self.run_function('system.set_system_time', ['04:05:00']) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(result) self.assertTrue(((time_now.hour == 4) and (time_now.minute == 5) and (time_now.second < 10)), msg=msg) self._restore_time()
Test setting the system time without adjusting the date.
tests/integration/modules/system.py
test_set_system_time
ahammond/salt
0
python
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_time(self): '\n \n ' self._fake_time = datetime.datetime.combine(datetime.date.today(), datetime.time(4, 5, 0)) self._save_time() result = self.run_function('system.set_system_time', ['04:05:00']) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(result) self.assertTrue(((time_now.hour == 4) and (time_now.minute == 5) and (time_now.second < 10)), msg=msg) self._restore_time()
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_time(self): '\n \n ' self._fake_time = datetime.datetime.combine(datetime.date.today(), datetime.time(4, 5, 0)) self._save_time() result = self.run_function('system.set_system_time', ['04:05:00']) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(result) self.assertTrue(((time_now.hour == 4) and (time_now.minute == 5) and (time_now.second < 10)), msg=msg) self._restore_time()<|docstring|>Test setting the system time without adjusting the date.<|endoftext|>
fa4e4a06ff03ce7b7597b56496581c96ece734c9fa9db4e138796890d2baa1ae
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date(self): '\n Test setting the system date without adjusting the time.\n ' self._fake_time = datetime.datetime.combine(datetime.datetime(2000, 12, 25), datetime.datetime.now().time()) self._save_time() result = self.run_function('system.set_system_date', ['2000-12-25']) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(result) self.assertTrue(((time_now.year == 2000) and (time_now.day == 25) and (time_now.month == 12) and (time_now.hour == self._orig_time.hour) and (time_now.minute == self._orig_time.minute)), msg=msg) self._restore_time()
Test setting the system date without adjusting the time.
tests/integration/modules/system.py
test_set_system_date
ahammond/salt
0
python
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date(self): '\n \n ' self._fake_time = datetime.datetime.combine(datetime.datetime(2000, 12, 25), datetime.datetime.now().time()) self._save_time() result = self.run_function('system.set_system_date', ['2000-12-25']) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(result) self.assertTrue(((time_now.year == 2000) and (time_now.day == 25) and (time_now.month == 12) and (time_now.hour == self._orig_time.hour) and (time_now.minute == self._orig_time.minute)), msg=msg) self._restore_time()
@destructiveTest @skipIf((os.geteuid() != 0), 'you must be root to run this test') def test_set_system_date(self): '\n \n ' self._fake_time = datetime.datetime.combine(datetime.datetime(2000, 12, 25), datetime.datetime.now().time()) self._save_time() result = self.run_function('system.set_system_date', ['2000-12-25']) time_now = datetime.datetime.now() msg = 'Difference in times is too large. Now: {0} Fake: {1}'.format(time_now, self._fake_time) self.assertTrue(result) self.assertTrue(((time_now.year == 2000) and (time_now.day == 25) and (time_now.month == 12) and (time_now.hour == self._orig_time.hour) and (time_now.minute == self._orig_time.minute)), msg=msg) self._restore_time()<|docstring|>Test setting the system date without adjusting the time.<|endoftext|>
2268b181a174287b1abd966174e76f19d5f96ce71822b4cf74e8ff388ac41f02
def delete_user(self: api_client.Api, path_params: RequestPathParams=frozendict(), stream: bool=False, timeout: typing.Optional[typing.Union[(int, typing.Tuple)]]=None, skip_deserialization: bool=False) -> typing.Union[api_client.ApiResponseWithoutDeserialization]: '\n Delete user\n :param skip_deserialization: If true then api_response.response will be set but\n api_response.body and api_response.headers will not be deserialized into schema\n class instances\n ' self._verify_typed_dict_inputs(RequestPathParams, path_params) _path_params = {} for parameter in (request_path_username,): parameter_data = path_params.get(parameter.name, unset) if (parameter_data is unset): continue serialized_data = parameter.serialize(parameter_data) _path_params.update(serialized_data) response = self.api_client.call_api(resource_path=_path, method=_method, path_params=_path_params, stream=stream, timeout=timeout) if skip_deserialization: api_response = api_client.ApiResponseWithoutDeserialization(response=response) else: response_for_status = _status_code_to_response.get(str(response.status)) if response_for_status: api_response = response_for_status.deserialize(response, self.api_client.configuration) else: api_response = api_client.ApiResponseWithoutDeserialization(response=response) if (not (200 <= response.status <= 299)): raise exceptions.ApiException(api_response=api_response) return api_response
Delete user :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances
samples/openapi3/client/petstore/python-experimental/petstore_api/api/user_api_endpoints/delete_user.py
delete_user
joaocmendes/openapi-generator
0
python
def delete_user(self: api_client.Api, path_params: RequestPathParams=frozendict(), stream: bool=False, timeout: typing.Optional[typing.Union[(int, typing.Tuple)]]=None, skip_deserialization: bool=False) -> typing.Union[api_client.ApiResponseWithoutDeserialization]: '\n Delete user\n :param skip_deserialization: If true then api_response.response will be set but\n api_response.body and api_response.headers will not be deserialized into schema\n class instances\n ' self._verify_typed_dict_inputs(RequestPathParams, path_params) _path_params = {} for parameter in (request_path_username,): parameter_data = path_params.get(parameter.name, unset) if (parameter_data is unset): continue serialized_data = parameter.serialize(parameter_data) _path_params.update(serialized_data) response = self.api_client.call_api(resource_path=_path, method=_method, path_params=_path_params, stream=stream, timeout=timeout) if skip_deserialization: api_response = api_client.ApiResponseWithoutDeserialization(response=response) else: response_for_status = _status_code_to_response.get(str(response.status)) if response_for_status: api_response = response_for_status.deserialize(response, self.api_client.configuration) else: api_response = api_client.ApiResponseWithoutDeserialization(response=response) if (not (200 <= response.status <= 299)): raise exceptions.ApiException(api_response=api_response) return api_response
def delete_user(self: api_client.Api, path_params: RequestPathParams=frozendict(), stream: bool=False, timeout: typing.Optional[typing.Union[(int, typing.Tuple)]]=None, skip_deserialization: bool=False) -> typing.Union[api_client.ApiResponseWithoutDeserialization]: '\n Delete user\n :param skip_deserialization: If true then api_response.response will be set but\n api_response.body and api_response.headers will not be deserialized into schema\n class instances\n ' self._verify_typed_dict_inputs(RequestPathParams, path_params) _path_params = {} for parameter in (request_path_username,): parameter_data = path_params.get(parameter.name, unset) if (parameter_data is unset): continue serialized_data = parameter.serialize(parameter_data) _path_params.update(serialized_data) response = self.api_client.call_api(resource_path=_path, method=_method, path_params=_path_params, stream=stream, timeout=timeout) if skip_deserialization: api_response = api_client.ApiResponseWithoutDeserialization(response=response) else: response_for_status = _status_code_to_response.get(str(response.status)) if response_for_status: api_response = response_for_status.deserialize(response, self.api_client.configuration) else: api_response = api_client.ApiResponseWithoutDeserialization(response=response) if (not (200 <= response.status <= 299)): raise exceptions.ApiException(api_response=api_response) return api_response<|docstring|>Delete user :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances<|endoftext|>
f08fca5774b8a7114e55b445758dc106c276399f6612dd169676c85fab35bfec
def angles_mean(time, rad, angles, path): 'Generate mean along angles for single simulation.' out = readpvd(task_root=path, task_id='model', pcs='GROUNDWATER_FLOW') rt_head = np.zeros((time.shape + rad.shape), dtype=float) for (select, step) in enumerate(time): points = out['DATA'][select]['points'] radii = np.sqrt(((points[(:, 0)] ** 2) + (points[(:, 1)] ** 2))) rad_ids = np.argmin(np.abs(np.subtract.outer(rad, radii)), axis=0) for (i, head) in enumerate(out['DATA'][select]['point_data']['HEAD']): rt_head[(select, rad_ids[i])] += head rt_head[(:, 1:)] = (rt_head[(:, 1:)] / angles) np.savetxt(os.path.join(path, 'rad_mean_head.txt'), rt_head)
Generate mean along angles for single simulation.
src/01_run_sim.py
angles_mean
GeoStat-Examples/gstools-pumping-test-ensemble
2
python
def angles_mean(time, rad, angles, path): out = readpvd(task_root=path, task_id='model', pcs='GROUNDWATER_FLOW') rt_head = np.zeros((time.shape + rad.shape), dtype=float) for (select, step) in enumerate(time): points = out['DATA'][select]['points'] radii = np.sqrt(((points[(:, 0)] ** 2) + (points[(:, 1)] ** 2))) rad_ids = np.argmin(np.abs(np.subtract.outer(rad, radii)), axis=0) for (i, head) in enumerate(out['DATA'][select]['point_data']['HEAD']): rt_head[(select, rad_ids[i])] += head rt_head[(:, 1:)] = (rt_head[(:, 1:)] / angles) np.savetxt(os.path.join(path, 'rad_mean_head.txt'), rt_head)
def angles_mean(time, rad, angles, path): out = readpvd(task_root=path, task_id='model', pcs='GROUNDWATER_FLOW') rt_head = np.zeros((time.shape + rad.shape), dtype=float) for (select, step) in enumerate(time): points = out['DATA'][select]['points'] radii = np.sqrt(((points[(:, 0)] ** 2) + (points[(:, 1)] ** 2))) rad_ids = np.argmin(np.abs(np.subtract.outer(rad, radii)), axis=0) for (i, head) in enumerate(out['DATA'][select]['point_data']['HEAD']): rt_head[(select, rad_ids[i])] += head rt_head[(:, 1:)] = (rt_head[(:, 1:)] / angles) np.savetxt(os.path.join(path, 'rad_mean_head.txt'), rt_head)<|docstring|>Generate mean along angles for single simulation.<|endoftext|>
26495a597c8b366b8186c2d53d5d825addef9008272fae3bbd64263d108aa0f6
def random_pred_test_data(size: int, seed: int=4321): 'Return a tuple of random tensors representing test and pred. data' y_pred = tf.random.stateless_normal((size,), seed=(seed, 0)) y_test = tf.random.stateless_normal((size,), seed=(seed, 1)) return (y_pred, y_test)
Return a tuple of random tensors representing test and pred. data
rlo/test/rlo/test_losses.py
random_pred_test_data
tomjaguarpaw/knossos-ksc
31
python
def random_pred_test_data(size: int, seed: int=4321): y_pred = tf.random.stateless_normal((size,), seed=(seed, 0)) y_test = tf.random.stateless_normal((size,), seed=(seed, 1)) return (y_pred, y_test)
def random_pred_test_data(size: int, seed: int=4321): y_pred = tf.random.stateless_normal((size,), seed=(seed, 0)) y_test = tf.random.stateless_normal((size,), seed=(seed, 1)) return (y_pred, y_test)<|docstring|>Return a tuple of random tensors representing test and pred. data<|endoftext|>
48657ee3292afd08c4c171f4e07591a342b65edf75c65163472b1fe37ad5ea9e
def check_same_as_tensorflow(ours, theirs): 'Test our "unreduced" implementation (ours) gives the same value as the tensorflow implementation (theirs) after averaging' size = 1024 (y_pred, y_test) = random_pred_test_data(size) actual = tf.reduce_mean(ours(y_pred, y_test)) expected = theirs(y_pred, y_test) np.testing.assert_allclose(actual.numpy(), expected.numpy(), rtol=1e-05)
Test our "unreduced" implementation (ours) gives the same value as the tensorflow implementation (theirs) after averaging
rlo/test/rlo/test_losses.py
check_same_as_tensorflow
tomjaguarpaw/knossos-ksc
31
python
def check_same_as_tensorflow(ours, theirs): size = 1024 (y_pred, y_test) = random_pred_test_data(size) actual = tf.reduce_mean(ours(y_pred, y_test)) expected = theirs(y_pred, y_test) np.testing.assert_allclose(actual.numpy(), expected.numpy(), rtol=1e-05)
def check_same_as_tensorflow(ours, theirs): size = 1024 (y_pred, y_test) = random_pred_test_data(size) actual = tf.reduce_mean(ours(y_pred, y_test)) expected = theirs(y_pred, y_test) np.testing.assert_allclose(actual.numpy(), expected.numpy(), rtol=1e-05)<|docstring|>Test our "unreduced" implementation (ours) gives the same value as the tensorflow implementation (theirs) after averaging<|endoftext|>
763c316624ae5b3a3919288d718e4cd2612909272418360a640d4d281caeddaf
def align_face(img, size=(512, 512)): '\n :param img: input photo, numpy array\n :param size: output shape\n :return: output align face image\n ' if ((img.shape[0] * img.shape[1]) > (512 * 512)): img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5) (detector, predictor, facerec) = generate_detector() dets = detector(img, 1) d = dets[0] bb = np.zeros(4, dtype=np.int32) ext = 8 bb[0] = np.maximum((d.left() - ext), 0) bb[1] = np.maximum(((d.top() - ext) - 20), 0) bb[2] = np.minimum((d.right() + ext), img.shape[1]) bb[3] = np.minimum((d.bottom() + 2), img.shape[0]) rec = dlib.rectangle(bb[0], bb[1], bb[2], bb[3]) shape = predictor(img, rec) face_descriptor = facerec.compute_face_descriptor(img, shape) face_array = np.array(face_descriptor).reshape((1, 128)) cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), (255, 255, 255), 1) cropped = img[(bb[1]:bb[3], bb[0]:bb[2], :)] scaled = cv2.resize(cropped, size, interpolation=cv2.INTER_LINEAR) return scaled
:param img: input photo, numpy array :param size: output shape :return: output align face image
neural/align.py
align_face
Hengle/face-nn
319
python
def align_face(img, size=(512, 512)): '\n :param img: input photo, numpy array\n :param size: output shape\n :return: output align face image\n ' if ((img.shape[0] * img.shape[1]) > (512 * 512)): img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5) (detector, predictor, facerec) = generate_detector() dets = detector(img, 1) d = dets[0] bb = np.zeros(4, dtype=np.int32) ext = 8 bb[0] = np.maximum((d.left() - ext), 0) bb[1] = np.maximum(((d.top() - ext) - 20), 0) bb[2] = np.minimum((d.right() + ext), img.shape[1]) bb[3] = np.minimum((d.bottom() + 2), img.shape[0]) rec = dlib.rectangle(bb[0], bb[1], bb[2], bb[3]) shape = predictor(img, rec) face_descriptor = facerec.compute_face_descriptor(img, shape) face_array = np.array(face_descriptor).reshape((1, 128)) cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), (255, 255, 255), 1) cropped = img[(bb[1]:bb[3], bb[0]:bb[2], :)] scaled = cv2.resize(cropped, size, interpolation=cv2.INTER_LINEAR) return scaled
def align_face(img, size=(512, 512)): '\n :param img: input photo, numpy array\n :param size: output shape\n :return: output align face image\n ' if ((img.shape[0] * img.shape[1]) > (512 * 512)): img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5) (detector, predictor, facerec) = generate_detector() dets = detector(img, 1) d = dets[0] bb = np.zeros(4, dtype=np.int32) ext = 8 bb[0] = np.maximum((d.left() - ext), 0) bb[1] = np.maximum(((d.top() - ext) - 20), 0) bb[2] = np.minimum((d.right() + ext), img.shape[1]) bb[3] = np.minimum((d.bottom() + 2), img.shape[0]) rec = dlib.rectangle(bb[0], bb[1], bb[2], bb[3]) shape = predictor(img, rec) face_descriptor = facerec.compute_face_descriptor(img, shape) face_array = np.array(face_descriptor).reshape((1, 128)) cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), (255, 255, 255), 1) cropped = img[(bb[1]:bb[3], bb[0]:bb[2], :)] scaled = cv2.resize(cropped, size, interpolation=cv2.INTER_LINEAR) return scaled<|docstring|>:param img: input photo, numpy array :param size: output shape :return: output align face image<|endoftext|>
21d0927414b6da90443e8f4f629474acf34a8ff56c41af29f177e4d5d6a91e83
def face_features(path_img, path_save=None): '\n 提取脸部特征图片\n :param path_img: input photo path, str\n :param path_save: output save image path, str\n :return:\n ' try: img = cv2.imread(path_img) if ((img.shape[0] * img.shape[1]) > (512 * 512)): img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5) scaled = align_face(img) if (path_save is not None): cv2.imwrite(path_save, img) cv2.imwrite(path_save.replace('align_', 'align2_'), scaled) return scaled except Exception as e: log.error(e)
提取脸部特征图片 :param path_img: input photo path, str :param path_save: output save image path, str :return:
neural/align.py
face_features
Hengle/face-nn
319
python
def face_features(path_img, path_save=None): '\n 提取脸部特征图片\n :param path_img: input photo path, str\n :param path_save: output save image path, str\n :return:\n ' try: img = cv2.imread(path_img) if ((img.shape[0] * img.shape[1]) > (512 * 512)): img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5) scaled = align_face(img) if (path_save is not None): cv2.imwrite(path_save, img) cv2.imwrite(path_save.replace('align_', 'align2_'), scaled) return scaled except Exception as e: log.error(e)
def face_features(path_img, path_save=None): '\n 提取脸部特征图片\n :param path_img: input photo path, str\n :param path_save: output save image path, str\n :return:\n ' try: img = cv2.imread(path_img) if ((img.shape[0] * img.shape[1]) > (512 * 512)): img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5) scaled = align_face(img) if (path_save is not None): cv2.imwrite(path_save, img) cv2.imwrite(path_save.replace('align_', 'align2_'), scaled) return scaled except Exception as e: log.error(e)<|docstring|>提取脸部特征图片 :param path_img: input photo path, str :param path_save: output save image path, str :return:<|endoftext|>
15518380346f8eb5d27e09b3e785d885b97e8061af4d63eeafec75131402763d
def sampling(args): 'Reparameterization trick by sampling fr an isotropic unit Gaussian.\n Arguments\n args (tensor): mean and log of variance of Q(z|X)\n Returns\n z (tensor): sampled latent vector\n ' epsilon_mean = 0.0 epsilon_std = 1.0 (z_mean, z_log_var) = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] epsilon = K.random_normal(shape=(batch, dim), mean=epsilon_mean, stddev=epsilon_std) return (z_mean + (K.exp((0.5 * z_log_var)) * epsilon))
Reparameterization trick by sampling fr an isotropic unit Gaussian. Arguments args (tensor): mean and log of variance of Q(z|X) Returns z (tensor): sampled latent vector
Generative_Models/Variational_AE/VAE.py
sampling
Romit-Maulik/Tutorials-Demos-Practice
8
python
def sampling(args): 'Reparameterization trick by sampling fr an isotropic unit Gaussian.\n Arguments\n args (tensor): mean and log of variance of Q(z|X)\n Returns\n z (tensor): sampled latent vector\n ' epsilon_mean = 0.0 epsilon_std = 1.0 (z_mean, z_log_var) = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] epsilon = K.random_normal(shape=(batch, dim), mean=epsilon_mean, stddev=epsilon_std) return (z_mean + (K.exp((0.5 * z_log_var)) * epsilon))
def sampling(args): 'Reparameterization trick by sampling fr an isotropic unit Gaussian.\n Arguments\n args (tensor): mean and log of variance of Q(z|X)\n Returns\n z (tensor): sampled latent vector\n ' epsilon_mean = 0.0 epsilon_std = 1.0 (z_mean, z_log_var) = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] epsilon = K.random_normal(shape=(batch, dim), mean=epsilon_mean, stddev=epsilon_std) return (z_mean + (K.exp((0.5 * z_log_var)) * epsilon))<|docstring|>Reparameterization trick by sampling fr an isotropic unit Gaussian. Arguments args (tensor): mean and log of variance of Q(z|X) Returns z (tensor): sampled latent vector<|endoftext|>
f8ee8306b8b4128398be654320cbddffda671037fc0e021158f61c3cb4faf7a6
@plugin.register(chain='pricing', requires=['ventures', 'devices']) def splunk(**kwargs): 'Updates Splunk usage per Venture' if (not settings.SPLUNK_HOST): return (False, 'Not configured.', kwargs) try: splunk_venture = Venture.objects.get(symbol='splunk_unknown_usage') except Venture.DoesNotExist: return (False, 'Splunk venture does not exist!', kwargs) (usage_type, created) = UsageType.objects.get_or_create(name='Splunk Volume 1 MB') date = kwargs['today'] splunk = Splunk() days_ago = (date - datetime.date.today()).days earliest = '{}d@d'.format((days_ago - 1)) latest = ('{}d@d'.format(days_ago) if (days_ago != 0) else 'now') splunk.start(earliest=earliest, latest=latest) percent = splunk.progress while (percent < 100): print(percent) time.sleep(30) percent = splunk.progress hosts = {} for item in splunk.results: host = item['host'] mb = float(item['MBytes']) if (host in hosts): hosts[host] += mb else: hosts[host] = mb for (host, usage) in hosts.iteritems(): set_usages(date, usage, usage_type, host, splunk_venture) return (True, 'done.', kwargs)
Updates Splunk usage per Venture
src/ralph_pricing/plugins/splunk.py
splunk
andrzej-jankowski/ralph_pricing
0
python
@plugin.register(chain='pricing', requires=['ventures', 'devices']) def splunk(**kwargs): if (not settings.SPLUNK_HOST): return (False, 'Not configured.', kwargs) try: splunk_venture = Venture.objects.get(symbol='splunk_unknown_usage') except Venture.DoesNotExist: return (False, 'Splunk venture does not exist!', kwargs) (usage_type, created) = UsageType.objects.get_or_create(name='Splunk Volume 1 MB') date = kwargs['today'] splunk = Splunk() days_ago = (date - datetime.date.today()).days earliest = '{}d@d'.format((days_ago - 1)) latest = ('{}d@d'.format(days_ago) if (days_ago != 0) else 'now') splunk.start(earliest=earliest, latest=latest) percent = splunk.progress while (percent < 100): print(percent) time.sleep(30) percent = splunk.progress hosts = {} for item in splunk.results: host = item['host'] mb = float(item['MBytes']) if (host in hosts): hosts[host] += mb else: hosts[host] = mb for (host, usage) in hosts.iteritems(): set_usages(date, usage, usage_type, host, splunk_venture) return (True, 'done.', kwargs)
@plugin.register(chain='pricing', requires=['ventures', 'devices']) def splunk(**kwargs): if (not settings.SPLUNK_HOST): return (False, 'Not configured.', kwargs) try: splunk_venture = Venture.objects.get(symbol='splunk_unknown_usage') except Venture.DoesNotExist: return (False, 'Splunk venture does not exist!', kwargs) (usage_type, created) = UsageType.objects.get_or_create(name='Splunk Volume 1 MB') date = kwargs['today'] splunk = Splunk() days_ago = (date - datetime.date.today()).days earliest = '{}d@d'.format((days_ago - 1)) latest = ('{}d@d'.format(days_ago) if (days_ago != 0) else 'now') splunk.start(earliest=earliest, latest=latest) percent = splunk.progress while (percent < 100): print(percent) time.sleep(30) percent = splunk.progress hosts = {} for item in splunk.results: host = item['host'] mb = float(item['MBytes']) if (host in hosts): hosts[host] += mb else: hosts[host] = mb for (host, usage) in hosts.iteritems(): set_usages(date, usage, usage_type, host, splunk_venture) return (True, 'done.', kwargs)<|docstring|>Updates Splunk usage per Venture<|endoftext|>
3bf0d0f81f65ce78461c30cf6998d7faa5cf16f0747a0f0c39dfd04298000e6d
def setUp(self): '\n Setup for the tests\n ' super(TestFuzzService, self).setUp() self.domain_list = [{'domain': 'mywebsite.com'}] self.origin_list = [{'origin': 'mywebsite1.com', 'port': 443, 'ssl': False}] self.caching_list = [{'name': 'default', 'ttl': 3600}, {'name': 'home', 'ttl': 1200, 'rules': [{'name': 'index', 'request_url': '/index.htm'}]}] self.restrictions_list = [{u'name': u'website only', u'rules': [{u'name': 'mywebsite.com', u'referrer': 'mywebsite.com'}]}] self.service_name = str(uuid.uuid1()) self.flavor_id = self.test_config.default_flavor if self.test_config.generate_flavors: self.flavor_id = str(uuid.uuid1()) self.client.create_flavor(flavor_id=self.flavor_id, provider_list=[{'provider': 'fastly', 'links': [{'href': 'www.fastly.com', 'rel': 'provider_url'}]}])
Setup for the tests
tests/security/services/test_fuzz_services.py
setUp
jqxin2006/poppy
0
python
def setUp(self): '\n \n ' super(TestFuzzService, self).setUp() self.domain_list = [{'domain': 'mywebsite.com'}] self.origin_list = [{'origin': 'mywebsite1.com', 'port': 443, 'ssl': False}] self.caching_list = [{'name': 'default', 'ttl': 3600}, {'name': 'home', 'ttl': 1200, 'rules': [{'name': 'index', 'request_url': '/index.htm'}]}] self.restrictions_list = [{u'name': u'website only', u'rules': [{u'name': 'mywebsite.com', u'referrer': 'mywebsite.com'}]}] self.service_name = str(uuid.uuid1()) self.flavor_id = self.test_config.default_flavor if self.test_config.generate_flavors: self.flavor_id = str(uuid.uuid1()) self.client.create_flavor(flavor_id=self.flavor_id, provider_list=[{'provider': 'fastly', 'links': [{'href': 'www.fastly.com', 'rel': 'provider_url'}]}])
def setUp(self): '\n \n ' super(TestFuzzService, self).setUp() self.domain_list = [{'domain': 'mywebsite.com'}] self.origin_list = [{'origin': 'mywebsite1.com', 'port': 443, 'ssl': False}] self.caching_list = [{'name': 'default', 'ttl': 3600}, {'name': 'home', 'ttl': 1200, 'rules': [{'name': 'index', 'request_url': '/index.htm'}]}] self.restrictions_list = [{u'name': u'website only', u'rules': [{u'name': 'mywebsite.com', u'referrer': 'mywebsite.com'}]}] self.service_name = str(uuid.uuid1()) self.flavor_id = self.test_config.default_flavor if self.test_config.generate_flavors: self.flavor_id = str(uuid.uuid1()) self.client.create_flavor(flavor_id=self.flavor_id, provider_list=[{'provider': 'fastly', 'links': [{'href': 'www.fastly.com', 'rel': 'provider_url'}]}])<|docstring|>Setup for the tests<|endoftext|>
1f9533a887122573c44459dc29507d7dd6707880cccbcdca84093045994d4a34
def reset_defaults(self): '\n Reset domain_list, origin_list, caching_list, service_name\n and flavor_id to its default value.\n ' self.domain_list = [{'domain': 'mywebsite.com'}] self.origin_list = [{'origin': 'mywebsite1.com', 'port': 443, 'ssl': False}] self.caching_list = [{'name': 'default', 'ttl': 3600}, {'name': 'home', 'ttl': 1200, 'rules': [{'name': 'index', 'request_url': '/index.htm'}]}] self.service_name = str(uuid.uuid1()) self.flavor_id = self.test_config.default_flavor self.restrictions_list = [{u'name': u'website only', u'rules': [{u'name': 'mywebsite.com', u'referrer': 'mywebsite.com'}]}]
Reset domain_list, origin_list, caching_list, service_name and flavor_id to its default value.
tests/security/services/test_fuzz_services.py
reset_defaults
jqxin2006/poppy
0
python
def reset_defaults(self): '\n Reset domain_list, origin_list, caching_list, service_name\n and flavor_id to its default value.\n ' self.domain_list = [{'domain': 'mywebsite.com'}] self.origin_list = [{'origin': 'mywebsite1.com', 'port': 443, 'ssl': False}] self.caching_list = [{'name': 'default', 'ttl': 3600}, {'name': 'home', 'ttl': 1200, 'rules': [{'name': 'index', 'request_url': '/index.htm'}]}] self.service_name = str(uuid.uuid1()) self.flavor_id = self.test_config.default_flavor self.restrictions_list = [{u'name': u'website only', u'rules': [{u'name': 'mywebsite.com', u'referrer': 'mywebsite.com'}]}]
def reset_defaults(self): '\n Reset domain_list, origin_list, caching_list, service_name\n and flavor_id to its default value.\n ' self.domain_list = [{'domain': 'mywebsite.com'}] self.origin_list = [{'origin': 'mywebsite1.com', 'port': 443, 'ssl': False}] self.caching_list = [{'name': 'default', 'ttl': 3600}, {'name': 'home', 'ttl': 1200, 'rules': [{'name': 'index', 'request_url': '/index.htm'}]}] self.service_name = str(uuid.uuid1()) self.flavor_id = self.test_config.default_flavor self.restrictions_list = [{u'name': u'website only', u'rules': [{u'name': 'mywebsite.com', u'referrer': 'mywebsite.com'}]}]<|docstring|>Reset domain_list, origin_list, caching_list, service_name and flavor_id to its default value.<|endoftext|>
8e06620f0e33f30dcd452d00a36b284cbe361197025ecd48baf360a98e1f14d7
def check_one_request(self): '\n Check the response of one request to see whether the application\n generates any 500 errors.\n ' resp = self.client.create_service(service_name=self.service_name, domain_list=self.domain_list, origin_list=self.origin_list, caching_list=self.caching_list, restrictions_list=self.restrictions_list, flavor_id=self.flavor_id) if ('location' in resp.headers): self.service_url = resp.headers['location'] else: self.service_url = '' self.assertTrue((resp.status_code < 500)) if (self.service_url != ''): self.client.delete_service(location=self.service_url)
Check the response of one request to see whether the application generates any 500 errors.
tests/security/services/test_fuzz_services.py
check_one_request
jqxin2006/poppy
0
python
def check_one_request(self): '\n Check the response of one request to see whether the application\n generates any 500 errors.\n ' resp = self.client.create_service(service_name=self.service_name, domain_list=self.domain_list, origin_list=self.origin_list, caching_list=self.caching_list, restrictions_list=self.restrictions_list, flavor_id=self.flavor_id) if ('location' in resp.headers): self.service_url = resp.headers['location'] else: self.service_url = self.assertTrue((resp.status_code < 500)) if (self.service_url != ): self.client.delete_service(location=self.service_url)
def check_one_request(self): '\n Check the response of one request to see whether the application\n generates any 500 errors.\n ' resp = self.client.create_service(service_name=self.service_name, domain_list=self.domain_list, origin_list=self.origin_list, caching_list=self.caching_list, restrictions_list=self.restrictions_list, flavor_id=self.flavor_id) if ('location' in resp.headers): self.service_url = resp.headers['location'] else: self.service_url = self.assertTrue((resp.status_code < 500)) if (self.service_url != ): self.client.delete_service(location=self.service_url)<|docstring|>Check the response of one request to see whether the application generates any 500 errors.<|endoftext|>
a8bb756a3bf81772df0cb9c7103324f5d760602fae2d5e3aef6badf967d8f4b9
@attrib.attr('fuzz') @ddt.file_data('data_fuzz.json') def test_fuzz_create_service(self, test_data): '\n Fuzz the create service calls to see whether 500 errors are generated.\n ' test_string = test_data['fuzz_string'] for key in self.domain_list[0]: self.service_name = str(uuid.uuid1()) self.domain_list[0][key] = test_string self.check_one_request() self.reset_defaults() for key in self.origin_list[0]: self.service_name = str(uuid.uuid1()) self.origin_list[0][key] = test_string self.check_one_request() self.reset_defaults() for key in self.caching_list[1]: self.service_name = str(uuid.uuid1()) if isinstance(self.caching_list[1][key], list): for the_key in self.caching_list[1][key][0]: self.caching_list[1][key][0][the_key] = test_string self.check_one_request() self.reset_defaults() else: self.caching_list[1][key] = test_string self.check_one_request() self.reset_defaults() for key in self.restrictions_list[0]: self.service_name = str(uuid.uuid1()) if isinstance(self.restrictions_list[0][key], list): for the_key in self.restrictions_list[0][key][0]: self.restrictions_list[0][key][0][the_key] = test_string self.check_one_request() self.reset_defaults() else: self.restrictions_list[0][key] = test_string self.check_one_request() self.reset_defaults() self.service_name = test_string self.check_one_request() self.reset_defaults() self.flavor_id = test_string self.check_one_request() self.reset_defaults()
Fuzz the create service calls to see whether 500 errors are generated.
tests/security/services/test_fuzz_services.py
test_fuzz_create_service
jqxin2006/poppy
0
python
@attrib.attr('fuzz') @ddt.file_data('data_fuzz.json') def test_fuzz_create_service(self, test_data): '\n \n ' test_string = test_data['fuzz_string'] for key in self.domain_list[0]: self.service_name = str(uuid.uuid1()) self.domain_list[0][key] = test_string self.check_one_request() self.reset_defaults() for key in self.origin_list[0]: self.service_name = str(uuid.uuid1()) self.origin_list[0][key] = test_string self.check_one_request() self.reset_defaults() for key in self.caching_list[1]: self.service_name = str(uuid.uuid1()) if isinstance(self.caching_list[1][key], list): for the_key in self.caching_list[1][key][0]: self.caching_list[1][key][0][the_key] = test_string self.check_one_request() self.reset_defaults() else: self.caching_list[1][key] = test_string self.check_one_request() self.reset_defaults() for key in self.restrictions_list[0]: self.service_name = str(uuid.uuid1()) if isinstance(self.restrictions_list[0][key], list): for the_key in self.restrictions_list[0][key][0]: self.restrictions_list[0][key][0][the_key] = test_string self.check_one_request() self.reset_defaults() else: self.restrictions_list[0][key] = test_string self.check_one_request() self.reset_defaults() self.service_name = test_string self.check_one_request() self.reset_defaults() self.flavor_id = test_string self.check_one_request() self.reset_defaults()
@attrib.attr('fuzz') @ddt.file_data('data_fuzz.json') def test_fuzz_create_service(self, test_data): '\n \n ' test_string = test_data['fuzz_string'] for key in self.domain_list[0]: self.service_name = str(uuid.uuid1()) self.domain_list[0][key] = test_string self.check_one_request() self.reset_defaults() for key in self.origin_list[0]: self.service_name = str(uuid.uuid1()) self.origin_list[0][key] = test_string self.check_one_request() self.reset_defaults() for key in self.caching_list[1]: self.service_name = str(uuid.uuid1()) if isinstance(self.caching_list[1][key], list): for the_key in self.caching_list[1][key][0]: self.caching_list[1][key][0][the_key] = test_string self.check_one_request() self.reset_defaults() else: self.caching_list[1][key] = test_string self.check_one_request() self.reset_defaults() for key in self.restrictions_list[0]: self.service_name = str(uuid.uuid1()) if isinstance(self.restrictions_list[0][key], list): for the_key in self.restrictions_list[0][key][0]: self.restrictions_list[0][key][0][the_key] = test_string self.check_one_request() self.reset_defaults() else: self.restrictions_list[0][key] = test_string self.check_one_request() self.reset_defaults() self.service_name = test_string self.check_one_request() self.reset_defaults() self.flavor_id = test_string self.check_one_request() self.reset_defaults()<|docstring|>Fuzz the create service calls to see whether 500 errors are generated.<|endoftext|>
555b9296d827ba88c265e461191e1d216599652d2a344093f3443a3952ce54ab
def strip_protocol(url): '\n Function removing the protocol from the given url.\n\n Args:\n url (str): Target URL as a string.\n\n Returns:\n string: The url without protocol.\n\n ' return PROTOCOL_RE.sub('', url)
Function removing the protocol from the given url. Args: url (str): Target URL as a string. Returns: string: The url without protocol.
ural/strip_protocol.py
strip_protocol
Yomguithereal/ural
30
python
def strip_protocol(url): '\n Function removing the protocol from the given url.\n\n Args:\n url (str): Target URL as a string.\n\n Returns:\n string: The url without protocol.\n\n ' return PROTOCOL_RE.sub(, url)
def strip_protocol(url): '\n Function removing the protocol from the given url.\n\n Args:\n url (str): Target URL as a string.\n\n Returns:\n string: The url without protocol.\n\n ' return PROTOCOL_RE.sub(, url)<|docstring|>Function removing the protocol from the given url. Args: url (str): Target URL as a string. Returns: string: The url without protocol.<|endoftext|>
fa14ac2a74ab90f5c2f6b8d2b43c6c6bad136bc5290a300c908ada9b58fdac41
def runrootscript(pathname, donotwait): 'Runs script located at given pathname' if g_dry_run: iaslog(('Dry run executing root script: %s' % pathname)) return True try: if donotwait: iaslog('Do not wait triggered') proc = subprocess.Popen(pathname) iaslog(('Running Script: %s ' % str(pathname))) else: proc = subprocess.Popen(pathname, stdout=subprocess.PIPE, stderr=subprocess.PIPE) iaslog(('Running Script: %s ' % str(pathname))) (out, err) = proc.communicate() if (err and (proc.returncode == 0)): iaslog(('Output from %s on stderr but ran successfully: %s' % (pathname, err))) elif (proc.returncode > 0): iaslog(('Received non-zero exit code: ' + str(err))) return False except OSError as err: iaslog(('Failure running script: ' + str(err))) return False return True
Runs script located at given pathname
payload/Library/installapplications/installapplications.py
runrootscript
BrandwatchLtd/installapplications
0
python
def runrootscript(pathname, donotwait): if g_dry_run: iaslog(('Dry run executing root script: %s' % pathname)) return True try: if donotwait: iaslog('Do not wait triggered') proc = subprocess.Popen(pathname) iaslog(('Running Script: %s ' % str(pathname))) else: proc = subprocess.Popen(pathname, stdout=subprocess.PIPE, stderr=subprocess.PIPE) iaslog(('Running Script: %s ' % str(pathname))) (out, err) = proc.communicate() if (err and (proc.returncode == 0)): iaslog(('Output from %s on stderr but ran successfully: %s' % (pathname, err))) elif (proc.returncode > 0): iaslog(('Received non-zero exit code: ' + str(err))) return False except OSError as err: iaslog(('Failure running script: ' + str(err))) return False return True
def runrootscript(pathname, donotwait): if g_dry_run: iaslog(('Dry run executing root script: %s' % pathname)) return True try: if donotwait: iaslog('Do not wait triggered') proc = subprocess.Popen(pathname) iaslog(('Running Script: %s ' % str(pathname))) else: proc = subprocess.Popen(pathname, stdout=subprocess.PIPE, stderr=subprocess.PIPE) iaslog(('Running Script: %s ' % str(pathname))) (out, err) = proc.communicate() if (err and (proc.returncode == 0)): iaslog(('Output from %s on stderr but ran successfully: %s' % (pathname, err))) elif (proc.returncode > 0): iaslog(('Received non-zero exit code: ' + str(err))) return False except OSError as err: iaslog(('Failure running script: ' + str(err))) return False return True<|docstring|>Runs script located at given pathname<|endoftext|>
c74796352a005b7080d0c39fccb348fbe4cbba9150232351d217b41e96a8c11e
def __init__(self, exp_para, image_para, lithosim_para): '\n Initialization of Neural_ILT_Wrapper\n Args:\n exp_para: experiment-relevant parameters\n image_para: image-relevant parameters\n lithosim_para: lithosim-relevant parameters \n ' print('Launching Neural-ILT on device:', exp_para['device']) self.exp_para = exp_para self.image_para = image_para self.lithosim_para = lithosim_para self.device = exp_para['device'] self.save_mask = exp_para['save_mask'] self.dynamic_beta = exp_para['dynamic_beta'] self.lr = exp_para['lr'] self.beta = exp_para['beta'] self.gamma = exp_para['gamma'] self.refine_iter_num = exp_para['refine_iter_num'] self.step_size = exp_para['step_size'] self.select_by_obj = exp_para['select_by_obj'] self.max_l2 = 1000000000000000.0 self.max_epe = 100000.0 if exp_para['max_l2']: self.max_l2 = exp_para['max_l2'] if exp_para['max_epe']: self.max_epe = exp_para['max_epe'] print('-------- Loading Neural-ILT Model & Data --------') print('MODEL:', self.exp_para['ilt_model_path']) if self.exp_para['data_set_name']: print('DATASET:', self.exp_para['data_set_name']) self.kernels_root = self.lithosim_para['kernels_root'] self.kernels = torch.load(os.path.join(self.kernels_root, 'kernel_focus_tensor.pt'), map_location=self.device) self.kernels_ct = torch.load(os.path.join(self.kernels_root, 'kernel_ct_focus_tensor.pt'), map_location=self.device) self.kernels_def = torch.load(os.path.join(self.kernels_root, 'kernel_defocus_tensor.pt'), map_location=self.device) self.kernels_def_ct = torch.load(os.path.join(self.kernels_root, 'kernel_ct_defocus_tensor.pt'), map_location=self.device) self.weight = torch.load(os.path.join(self.kernels_root, 'weight_focus_tensor.pt'), map_location=self.device) self.weight_def = torch.load(os.path.join(self.kernels_root, 'weight_defocus_tensor.pt'), map_location=self.device) self.load_in_backone_model = unet_torch.UNet(n_class=1, in_channels=1).to(self.device) 
self.load_in_backone_model.load_state_dict(torch.load(self.exp_para['ilt_model_path'], map_location=self.device)) self.refine_backbone_model = ILTNet(1, self.kernels, self.kernels_ct, self.kernels_def, self.kernels_def_ct, self.weight, self.weight_def, cplx_obj=False, report_epe=True, in_channels=1).to(self.device) self.cplx_loss_layer = ilt_loss_layer(self.kernels, self.kernels_ct, self.kernels_def, self.kernels_def_ct, self.weight, self.weight_def, cplx_obj=True).to(self.device) pretrain_dict = self.load_in_backone_model.state_dict() self.model_dict = self.refine_backbone_model.state_dict() pretrain_dict = {k: v for (k, v) in pretrain_dict.items() if (k in self.model_dict)} for param in self.refine_backbone_model.parameters(): param.requires_grad = True self.model_dict.update(pretrain_dict) self.refine_backbone_model.load_state_dict(self.model_dict) self.optimizer_ft = optim.Adam(self.refine_backbone_model.parameters(), lr=self.lr) self.opt_init_state = self.optimizer_ft.state_dict()
Initialization of Neural_ILT_Wrapper Args: exp_para: experiment-relevant parameters image_para: image-relevant parameters lithosim_para: lithosim-relevant parameters
neural_ilt.py
__init__
cuhk-eda/neural-ilt
14
python
def __init__(self, exp_para, image_para, lithosim_para): '\n Initialization of Neural_ILT_Wrapper\n Args:\n exp_para: experiment-relevant parameters\n image_para: image-relevant parameters\n lithosim_para: lithosim-relevant parameters \n ' print('Launching Neural-ILT on device:', exp_para['device']) self.exp_para = exp_para self.image_para = image_para self.lithosim_para = lithosim_para self.device = exp_para['device'] self.save_mask = exp_para['save_mask'] self.dynamic_beta = exp_para['dynamic_beta'] self.lr = exp_para['lr'] self.beta = exp_para['beta'] self.gamma = exp_para['gamma'] self.refine_iter_num = exp_para['refine_iter_num'] self.step_size = exp_para['step_size'] self.select_by_obj = exp_para['select_by_obj'] self.max_l2 = 1000000000000000.0 self.max_epe = 100000.0 if exp_para['max_l2']: self.max_l2 = exp_para['max_l2'] if exp_para['max_epe']: self.max_epe = exp_para['max_epe'] print('-------- Loading Neural-ILT Model & Data --------') print('MODEL:', self.exp_para['ilt_model_path']) if self.exp_para['data_set_name']: print('DATASET:', self.exp_para['data_set_name']) self.kernels_root = self.lithosim_para['kernels_root'] self.kernels = torch.load(os.path.join(self.kernels_root, 'kernel_focus_tensor.pt'), map_location=self.device) self.kernels_ct = torch.load(os.path.join(self.kernels_root, 'kernel_ct_focus_tensor.pt'), map_location=self.device) self.kernels_def = torch.load(os.path.join(self.kernels_root, 'kernel_defocus_tensor.pt'), map_location=self.device) self.kernels_def_ct = torch.load(os.path.join(self.kernels_root, 'kernel_ct_defocus_tensor.pt'), map_location=self.device) self.weight = torch.load(os.path.join(self.kernels_root, 'weight_focus_tensor.pt'), map_location=self.device) self.weight_def = torch.load(os.path.join(self.kernels_root, 'weight_defocus_tensor.pt'), map_location=self.device) self.load_in_backone_model = unet_torch.UNet(n_class=1, in_channels=1).to(self.device) 
self.load_in_backone_model.load_state_dict(torch.load(self.exp_para['ilt_model_path'], map_location=self.device)) self.refine_backbone_model = ILTNet(1, self.kernels, self.kernels_ct, self.kernels_def, self.kernels_def_ct, self.weight, self.weight_def, cplx_obj=False, report_epe=True, in_channels=1).to(self.device) self.cplx_loss_layer = ilt_loss_layer(self.kernels, self.kernels_ct, self.kernels_def, self.kernels_def_ct, self.weight, self.weight_def, cplx_obj=True).to(self.device) pretrain_dict = self.load_in_backone_model.state_dict() self.model_dict = self.refine_backbone_model.state_dict() pretrain_dict = {k: v for (k, v) in pretrain_dict.items() if (k in self.model_dict)} for param in self.refine_backbone_model.parameters(): param.requires_grad = True self.model_dict.update(pretrain_dict) self.refine_backbone_model.load_state_dict(self.model_dict) self.optimizer_ft = optim.Adam(self.refine_backbone_model.parameters(), lr=self.lr) self.opt_init_state = self.optimizer_ft.state_dict()
def __init__(self, exp_para, image_para, lithosim_para): '\n Initialization of Neural_ILT_Wrapper\n Args:\n exp_para: experiment-relevant parameters\n image_para: image-relevant parameters\n lithosim_para: lithosim-relevant parameters \n ' print('Launching Neural-ILT on device:', exp_para['device']) self.exp_para = exp_para self.image_para = image_para self.lithosim_para = lithosim_para self.device = exp_para['device'] self.save_mask = exp_para['save_mask'] self.dynamic_beta = exp_para['dynamic_beta'] self.lr = exp_para['lr'] self.beta = exp_para['beta'] self.gamma = exp_para['gamma'] self.refine_iter_num = exp_para['refine_iter_num'] self.step_size = exp_para['step_size'] self.select_by_obj = exp_para['select_by_obj'] self.max_l2 = 1000000000000000.0 self.max_epe = 100000.0 if exp_para['max_l2']: self.max_l2 = exp_para['max_l2'] if exp_para['max_epe']: self.max_epe = exp_para['max_epe'] print('-------- Loading Neural-ILT Model & Data --------') print('MODEL:', self.exp_para['ilt_model_path']) if self.exp_para['data_set_name']: print('DATASET:', self.exp_para['data_set_name']) self.kernels_root = self.lithosim_para['kernels_root'] self.kernels = torch.load(os.path.join(self.kernels_root, 'kernel_focus_tensor.pt'), map_location=self.device) self.kernels_ct = torch.load(os.path.join(self.kernels_root, 'kernel_ct_focus_tensor.pt'), map_location=self.device) self.kernels_def = torch.load(os.path.join(self.kernels_root, 'kernel_defocus_tensor.pt'), map_location=self.device) self.kernels_def_ct = torch.load(os.path.join(self.kernels_root, 'kernel_ct_defocus_tensor.pt'), map_location=self.device) self.weight = torch.load(os.path.join(self.kernels_root, 'weight_focus_tensor.pt'), map_location=self.device) self.weight_def = torch.load(os.path.join(self.kernels_root, 'weight_defocus_tensor.pt'), map_location=self.device) self.load_in_backone_model = unet_torch.UNet(n_class=1, in_channels=1).to(self.device) 
self.load_in_backone_model.load_state_dict(torch.load(self.exp_para['ilt_model_path'], map_location=self.device)) self.refine_backbone_model = ILTNet(1, self.kernels, self.kernels_ct, self.kernels_def, self.kernels_def_ct, self.weight, self.weight_def, cplx_obj=False, report_epe=True, in_channels=1).to(self.device) self.cplx_loss_layer = ilt_loss_layer(self.kernels, self.kernels_ct, self.kernels_def, self.kernels_def_ct, self.weight, self.weight_def, cplx_obj=True).to(self.device) pretrain_dict = self.load_in_backone_model.state_dict() self.model_dict = self.refine_backbone_model.state_dict() pretrain_dict = {k: v for (k, v) in pretrain_dict.items() if (k in self.model_dict)} for param in self.refine_backbone_model.parameters(): param.requires_grad = True self.model_dict.update(pretrain_dict) self.refine_backbone_model.load_state_dict(self.model_dict) self.optimizer_ft = optim.Adam(self.refine_backbone_model.parameters(), lr=self.lr) self.opt_init_state = self.optimizer_ft.state_dict()<|docstring|>Initialization of Neural_ILT_Wrapper Args: exp_para: experiment-relevant parameters image_para: image-relevant parameters lithosim_para: lithosim-relevant parameters<|endoftext|>
771fef246d4ef5cce85cb1ca0eca7721b193c38962c8fcfe54a6906aa1d5df28
def MTRmap(*argv): ' Calculate MTR from a MT "on" and a MT "off" acquisition \n\t \n\n\t INTERFACES\n\t MTRmap(mton_nifti,mtoff_nifti,mtr_output)\n\t MTRmap(mton_nifti,mtoff_nifti,mtr_output,mask_nifti)\n\n\t \n\t PARAMETERS\n\t - mton_nifti: path of a Nifti file storing the 3D MT "on" image (with off-res. pulse)\n\t - mtoff_nifti: path of a Nifti file storing the 3D MT "off" image (without off-res. pulse)\n\t - mtr_output: path of the Nifti file that will store the 3D output MTR image (saved as a \n\t\t\t double-precision floating point image FLOAT64); such an output map is\n\t\t\t calculated as\n\t\t\t\n\t\t\t\tMTR = 100 * (MToff - MTon)/MToff\n\t\t\n\t\t\t above, MTon is the image where the off-resonance pulse is played (so is "on")\n\t\t\t while MToff is the image where the off-resonance pulse is not played (so is "off")\n\n\t - mask_nifti: path of a Nifti file storting a mask (MTR will be calculated only where\n\t\t\t mask_nifti equals 1; 0 will be set in the MTR output map otherwise)\n\t \n\t Dependencies (Python packages): nibabel, numpy (other than standard library).\n\t \n\t References: "T1, T2 relaxation and magnetization transfer in tissue at 3T", Stanisz GJ,\n\t\t Magnetic Resonance in Medicine (2005), 54:507-512\n\t \n\t Author: Francesco Grussu, University College London\n\t\t CDSQuaMRI Project \n\t\t <[email protected]> <[email protected]>' Nargv = len(argv) mton_nifti = argv[0] mtoff_nifti = argv[1] mtr_output = argv[2] print(' ... loading input data') try: mton_obj = nib.load(mton_nifti) except: print('') print('ERROR: the 3D input MT "on" file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mton_nifti)) print('') sys.exit(1) mton_data = mton_obj.get_fdata() imgsize = mton_data.shape imgsize = np.array(imgsize) if (imgsize.size != 3): print('') print('ERROR: the 3D input MT "on" file {} is not a 3D NIFTI. 
Exiting with 1.'.format(mton_nifti)) print('') sys.exit(1) try: mtoff_obj = nib.load(mtoff_nifti) except: print('') print('ERROR: the 3D input MT "off" file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mtoff_nifti)) print('') sys.exit(1) mtoff_data = mtoff_obj.get_fdata() mton_header = mton_obj.header mton_affine = mton_header.get_best_affine() mton_dims = mton_obj.shape mtoff_header = mtoff_obj.header mtoff_affine = mtoff_header.get_best_affine() mtoff_dims = mtoff_obj.shape mtoff_size = mtoff_data.shape mtoff_size = np.array(mtoff_size) if (mtoff_size.size != 3): print('') print('ERROR: the 3D input MT "off" file {} is not a 3D NIFTI. Exiting with 1.'.format(mtoff_nifti)) print('') sys.exit(1) elif ((np.sum((mton_affine == mtoff_affine)) != 16) or (mton_dims[0] != mtoff_dims[0]) or (mton_dims[1] != mtoff_dims[1]) or (mton_dims[2] != mtoff_dims[2])): print('') print('ERROR: the geometry of the MT on file {} and the MT off file {} do not match. Exiting with 1.'.format(mton_nifti, mtoff_nifti)) print('') sys.exit(1) if (Nargv == 4): got_mask = True mask_nifti = argv[3] try: mask_obj = nib.load(mask_nifti) except: print('') print('ERROR: the mask file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mask_nifti)) print('') sys.exit(1) mask_data = mask_obj.get_fdata() mask_size = mask_data.shape mask_size = np.array(mask_size) mask_header = mask_obj.header mask_affine = mask_header.get_best_affine() mask_dims = mask_obj.shape if (mask_size.size != 3): print('') print('WARNING: the mask file {} is not a 3D NIFTI file. Ignoring mask...'.format(mask_nifti)) print('') mask_data = np.ones(imgsize[0:3], 'float64') elif ((np.sum((mton_affine == mask_affine)) != 16) or (mton_dims[0] != mask_dims[0]) or (mton_dims[1] != mask_dims[1]) or (mton_dims[2] != mask_dims[2])): print('') print('WARNING: the geometry of the the mask file {} does not match that of the MT data. 
Ignoring mask...'.format(mask_nifti)) print('') mask_data = np.ones(imgsize[0:3], 'float64') else: mask_data = np.array(mask_data, 'float64') mask_data[(mask_data > 1)] = 1 mask_data[(mask_data < 0)] = 0 else: got_mask = False print(' ... calculating MTR') mton_data = np.array(mton_data, 'float64') mtoff_data = np.array(mtoff_data, 'float64') warnings.filterwarnings('ignore') mtr_map = ((100 * (mtoff_data - mton_data)) / mtoff_data) mtr_map[np.isnan(mtr_map)] = 0.0 mtr_map[np.isinf(mtr_map)] = 0.0 print(' ... substituting any nan and inf values with 0') if (got_mask == True): mtr_map[(mask_data == 0)] = 0 print(' ... saving output file') buffer_header = mton_obj.header buffer_header.set_data_dtype('float64') mtr_obj = nib.Nifti1Image(mtr_map, mton_obj.affine, buffer_header) nib.save(mtr_obj, mtr_output) print('')
Calculate MTR from a MT "on" and a MT "off" acquisition INTERFACES MTRmap(mton_nifti,mtoff_nifti,mtr_output) MTRmap(mton_nifti,mtoff_nifti,mtr_output,mask_nifti) PARAMETERS - mton_nifti: path of a Nifti file storing the 3D MT "on" image (with off-res. pulse) - mtoff_nifti: path of a Nifti file storing the 3D MT "off" image (without off-res. pulse) - mtr_output: path of the Nifti file that will store the 3D output MTR image (saved as a double-precision floating point image FLOAT64); such an output map is calculated as MTR = 100 * (MToff - MTon)/MToff above, MTon is the image where the off-resonance pulse is played (so is "on") while MToff is the image where the off-resonance pulse is not played (so is "off") - mask_nifti: path of a Nifti file storting a mask (MTR will be calculated only where mask_nifti equals 1; 0 will be set in the MTR output map otherwise) Dependencies (Python packages): nibabel, numpy (other than standard library). References: "T1, T2 relaxation and magnetization transfer in tissue at 3T", Stanisz GJ, Magnetic Resonance in Medicine (2005), 54:507-512 Author: Francesco Grussu, University College London CDSQuaMRI Project <[email protected]> <[email protected]>
myrelax/getMTR.py
MTRmap
fragrussu/MyRelax
3
python
def MTRmap(*argv): ' Calculate MTR from a MT "on" and a MT "off" acquisition \n\t \n\n\t INTERFACES\n\t MTRmap(mton_nifti,mtoff_nifti,mtr_output)\n\t MTRmap(mton_nifti,mtoff_nifti,mtr_output,mask_nifti)\n\n\t \n\t PARAMETERS\n\t - mton_nifti: path of a Nifti file storing the 3D MT "on" image (with off-res. pulse)\n\t - mtoff_nifti: path of a Nifti file storing the 3D MT "off" image (without off-res. pulse)\n\t - mtr_output: path of the Nifti file that will store the 3D output MTR image (saved as a \n\t\t\t double-precision floating point image FLOAT64); such an output map is\n\t\t\t calculated as\n\t\t\t\n\t\t\t\tMTR = 100 * (MToff - MTon)/MToff\n\t\t\n\t\t\t above, MTon is the image where the off-resonance pulse is played (so is "on")\n\t\t\t while MToff is the image where the off-resonance pulse is not played (so is "off")\n\n\t - mask_nifti: path of a Nifti file storting a mask (MTR will be calculated only where\n\t\t\t mask_nifti equals 1; 0 will be set in the MTR output map otherwise)\n\t \n\t Dependencies (Python packages): nibabel, numpy (other than standard library).\n\t \n\t References: "T1, T2 relaxation and magnetization transfer in tissue at 3T", Stanisz GJ,\n\t\t Magnetic Resonance in Medicine (2005), 54:507-512\n\t \n\t Author: Francesco Grussu, University College London\n\t\t CDSQuaMRI Project \n\t\t <[email protected]> <[email protected]>' Nargv = len(argv) mton_nifti = argv[0] mtoff_nifti = argv[1] mtr_output = argv[2] print(' ... loading input data') try: mton_obj = nib.load(mton_nifti) except: print() print('ERROR: the 3D input MT "on" file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mton_nifti)) print() sys.exit(1) mton_data = mton_obj.get_fdata() imgsize = mton_data.shape imgsize = np.array(imgsize) if (imgsize.size != 3): print() print('ERROR: the 3D input MT "on" file {} is not a 3D NIFTI. 
Exiting with 1.'.format(mton_nifti)) print() sys.exit(1) try: mtoff_obj = nib.load(mtoff_nifti) except: print() print('ERROR: the 3D input MT "off" file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mtoff_nifti)) print() sys.exit(1) mtoff_data = mtoff_obj.get_fdata() mton_header = mton_obj.header mton_affine = mton_header.get_best_affine() mton_dims = mton_obj.shape mtoff_header = mtoff_obj.header mtoff_affine = mtoff_header.get_best_affine() mtoff_dims = mtoff_obj.shape mtoff_size = mtoff_data.shape mtoff_size = np.array(mtoff_size) if (mtoff_size.size != 3): print() print('ERROR: the 3D input MT "off" file {} is not a 3D NIFTI. Exiting with 1.'.format(mtoff_nifti)) print() sys.exit(1) elif ((np.sum((mton_affine == mtoff_affine)) != 16) or (mton_dims[0] != mtoff_dims[0]) or (mton_dims[1] != mtoff_dims[1]) or (mton_dims[2] != mtoff_dims[2])): print() print('ERROR: the geometry of the MT on file {} and the MT off file {} do not match. Exiting with 1.'.format(mton_nifti, mtoff_nifti)) print() sys.exit(1) if (Nargv == 4): got_mask = True mask_nifti = argv[3] try: mask_obj = nib.load(mask_nifti) except: print() print('ERROR: the mask file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mask_nifti)) print() sys.exit(1) mask_data = mask_obj.get_fdata() mask_size = mask_data.shape mask_size = np.array(mask_size) mask_header = mask_obj.header mask_affine = mask_header.get_best_affine() mask_dims = mask_obj.shape if (mask_size.size != 3): print() print('WARNING: the mask file {} is not a 3D NIFTI file. Ignoring mask...'.format(mask_nifti)) print() mask_data = np.ones(imgsize[0:3], 'float64') elif ((np.sum((mton_affine == mask_affine)) != 16) or (mton_dims[0] != mask_dims[0]) or (mton_dims[1] != mask_dims[1]) or (mton_dims[2] != mask_dims[2])): print() print('WARNING: the geometry of the the mask file {} does not match that of the MT data. 
Ignoring mask...'.format(mask_nifti)) print() mask_data = np.ones(imgsize[0:3], 'float64') else: mask_data = np.array(mask_data, 'float64') mask_data[(mask_data > 1)] = 1 mask_data[(mask_data < 0)] = 0 else: got_mask = False print(' ... calculating MTR') mton_data = np.array(mton_data, 'float64') mtoff_data = np.array(mtoff_data, 'float64') warnings.filterwarnings('ignore') mtr_map = ((100 * (mtoff_data - mton_data)) / mtoff_data) mtr_map[np.isnan(mtr_map)] = 0.0 mtr_map[np.isinf(mtr_map)] = 0.0 print(' ... substituting any nan and inf values with 0') if (got_mask == True): mtr_map[(mask_data == 0)] = 0 print(' ... saving output file') buffer_header = mton_obj.header buffer_header.set_data_dtype('float64') mtr_obj = nib.Nifti1Image(mtr_map, mton_obj.affine, buffer_header) nib.save(mtr_obj, mtr_output) print()
def MTRmap(*argv): ' Calculate MTR from a MT "on" and a MT "off" acquisition \n\t \n\n\t INTERFACES\n\t MTRmap(mton_nifti,mtoff_nifti,mtr_output)\n\t MTRmap(mton_nifti,mtoff_nifti,mtr_output,mask_nifti)\n\n\t \n\t PARAMETERS\n\t - mton_nifti: path of a Nifti file storing the 3D MT "on" image (with off-res. pulse)\n\t - mtoff_nifti: path of a Nifti file storing the 3D MT "off" image (without off-res. pulse)\n\t - mtr_output: path of the Nifti file that will store the 3D output MTR image (saved as a \n\t\t\t double-precision floating point image FLOAT64); such an output map is\n\t\t\t calculated as\n\t\t\t\n\t\t\t\tMTR = 100 * (MToff - MTon)/MToff\n\t\t\n\t\t\t above, MTon is the image where the off-resonance pulse is played (so is "on")\n\t\t\t while MToff is the image where the off-resonance pulse is not played (so is "off")\n\n\t - mask_nifti: path of a Nifti file storting a mask (MTR will be calculated only where\n\t\t\t mask_nifti equals 1; 0 will be set in the MTR output map otherwise)\n\t \n\t Dependencies (Python packages): nibabel, numpy (other than standard library).\n\t \n\t References: "T1, T2 relaxation and magnetization transfer in tissue at 3T", Stanisz GJ,\n\t\t Magnetic Resonance in Medicine (2005), 54:507-512\n\t \n\t Author: Francesco Grussu, University College London\n\t\t CDSQuaMRI Project \n\t\t <[email protected]> <[email protected]>' Nargv = len(argv) mton_nifti = argv[0] mtoff_nifti = argv[1] mtr_output = argv[2] print(' ... loading input data') try: mton_obj = nib.load(mton_nifti) except: print() print('ERROR: the 3D input MT "on" file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mton_nifti)) print() sys.exit(1) mton_data = mton_obj.get_fdata() imgsize = mton_data.shape imgsize = np.array(imgsize) if (imgsize.size != 3): print() print('ERROR: the 3D input MT "on" file {} is not a 3D NIFTI. 
Exiting with 1.'.format(mton_nifti)) print() sys.exit(1) try: mtoff_obj = nib.load(mtoff_nifti) except: print() print('ERROR: the 3D input MT "off" file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mtoff_nifti)) print() sys.exit(1) mtoff_data = mtoff_obj.get_fdata() mton_header = mton_obj.header mton_affine = mton_header.get_best_affine() mton_dims = mton_obj.shape mtoff_header = mtoff_obj.header mtoff_affine = mtoff_header.get_best_affine() mtoff_dims = mtoff_obj.shape mtoff_size = mtoff_data.shape mtoff_size = np.array(mtoff_size) if (mtoff_size.size != 3): print() print('ERROR: the 3D input MT "off" file {} is not a 3D NIFTI. Exiting with 1.'.format(mtoff_nifti)) print() sys.exit(1) elif ((np.sum((mton_affine == mtoff_affine)) != 16) or (mton_dims[0] != mtoff_dims[0]) or (mton_dims[1] != mtoff_dims[1]) or (mton_dims[2] != mtoff_dims[2])): print() print('ERROR: the geometry of the MT on file {} and the MT off file {} do not match. Exiting with 1.'.format(mton_nifti, mtoff_nifti)) print() sys.exit(1) if (Nargv == 4): got_mask = True mask_nifti = argv[3] try: mask_obj = nib.load(mask_nifti) except: print() print('ERROR: the mask file {} does not exist or is not in NIFTI format. Exiting with 1.'.format(mask_nifti)) print() sys.exit(1) mask_data = mask_obj.get_fdata() mask_size = mask_data.shape mask_size = np.array(mask_size) mask_header = mask_obj.header mask_affine = mask_header.get_best_affine() mask_dims = mask_obj.shape if (mask_size.size != 3): print() print('WARNING: the mask file {} is not a 3D NIFTI file. Ignoring mask...'.format(mask_nifti)) print() mask_data = np.ones(imgsize[0:3], 'float64') elif ((np.sum((mton_affine == mask_affine)) != 16) or (mton_dims[0] != mask_dims[0]) or (mton_dims[1] != mask_dims[1]) or (mton_dims[2] != mask_dims[2])): print() print('WARNING: the geometry of the the mask file {} does not match that of the MT data. 
Ignoring mask...'.format(mask_nifti)) print() mask_data = np.ones(imgsize[0:3], 'float64') else: mask_data = np.array(mask_data, 'float64') mask_data[(mask_data > 1)] = 1 mask_data[(mask_data < 0)] = 0 else: got_mask = False print(' ... calculating MTR') mton_data = np.array(mton_data, 'float64') mtoff_data = np.array(mtoff_data, 'float64') warnings.filterwarnings('ignore') mtr_map = ((100 * (mtoff_data - mton_data)) / mtoff_data) mtr_map[np.isnan(mtr_map)] = 0.0 mtr_map[np.isinf(mtr_map)] = 0.0 print(' ... substituting any nan and inf values with 0') if (got_mask == True): mtr_map[(mask_data == 0)] = 0 print(' ... saving output file') buffer_header = mton_obj.header buffer_header.set_data_dtype('float64') mtr_obj = nib.Nifti1Image(mtr_map, mton_obj.affine, buffer_header) nib.save(mtr_obj, mtr_output) print()<|docstring|>Calculate MTR from a MT "on" and a MT "off" acquisition INTERFACES MTRmap(mton_nifti,mtoff_nifti,mtr_output) MTRmap(mton_nifti,mtoff_nifti,mtr_output,mask_nifti) PARAMETERS - mton_nifti: path of a Nifti file storing the 3D MT "on" image (with off-res. pulse) - mtoff_nifti: path of a Nifti file storing the 3D MT "off" image (without off-res. pulse) - mtr_output: path of the Nifti file that will store the 3D output MTR image (saved as a double-precision floating point image FLOAT64); such an output map is calculated as MTR = 100 * (MToff - MTon)/MToff above, MTon is the image where the off-resonance pulse is played (so is "on") while MToff is the image where the off-resonance pulse is not played (so is "off") - mask_nifti: path of a Nifti file storting a mask (MTR will be calculated only where mask_nifti equals 1; 0 will be set in the MTR output map otherwise) Dependencies (Python packages): nibabel, numpy (other than standard library). 
References: "T1, T2 relaxation and magnetization transfer in tissue at 3T", Stanisz GJ, Magnetic Resonance in Medicine (2005), 54:507-512 Author: Francesco Grussu, University College London CDSQuaMRI Project <[email protected]> <[email protected]><|endoftext|>
fc1871101f045b8ec772cabd82c92c3767fc1996a1bb8e021f0da5d828fbb4c9
def add_data(self, field, no_ghost=False): 'Adds a source of data for the block collection.\n\n Given a `data_source` and a `field` to populate from, adds the data\n to the block collection so that is able to be rendered.\n\n Parameters\n ----------\n data_source : YTRegion\n A YTRegion object to use as a data source.\n field : string\n A field to populate from.\n no_ghost : bool (False)\n Should we speed things up by skipping ghost zone generation?\n ' self.data_source.tiles.set_fields([field], [False], no_ghost=no_ghost) (vert, dx, le, re) = ([], [], [], []) self.min_val = (+ np.inf) self.max_val = (- np.inf) if self.scale: left_min = (np.ones(3, 'f8') * np.inf) right_max = (np.ones(3, 'f8') * (- np.inf)) for block in self.data_source.tiles.traverse(): np.minimum(left_min, block.LeftEdge, left_min) np.maximum(right_max, block.LeftEdge, right_max) scale = (right_max.max() - left_min.min()) for block in self.data_source.tiles.traverse(): block.LeftEdge -= left_min block.LeftEdge /= scale block.RightEdge -= left_min block.RightEdge /= scale for (i, block) in enumerate(self.data_source.tiles.traverse()): self.min_val = min(self.min_val, np.nanmin(np.abs(block.my_data[0])).min()) self.max_val = max(self.max_val, np.nanmax(np.abs(block.my_data[0])).max()) self.blocks[id(block)] = (i, block) vert.append([1.0, 1.0, 1.0, 1.0]) dds = ((block.RightEdge - block.LeftEdge) / block.source_mask.shape) dx.append(dds.tolist()) le.append(block.LeftEdge.tolist()) re.append(block.RightEdge.tolist()) for (g, node, (sl, _dims, _gi)) in self.data_source.tiles.slice_traverse(): block = node.data self.blocks_by_grid[(g.id - g._id_offset)].append((id(block), i)) self.grids_by_block[id(node.data)] = ((g.id - g._id_offset), sl) if hasattr(self.min_val, 'in_units'): self.min_val = self.min_val.d if hasattr(self.max_val, 'in_units'): self.max_val = self.max_val.d LE = np.array([b.LeftEdge for (i, b) in self.blocks.values()]).min(axis=0) RE = np.array([b.RightEdge for (i, b) in 
self.blocks.values()]).max(axis=0) self.diagonal = np.sqrt(((RE - LE) ** 2).sum()) vert = np.array(vert, dtype='f4') dx = np.array(dx, dtype='f4') le = np.array(le, dtype='f4') re = np.array(re, dtype='f4') self.vertex_array.attributes.append(VertexAttribute(name='model_vertex', data=vert)) self.vertex_array.attributes.append(VertexAttribute(name='in_dx', data=dx)) self.vertex_array.attributes.append(VertexAttribute(name='in_left_edge', data=le)) self.vertex_array.attributes.append(VertexAttribute(name='in_right_edge', data=re)) self._load_textures()
Adds a source of data for the block collection. Given a `data_source` and a `field` to populate from, adds the data to the block collection so that is able to be rendered. Parameters ---------- data_source : YTRegion A YTRegion object to use as a data source. field : string A field to populate from. no_ghost : bool (False) Should we speed things up by skipping ghost zone generation?
yt_idv/scene_data/block_collection.py
add_data
chrishavlin/tempidv
3
python
def add_data(self, field, no_ghost=False): 'Adds a source of data for the block collection.\n\n Given a `data_source` and a `field` to populate from, adds the data\n to the block collection so that is able to be rendered.\n\n Parameters\n ----------\n data_source : YTRegion\n A YTRegion object to use as a data source.\n field : string\n A field to populate from.\n no_ghost : bool (False)\n Should we speed things up by skipping ghost zone generation?\n ' self.data_source.tiles.set_fields([field], [False], no_ghost=no_ghost) (vert, dx, le, re) = ([], [], [], []) self.min_val = (+ np.inf) self.max_val = (- np.inf) if self.scale: left_min = (np.ones(3, 'f8') * np.inf) right_max = (np.ones(3, 'f8') * (- np.inf)) for block in self.data_source.tiles.traverse(): np.minimum(left_min, block.LeftEdge, left_min) np.maximum(right_max, block.LeftEdge, right_max) scale = (right_max.max() - left_min.min()) for block in self.data_source.tiles.traverse(): block.LeftEdge -= left_min block.LeftEdge /= scale block.RightEdge -= left_min block.RightEdge /= scale for (i, block) in enumerate(self.data_source.tiles.traverse()): self.min_val = min(self.min_val, np.nanmin(np.abs(block.my_data[0])).min()) self.max_val = max(self.max_val, np.nanmax(np.abs(block.my_data[0])).max()) self.blocks[id(block)] = (i, block) vert.append([1.0, 1.0, 1.0, 1.0]) dds = ((block.RightEdge - block.LeftEdge) / block.source_mask.shape) dx.append(dds.tolist()) le.append(block.LeftEdge.tolist()) re.append(block.RightEdge.tolist()) for (g, node, (sl, _dims, _gi)) in self.data_source.tiles.slice_traverse(): block = node.data self.blocks_by_grid[(g.id - g._id_offset)].append((id(block), i)) self.grids_by_block[id(node.data)] = ((g.id - g._id_offset), sl) if hasattr(self.min_val, 'in_units'): self.min_val = self.min_val.d if hasattr(self.max_val, 'in_units'): self.max_val = self.max_val.d LE = np.array([b.LeftEdge for (i, b) in self.blocks.values()]).min(axis=0) RE = np.array([b.RightEdge for (i, b) in 
self.blocks.values()]).max(axis=0) self.diagonal = np.sqrt(((RE - LE) ** 2).sum()) vert = np.array(vert, dtype='f4') dx = np.array(dx, dtype='f4') le = np.array(le, dtype='f4') re = np.array(re, dtype='f4') self.vertex_array.attributes.append(VertexAttribute(name='model_vertex', data=vert)) self.vertex_array.attributes.append(VertexAttribute(name='in_dx', data=dx)) self.vertex_array.attributes.append(VertexAttribute(name='in_left_edge', data=le)) self.vertex_array.attributes.append(VertexAttribute(name='in_right_edge', data=re)) self._load_textures()
def add_data(self, field, no_ghost=False): 'Adds a source of data for the block collection.\n\n Given a `data_source` and a `field` to populate from, adds the data\n to the block collection so that is able to be rendered.\n\n Parameters\n ----------\n data_source : YTRegion\n A YTRegion object to use as a data source.\n field : string\n A field to populate from.\n no_ghost : bool (False)\n Should we speed things up by skipping ghost zone generation?\n ' self.data_source.tiles.set_fields([field], [False], no_ghost=no_ghost) (vert, dx, le, re) = ([], [], [], []) self.min_val = (+ np.inf) self.max_val = (- np.inf) if self.scale: left_min = (np.ones(3, 'f8') * np.inf) right_max = (np.ones(3, 'f8') * (- np.inf)) for block in self.data_source.tiles.traverse(): np.minimum(left_min, block.LeftEdge, left_min) np.maximum(right_max, block.LeftEdge, right_max) scale = (right_max.max() - left_min.min()) for block in self.data_source.tiles.traverse(): block.LeftEdge -= left_min block.LeftEdge /= scale block.RightEdge -= left_min block.RightEdge /= scale for (i, block) in enumerate(self.data_source.tiles.traverse()): self.min_val = min(self.min_val, np.nanmin(np.abs(block.my_data[0])).min()) self.max_val = max(self.max_val, np.nanmax(np.abs(block.my_data[0])).max()) self.blocks[id(block)] = (i, block) vert.append([1.0, 1.0, 1.0, 1.0]) dds = ((block.RightEdge - block.LeftEdge) / block.source_mask.shape) dx.append(dds.tolist()) le.append(block.LeftEdge.tolist()) re.append(block.RightEdge.tolist()) for (g, node, (sl, _dims, _gi)) in self.data_source.tiles.slice_traverse(): block = node.data self.blocks_by_grid[(g.id - g._id_offset)].append((id(block), i)) self.grids_by_block[id(node.data)] = ((g.id - g._id_offset), sl) if hasattr(self.min_val, 'in_units'): self.min_val = self.min_val.d if hasattr(self.max_val, 'in_units'): self.max_val = self.max_val.d LE = np.array([b.LeftEdge for (i, b) in self.blocks.values()]).min(axis=0) RE = np.array([b.RightEdge for (i, b) in 
self.blocks.values()]).max(axis=0) self.diagonal = np.sqrt(((RE - LE) ** 2).sum()) vert = np.array(vert, dtype='f4') dx = np.array(dx, dtype='f4') le = np.array(le, dtype='f4') re = np.array(re, dtype='f4') self.vertex_array.attributes.append(VertexAttribute(name='model_vertex', data=vert)) self.vertex_array.attributes.append(VertexAttribute(name='in_dx', data=dx)) self.vertex_array.attributes.append(VertexAttribute(name='in_left_edge', data=le)) self.vertex_array.attributes.append(VertexAttribute(name='in_right_edge', data=re)) self._load_textures()<|docstring|>Adds a source of data for the block collection. Given a `data_source` and a `field` to populate from, adds the data to the block collection so that is able to be rendered. Parameters ---------- data_source : YTRegion A YTRegion object to use as a data source. field : string A field to populate from. no_ghost : bool (False) Should we speed things up by skipping ghost zone generation?<|endoftext|>
29b8107c31ee3490c5a78edab3bc63a2a384849ecf61ccfc266540675fcd9bc4
def eval_fields_3d_no_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', glob_arr_coeff: 'float[:,:,:,:]', out_fields: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X,Y and Z directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3), out_fields.shape[3])) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = 
global_spans_3[i_cell_3] arr_coeff_fields[(:, :, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3), :)] for i_basis_1 in range((1 + f_p1)): for i_basis_2 in range((1 + f_p2)): for i_basis_3 in range((1 + f_p3)): coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, i_basis_3, :)] for i_quad_1 in range(k1): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_quad_2 in range(k2): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] for i_quad_3 in range(k3): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline = ((spline_1 * spline_2) * spline_3) out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :)] += (spline * coeff_fields)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction glob_arr_coeff: ndarray of floats Coefficients of the fields in the X,Y and Z directions out_fields: ndarray of floats Evaluated fields, filled with the correct values by the function
psydac/core/kernels.py
eval_fields_3d_no_weights
mayuri-dhote/psydac
0
python
def eval_fields_3d_no_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', glob_arr_coeff: 'float[:,:,:,:]', out_fields: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X,Y and Z directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3), out_fields.shape[3])) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = 
global_spans_3[i_cell_3] arr_coeff_fields[(:, :, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3), :)] for i_basis_1 in range((1 + f_p1)): for i_basis_2 in range((1 + f_p2)): for i_basis_3 in range((1 + f_p3)): coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, i_basis_3, :)] for i_quad_1 in range(k1): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_quad_2 in range(k2): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] for i_quad_3 in range(k3): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline = ((spline_1 * spline_2) * spline_3) out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :)] += (spline * coeff_fields)
def eval_fields_3d_no_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', glob_arr_coeff: 'float[:,:,:,:]', out_fields: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X,Y and Z directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3), out_fields.shape[3])) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = 
global_spans_3[i_cell_3] arr_coeff_fields[(:, :, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3), :)] for i_basis_1 in range((1 + f_p1)): for i_basis_2 in range((1 + f_p2)): for i_basis_3 in range((1 + f_p3)): coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, i_basis_3, :)] for i_quad_1 in range(k1): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_quad_2 in range(k2): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] for i_quad_3 in range(k3): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline = ((spline_1 * spline_2) * spline_3) out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :)] += (spline * coeff_fields)<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction glob_arr_coeff: ndarray of floats Coefficients of the fields in the X,Y and Z directions out_fields: 
ndarray of floats Evaluated fields, filled with the correct values by the function<|endoftext|>
34cc5d0e08e3e4a21d096ec44959a7a5b961c2d77e82cc4dcbf1547abcee743b
def eval_fields_2d_no_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', glob_arr_coeff: 'float[:,:,:]', out_fields: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X and Y directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), out_fields.shape[2])) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_coeff_fields[(:, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), :)] for i_basis_1 in range((1 + f_p1)): for i_basis_2 in range((1 + f_p2)): coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, :)] for i_quad_1 in range(k1): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_quad_2 in range(k2): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline = (spline_1 * spline_2) out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * 
k2) + i_quad_2), :)] += (spline * coeff_fields)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction glob_arr_coeff: ndarray of floats Coefficients of the fields in the X and Y directions out_fields: ndarray of floats Evaluated fields, filled with the correct values by the function
psydac/core/kernels.py
eval_fields_2d_no_weights
mayuri-dhote/psydac
0
python
def eval_fields_2d_no_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', glob_arr_coeff: 'float[:,:,:]', out_fields: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X and Y directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), out_fields.shape[2])) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_coeff_fields[(:, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), :)] for i_basis_1 in range((1 + f_p1)): for i_basis_2 in range((1 + f_p2)): coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, :)] for i_quad_1 in range(k1): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_quad_2 in range(k2): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline = (spline_1 * spline_2) out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * 
k2) + i_quad_2), :)] += (spline * coeff_fields)
def eval_fields_2d_no_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', glob_arr_coeff: 'float[:,:,:]', out_fields: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X and Y directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), out_fields.shape[2])) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_coeff_fields[(:, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), :)] for i_basis_1 in range((1 + f_p1)): for i_basis_2 in range((1 + f_p2)): coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, :)] for i_quad_1 in range(k1): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_quad_2 in range(k2): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline = (spline_1 * spline_2) out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * 
k2) + i_quad_2), :)] += (spline * coeff_fields)<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction glob_arr_coeff: ndarray of floats Coefficients of the fields in the X and Y directions out_fields: ndarray of floats Evaluated fields, filled with the correct values by the function<|endoftext|>
732a66e6c7df5071b012a58bf96585620767451b0d3fa603e4ff74bf7190fc27
def eval_fields_3d_weighted(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', glob_arr_coeff: 'float[:,:,:,:]', global_arr_weights: 'float[:,:,:]', out_fields: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X,Y and Z directions\n\n global_arr_weights: ndarray of float\n Coefficients of the weight field in the X,Y and Z directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3), out_fields.shape[3])) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + 
f_p2), (1 + f_p3))) arr_fields = np.zeros((k1, k2, k3, out_fields.shape[3])) arr_weights = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_coeff_fields[(:, :, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3), :)] arr_coeff_weights[(:, :, :)] = global_arr_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_fields[(:, :, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] splines = ((spline_1 * spline_2) * spline_3) coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, i_basis_3, :)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_fields[(i_quad_1, i_quad_2, i_quad_3, :)] += ((splines * coeff_fields) * coeff_weight) arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (splines * coeff_weight) fields = arr_fields[(i_quad_1, i_quad_2, i_quad_3, :)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :)] += (fields / weight)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction glob_arr_coeff: ndarray of floats Coefficients of the fields in the X,Y and Z directions global_arr_weights: ndarray of float Coefficients of the weight field in the X,Y and Z directions out_fields: ndarray of floats Evaluated fields, filled with the correct values by the function
psydac/core/kernels.py
eval_fields_3d_weighted
mayuri-dhote/psydac
0
python
def eval_fields_3d_weighted(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', glob_arr_coeff: 'float[:,:,:,:]', global_arr_weights: 'float[:,:,:]', out_fields: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X,Y and Z directions\n\n global_arr_weights: ndarray of float\n Coefficients of the weight field in the X,Y and Z directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3), out_fields.shape[3])) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + 
f_p2), (1 + f_p3))) arr_fields = np.zeros((k1, k2, k3, out_fields.shape[3])) arr_weights = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_coeff_fields[(:, :, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3), :)] arr_coeff_weights[(:, :, :)] = global_arr_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_fields[(:, :, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] splines = ((spline_1 * spline_2) * spline_3) coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, i_basis_3, :)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_fields[(i_quad_1, i_quad_2, i_quad_3, :)] += ((splines * coeff_fields) * coeff_weight) arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (splines * coeff_weight) fields = arr_fields[(i_quad_1, i_quad_2, i_quad_3, :)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :)] += (fields / weight)
def eval_fields_3d_weighted(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', glob_arr_coeff: 'float[:,:,:,:]', global_arr_weights: 'float[:,:,:]', out_fields: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n glob_arr_coeff: ndarray of floats\n Coefficients of the fields in the X,Y and Z directions\n\n global_arr_weights: ndarray of float\n Coefficients of the weight field in the X,Y and Z directions\n\n out_fields: ndarray of floats\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3), out_fields.shape[3])) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + 
f_p2), (1 + f_p3))) arr_fields = np.zeros((k1, k2, k3, out_fields.shape[3])) arr_weights = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_coeff_fields[(:, :, :, :)] = glob_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3), :)] arr_coeff_weights[(:, :, :)] = global_arr_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_fields[(:, :, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] splines = ((spline_1 * spline_2) * spline_3) coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, i_basis_3, :)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_fields[(i_quad_1, i_quad_2, i_quad_3, :)] += ((splines * coeff_fields) * coeff_weight) arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (splines * coeff_weight) fields = arr_fields[(i_quad_1, i_quad_2, i_quad_3, :)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :)] += (fields / weight)<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int 
Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction glob_arr_coeff: ndarray of floats Coefficients of the fields in the X,Y and Z directions global_arr_weights: ndarray of float Coefficients of the weight field in the X,Y and Z directions out_fields: ndarray of floats Evaluated fields, filled with the correct values by the function<|endoftext|>
de090c3d0fcfd5ae6f64132caeca160355fc24b02f3f6d5f124b4db133db09ec
def eval_fields_2d_weighted(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff: 'float[:,:,:]', global_arr_weights: 'float[:,:]', out_fields: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n\n global_basis_1: ndarray of float\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of float\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of int\n Spans in the X direction\n global_spans_2: ndarray of int\n Spans in the Y direction\n\n global_arr_coeff: ndarray of float\n Coefficients of the fields in the X,Y and Z directions\n\n global_arr_weights: ndarray of float\n Coefficients of the weight field in the X,Y and Z directions\n\n out_fields: ndarray of float\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), out_fields.shape[2])) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_fields = np.zeros((k1, k2, out_fields.shape[2])) arr_weights = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_coeff_fields[(:, :, :)] = global_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), :)] arr_coeff_weights[(:, :)] = global_arr_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - 
f_p2):((1 + pad2) + span_2))] arr_fields[(:, :, :)] = 0.0 arr_weights[(:, :)] = 0.0 for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline = (spline_1 * spline_2) coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, :)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_fields[(i_quad_1, i_quad_2, :)] += ((spline * coeff_fields) * coeff_weight) arr_weights[(i_quad_1, i_quad_2)] += (spline * coeff_weight) fields = arr_fields[(i_quad_1, i_quad_2, :)] weight = arr_weights[(i_quad_1, i_quad_2)] out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :)] += (fields / weight)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of float Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of float Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of int Spans in the X direction global_spans_2: ndarray of int Spans in the Y direction global_arr_coeff: ndarray of float Coefficients of the fields in the X,Y and Z directions global_arr_weights: ndarray of float Coefficients of the weight field in the X,Y and Z directions out_fields: ndarray of float Evaluated fields, filled with the correct values by the function
psydac/core/kernels.py
eval_fields_2d_weighted
mayuri-dhote/psydac
0
python
def eval_fields_2d_weighted(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff: 'float[:,:,:]', global_arr_weights: 'float[:,:]', out_fields: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n\n global_basis_1: ndarray of float\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of float\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of int\n Spans in the X direction\n global_spans_2: ndarray of int\n Spans in the Y direction\n\n global_arr_coeff: ndarray of float\n Coefficients of the fields in the X,Y and Z directions\n\n global_arr_weights: ndarray of float\n Coefficients of the weight field in the X,Y and Z directions\n\n out_fields: ndarray of float\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), out_fields.shape[2])) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_fields = np.zeros((k1, k2, out_fields.shape[2])) arr_weights = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_coeff_fields[(:, :, :)] = global_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), :)] arr_coeff_weights[(:, :)] = global_arr_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - 
f_p2):((1 + pad2) + span_2))] arr_fields[(:, :, :)] = 0.0 arr_weights[(:, :)] = 0.0 for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline = (spline_1 * spline_2) coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, :)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_fields[(i_quad_1, i_quad_2, :)] += ((spline * coeff_fields) * coeff_weight) arr_weights[(i_quad_1, i_quad_2)] += (spline * coeff_weight) fields = arr_fields[(i_quad_1, i_quad_2, :)] weight = arr_weights[(i_quad_1, i_quad_2)] out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :)] += (fields / weight)
def eval_fields_2d_weighted(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff: 'float[:,:,:]', global_arr_weights: 'float[:,:]', out_fields: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n\n global_basis_1: ndarray of float\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of float\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of int\n Spans in the X direction\n global_spans_2: ndarray of int\n Spans in the Y direction\n\n global_arr_coeff: ndarray of float\n Coefficients of the fields in the X,Y and Z directions\n\n global_arr_weights: ndarray of float\n Coefficients of the weight field in the X,Y and Z directions\n\n out_fields: ndarray of float\n Evaluated fields, filled with the correct values by the function\n ' arr_coeff_fields = np.zeros(((1 + f_p1), (1 + f_p2), out_fields.shape[2])) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_fields = np.zeros((k1, k2, out_fields.shape[2])) arr_weights = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_coeff_fields[(:, :, :)] = global_arr_coeff[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), :)] arr_coeff_weights[(:, :)] = global_arr_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - 
f_p2):((1 + pad2) + span_2))] arr_fields[(:, :, :)] = 0.0 arr_weights[(:, :)] = 0.0 for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline = (spline_1 * spline_2) coeff_fields = arr_coeff_fields[(i_basis_1, i_basis_2, :)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_fields[(i_quad_1, i_quad_2, :)] += ((spline * coeff_fields) * coeff_weight) arr_weights[(i_quad_1, i_quad_2)] += (spline * coeff_weight) fields = arr_fields[(i_quad_1, i_quad_2, :)] weight = arr_weights[(i_quad_1, i_quad_2)] out_fields[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :)] += (fields / weight)<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of float Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of float Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of int Spans in the X direction global_spans_2: ndarray of int Spans in the Y direction global_arr_coeff: ndarray of float Coefficients of the fields in the X,Y and Z directions global_arr_weights: ndarray of float Coefficients of the weight field in the X,Y and Z directions out_fields: ndarray of float Evaluated fields, filled with the correct values by the function<|endoftext|>
1be6215d696a2c45a5a140d595b1186121e6c9000c802add063f5695f868653f
def eval_jac_det_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jac_det: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jac_det: ndarray of floats\n Jacobian determinant on the grid.\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) 
arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * 
spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] jac_det[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3))] = ((((((((+ x_x1) * y_x2) * z_x3) + ((x_x2 * y_x3) * z_x1)) + ((x_x3 * y_x1) * z_x2)) - ((x_x1 * y_x3) * z_x2)) - ((x_x2 * y_x1) * z_x3)) - ((x_x3 * y_x2) * z_x1))
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field jac_det: ndarray of floats Jacobian determinant on the grid.
psydac/core/kernels.py
eval_jac_det_3d
mayuri-dhote/psydac
0
python
def eval_jac_det_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jac_det: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jac_det: ndarray of floats\n Jacobian determinant on the grid.\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) 
arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * 
spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] jac_det[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3))] = ((((((((+ x_x1) * y_x2) * z_x3) + ((x_x2 * y_x3) * z_x1)) + ((x_x3 * y_x1) * z_x2)) - ((x_x1 * y_x3) * z_x2)) - ((x_x2 * y_x1) * z_x3)) - ((x_x3 * y_x2) * z_x1))
def eval_jac_det_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jac_det: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jac_det: ndarray of floats\n Jacobian determinant on the grid.\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) 
arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * 
spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] jac_det[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3))] = ((((((((+ x_x1) * y_x2) * z_x3) + ((x_x2 * y_x3) * z_x1)) + ((x_x3 * y_x1) * z_x2)) - ((x_x1 * y_x3) * z_x2)) - ((x_x2 * y_x1) * z_x3)) - ((x_x3 * y_x2) * z_x1))<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number 
of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field jac_det: ndarray of floats Jacobian determinant on the grid.<|endoftext|>
698433e449ac3f6d2b437b27800b308878af7fa3f60305b9b80b6231651cc071
def eval_jac_det_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int,
                    k1: int, k2: int,
                    global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                    global_spans_1: 'int[:]', global_spans_2: 'int[:]',
                    global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]',
                    jac_det: 'float[:,:]'):
    """
    Evaluate the Jacobian determinant of a 2D spline mapping on the whole
    quadrature grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field

    jac_det: ndarray of floats
        Jacobian determinant on the grid (output, filled in place)
    """
    # Per-cell coefficient buffers and per-quadrature-point derivative
    # accumulators, allocated once and reused for every cell.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2))

    arr_x_x1 = np.zeros((k1, k2))
    arr_x_x2 = np.zeros((k1, k2))

    arr_y_x1 = np.zeros((k1, k2))
    arr_y_x2 = np.zeros((k1, k2))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]
        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]

            arr_x_x1[:, :] = 0.0
            arr_x_x2[:, :] = 0.0
            arr_y_x1[:, :] = 0.0
            arr_y_x2[:, :] = 0.0

            # Coefficients of the (1+f_p1) x (1+f_p2) basis functions active
            # on this cell, extracted from the padded global arrays.
            arr_coeffs_x[:, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]
            arr_coeffs_y[:, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]

            for i_quad_1 in range(k1):
                for i_quad_2 in range(k2):
                    for i_basis_1 in range(1 + f_p1):
                        spline_1  = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                        spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]
                        for i_basis_2 in range(1 + f_p2):
                            spline_2  = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                            spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]

                            # Partial derivatives of the tensor-product basis.
                            mapping_x1 = spline_x1 * spline_2
                            mapping_x2 = spline_1 * spline_x2

                            coeff_x = arr_coeffs_x[i_basis_1, i_basis_2]
                            coeff_y = arr_coeffs_y[i_basis_1, i_basis_2]

                            arr_x_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_x
                            arr_x_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_x
                            arr_y_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_y
                            arr_y_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_y

                    x_x1 = arr_x_x1[i_quad_1, i_quad_2]
                    x_x2 = arr_x_x2[i_quad_1, i_quad_2]
                    y_x1 = arr_y_x1[i_quad_1, i_quad_2]
                    y_x2 = arr_y_x2[i_quad_1, i_quad_2]

                    # 2x2 determinant of the Jacobian matrix at this point.
                    jac_det[i_cell_1 * k1 + i_quad_1,
                            i_cell_2 * k2 + i_quad_2] = x_x1 * y_x2 - x_x2 * y_x1
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field jac_det: ndarray of floats Jacobian determinant on the grid.
psydac/core/kernels.py
eval_jac_det_2d
mayuri-dhote/psydac
0
python
def eval_jac_det_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int,
                    k1: int, k2: int,
                    global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                    global_spans_1: 'int[:]', global_spans_2: 'int[:]',
                    global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]',
                    jac_det: 'float[:,:]'):
    """
    Evaluate the Jacobian determinant of a 2D spline mapping on the whole
    quadrature grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field

    jac_det: ndarray of floats
        Jacobian determinant on the grid (output, filled in place)
    """
    # Per-cell coefficient buffers and per-quadrature-point derivative
    # accumulators, allocated once and reused for every cell.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2))

    arr_x_x1 = np.zeros((k1, k2))
    arr_x_x2 = np.zeros((k1, k2))

    arr_y_x1 = np.zeros((k1, k2))
    arr_y_x2 = np.zeros((k1, k2))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]
        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]

            arr_x_x1[:, :] = 0.0
            arr_x_x2[:, :] = 0.0
            arr_y_x1[:, :] = 0.0
            arr_y_x2[:, :] = 0.0

            # Coefficients of the (1+f_p1) x (1+f_p2) basis functions active
            # on this cell, extracted from the padded global arrays.
            arr_coeffs_x[:, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]
            arr_coeffs_y[:, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]

            for i_quad_1 in range(k1):
                for i_quad_2 in range(k2):
                    for i_basis_1 in range(1 + f_p1):
                        spline_1  = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                        spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]
                        for i_basis_2 in range(1 + f_p2):
                            spline_2  = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                            spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]

                            # Partial derivatives of the tensor-product basis.
                            mapping_x1 = spline_x1 * spline_2
                            mapping_x2 = spline_1 * spline_x2

                            coeff_x = arr_coeffs_x[i_basis_1, i_basis_2]
                            coeff_y = arr_coeffs_y[i_basis_1, i_basis_2]

                            arr_x_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_x
                            arr_x_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_x
                            arr_y_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_y
                            arr_y_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_y

                    x_x1 = arr_x_x1[i_quad_1, i_quad_2]
                    x_x2 = arr_x_x2[i_quad_1, i_quad_2]
                    y_x1 = arr_y_x1[i_quad_1, i_quad_2]
                    y_x2 = arr_y_x2[i_quad_1, i_quad_2]

                    # 2x2 determinant of the Jacobian matrix at this point.
                    jac_det[i_cell_1 * k1 + i_quad_1,
                            i_cell_2 * k2 + i_quad_2] = x_x1 * y_x2 - x_x2 * y_x1
def eval_jac_det_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int,
                    k1: int, k2: int,
                    global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                    global_spans_1: 'int[:]', global_spans_2: 'int[:]',
                    global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]',
                    jac_det: 'float[:,:]'):
    """
    Evaluate the Jacobian determinant of a 2D spline mapping on the whole
    quadrature grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field

    jac_det: ndarray of floats
        Jacobian determinant on the grid (output, filled in place)
    """
    # Per-cell coefficient buffers and per-quadrature-point derivative
    # accumulators, allocated once and reused for every cell.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2))

    arr_x_x1 = np.zeros((k1, k2))
    arr_x_x2 = np.zeros((k1, k2))

    arr_y_x1 = np.zeros((k1, k2))
    arr_y_x2 = np.zeros((k1, k2))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]
        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]

            arr_x_x1[:, :] = 0.0
            arr_x_x2[:, :] = 0.0
            arr_y_x1[:, :] = 0.0
            arr_y_x2[:, :] = 0.0

            # Coefficients of the (1+f_p1) x (1+f_p2) basis functions active
            # on this cell, extracted from the padded global arrays.
            arr_coeffs_x[:, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]
            arr_coeffs_y[:, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]

            for i_quad_1 in range(k1):
                for i_quad_2 in range(k2):
                    for i_basis_1 in range(1 + f_p1):
                        spline_1  = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                        spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]
                        for i_basis_2 in range(1 + f_p2):
                            spline_2  = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                            spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]

                            # Partial derivatives of the tensor-product basis.
                            mapping_x1 = spline_x1 * spline_2
                            mapping_x2 = spline_1 * spline_x2

                            coeff_x = arr_coeffs_x[i_basis_1, i_basis_2]
                            coeff_y = arr_coeffs_y[i_basis_1, i_basis_2]

                            arr_x_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_x
                            arr_x_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_x
                            arr_y_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_y
                            arr_y_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_y

                    x_x1 = arr_x_x1[i_quad_1, i_quad_2]
                    x_x2 = arr_x_x2[i_quad_1, i_quad_2]
                    y_x1 = arr_y_x1[i_quad_1, i_quad_2]
                    y_x2 = arr_y_x2[i_quad_1, i_quad_2]

                    # 2x2 determinant of the Jacobian matrix at this point.
                    jac_det[i_cell_1 * k1 + i_quad_1,
                            i_cell_2 * k2 + i_quad_2] = x_x1 * y_x2 - x_x2 * y_x1
33a6732d9bdc5a9f597f6c0189b810101c1df395ac3489f3a19c361b02f21a6c
def eval_jac_det_3d_weights(nc1: int, nc2: int, nc3: int,
                            pad1: int, pad2: int, pad3: int,
                            f_p1: int, f_p2: int, f_p3: int,
                            k1: int, k2: int, k3: int,
                            global_basis_1: 'float[:,:,:,:]',
                            global_basis_2: 'float[:,:,:,:]',
                            global_basis_3: 'float[:,:,:,:]',
                            global_spans_1: 'int[:]',
                            global_spans_2: 'int[:]',
                            global_spans_3: 'int[:]',
                            global_arr_coeff_x: 'float[:,:,:]',
                            global_arr_coeff_y: 'float[:,:,:]',
                            global_arr_coeff_z: 'float[:,:,:]',
                            global_arr_coeff_weights: 'float[:,:,:]',
                            jac_det: 'float[:,:,:]'):
    """
    Evaluate the Jacobian determinant of a 3D weighted (NURBS-like) spline
    mapping on the whole quadrature grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction
    nc3: int
        Number of cells in the Z direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction
    pad3: int
        Padding in the Z direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction
    f_p3: int
        Degree in the Z direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction
    k3: int
        Number of evaluation points in the Z direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction
    global_basis_3: ndarray of floats
        Basis functions values at each cell and quadrature points in the Z direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction
    global_spans_3: ndarray of ints
        Spans in the Z direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field
    global_arr_coeff_z: ndarray of floats
        Coefficients of the Z field

    global_arr_coeff_weights: ndarray of floats
        Coefficients of the weight field

    jac_det: ndarray of floats
        Jacobian determinant on the grid (output, filled in place)
    """
    # Per-cell coefficient buffers, reused for every cell.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_z = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeff_weights = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))

    # Accumulators for the weighted field values and their partial
    # derivatives at each quadrature point of the current cell.
    arr_x = np.zeros((k1, k2, k3))
    arr_y = np.zeros((k1, k2, k3))
    arr_z = np.zeros((k1, k2, k3))

    arr_x_x1 = np.zeros((k1, k2, k3))
    arr_x_x2 = np.zeros((k1, k2, k3))
    arr_x_x3 = np.zeros((k1, k2, k3))

    arr_y_x1 = np.zeros((k1, k2, k3))
    arr_y_x2 = np.zeros((k1, k2, k3))
    arr_y_x3 = np.zeros((k1, k2, k3))

    arr_z_x1 = np.zeros((k1, k2, k3))
    arr_z_x2 = np.zeros((k1, k2, k3))
    arr_z_x3 = np.zeros((k1, k2, k3))

    arr_weights = np.zeros((k1, k2, k3))
    arr_weights_x1 = np.zeros((k1, k2, k3))
    arr_weights_x2 = np.zeros((k1, k2, k3))
    arr_weights_x3 = np.zeros((k1, k2, k3))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]
        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]
            for i_cell_3 in range(nc3):
                span_3 = global_spans_3[i_cell_3]

                arr_x[:, :, :] = 0.0
                arr_y[:, :, :] = 0.0
                arr_z[:, :, :] = 0.0

                arr_x_x1[:, :, :] = 0.0
                arr_x_x2[:, :, :] = 0.0
                arr_x_x3[:, :, :] = 0.0

                arr_y_x1[:, :, :] = 0.0
                arr_y_x2[:, :, :] = 0.0
                arr_y_x3[:, :, :] = 0.0

                arr_z_x1[:, :, :] = 0.0
                arr_z_x2[:, :, :] = 0.0
                arr_z_x3[:, :, :] = 0.0

                arr_weights[:, :, :] = 0.0
                arr_weights_x1[:, :, :] = 0.0
                arr_weights_x2[:, :, :] = 0.0
                arr_weights_x3[:, :, :] = 0.0

                # Coefficients of the basis functions active on this cell,
                # extracted from the padded global arrays.
                arr_coeffs_x[:, :, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_y[:, :, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_z[:, :, :] = global_arr_coeff_z[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeff_weights[:, :, :] = global_arr_coeff_weights[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                                     pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                                     pad3 + span_3 - f_p3:1 + pad3 + span_3]

                for i_quad_1 in range(k1):
                    for i_quad_2 in range(k2):
                        for i_quad_3 in range(k3):
                            for i_basis_1 in range(1 + f_p1):
                                spline_1  = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                                spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]
                                for i_basis_2 in range(1 + f_p2):
                                    spline_2  = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                                    spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]
                                    for i_basis_3 in range(1 + f_p3):
                                        spline_3  = global_basis_3[i_cell_3, i_basis_3, 0, i_quad_3]
                                        spline_x3 = global_basis_3[i_cell_3, i_basis_3, 1, i_quad_3]

                                        # Value and partial derivatives of the
                                        # tensor-product basis function.
                                        mapping    = spline_1 * spline_2 * spline_3
                                        mapping_x1 = spline_x1 * spline_2 * spline_3
                                        mapping_x2 = spline_1 * spline_x2 * spline_3
                                        mapping_x3 = spline_1 * spline_2 * spline_x3

                                        coeff_x = arr_coeffs_x[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_y = arr_coeffs_y[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_z = arr_coeffs_z[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_weight = arr_coeff_weights[i_basis_1, i_basis_2, i_basis_3]

                                        arr_x[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_x
                                        arr_y[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_y
                                        arr_z[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_z

                                        arr_x_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_x
                                        arr_x_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_x
                                        arr_x_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_x

                                        arr_y_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_y
                                        arr_y_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_y
                                        arr_y_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_y

                                        arr_z_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_z
                                        arr_z_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_z
                                        arr_z_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_z

                                        arr_weights[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_weight
                                        arr_weights_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_weight
                                        arr_weights_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_weight
                                        arr_weights_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_weight

                            x = arr_x[i_quad_1, i_quad_2, i_quad_3]
                            y = arr_y[i_quad_1, i_quad_2, i_quad_3]
                            z = arr_z[i_quad_1, i_quad_2, i_quad_3]

                            x_x1 = arr_x_x1[i_quad_1, i_quad_2, i_quad_3]
                            x_x2 = arr_x_x2[i_quad_1, i_quad_2, i_quad_3]
                            x_x3 = arr_x_x3[i_quad_1, i_quad_2, i_quad_3]

                            y_x1 = arr_y_x1[i_quad_1, i_quad_2, i_quad_3]
                            y_x2 = arr_y_x2[i_quad_1, i_quad_2, i_quad_3]
                            y_x3 = arr_y_x3[i_quad_1, i_quad_2, i_quad_3]

                            z_x1 = arr_z_x1[i_quad_1, i_quad_2, i_quad_3]
                            z_x2 = arr_z_x2[i_quad_1, i_quad_2, i_quad_3]
                            z_x3 = arr_z_x3[i_quad_1, i_quad_2, i_quad_3]

                            weight    = arr_weights[i_quad_1, i_quad_2, i_quad_3]
                            weight_x1 = arr_weights_x1[i_quad_1, i_quad_2, i_quad_3]
                            weight_x2 = arr_weights_x2[i_quad_1, i_quad_2, i_quad_3]
                            weight_x3 = arr_weights_x3[i_quad_1, i_quad_2, i_quad_3]

                            # Quotient rule: derivatives of the rational
                            # (weighted) fields d(F/w) = (dF - (dw) F/w) / w.
                            inv_weight = 1.0 / weight

                            x_x1 = (x_x1 - weight_x1 * x * inv_weight) * inv_weight
                            x_x2 = (x_x2 - weight_x2 * x * inv_weight) * inv_weight
                            x_x3 = (x_x3 - weight_x3 * x * inv_weight) * inv_weight

                            y_x1 = (y_x1 - weight_x1 * y * inv_weight) * inv_weight
                            y_x2 = (y_x2 - weight_x2 * y * inv_weight) * inv_weight
                            y_x3 = (y_x3 - weight_x3 * y * inv_weight) * inv_weight

                            z_x1 = (z_x1 - weight_x1 * z * inv_weight) * inv_weight
                            z_x2 = (z_x2 - weight_x2 * z * inv_weight) * inv_weight
                            z_x3 = (z_x3 - weight_x3 * z * inv_weight) * inv_weight

                            # 3x3 determinant of the Jacobian matrix
                            # (rule of Sarrus).
                            jac_det[i_cell_1 * k1 + i_quad_1,
                                    i_cell_2 * k2 + i_quad_2,
                                    i_cell_3 * k3 + i_quad_3] = (+ x_x1 * y_x2 * z_x3
                                                                 + x_x2 * y_x3 * z_x1
                                                                 + x_x3 * y_x1 * z_x2
                                                                 - x_x1 * y_x3 * z_x2
                                                                 - x_x2 * y_x1 * z_x3
                                                                 - x_x3 * y_x2 * z_x1)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field global_arr_coeff_weights: ndarray of floats Coefficients of the weight field jac_det: ndarray of floats Jacobian determinant on the grid
psydac/core/kernels.py
eval_jac_det_3d_weights
mayuri-dhote/psydac
0
python
def eval_jac_det_3d_weights(nc1: int, nc2: int, nc3: int,
                            pad1: int, pad2: int, pad3: int,
                            f_p1: int, f_p2: int, f_p3: int,
                            k1: int, k2: int, k3: int,
                            global_basis_1: 'float[:,:,:,:]',
                            global_basis_2: 'float[:,:,:,:]',
                            global_basis_3: 'float[:,:,:,:]',
                            global_spans_1: 'int[:]',
                            global_spans_2: 'int[:]',
                            global_spans_3: 'int[:]',
                            global_arr_coeff_x: 'float[:,:,:]',
                            global_arr_coeff_y: 'float[:,:,:]',
                            global_arr_coeff_z: 'float[:,:,:]',
                            global_arr_coeff_weights: 'float[:,:,:]',
                            jac_det: 'float[:,:,:]'):
    """
    Evaluate the Jacobian determinant of a 3D weighted (NURBS-like) spline
    mapping on the whole quadrature grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction
    nc3: int
        Number of cells in the Z direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction
    pad3: int
        Padding in the Z direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction
    f_p3: int
        Degree in the Z direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction
    k3: int
        Number of evaluation points in the Z direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction
    global_basis_3: ndarray of floats
        Basis functions values at each cell and quadrature points in the Z direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction
    global_spans_3: ndarray of ints
        Spans in the Z direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field
    global_arr_coeff_z: ndarray of floats
        Coefficients of the Z field

    global_arr_coeff_weights: ndarray of floats
        Coefficients of the weight field

    jac_det: ndarray of floats
        Jacobian determinant on the grid (output, filled in place)
    """
    # Per-cell coefficient buffers, reused for every cell.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_z = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeff_weights = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))

    # Accumulators for the weighted field values and their partial
    # derivatives at each quadrature point of the current cell.
    arr_x = np.zeros((k1, k2, k3))
    arr_y = np.zeros((k1, k2, k3))
    arr_z = np.zeros((k1, k2, k3))

    arr_x_x1 = np.zeros((k1, k2, k3))
    arr_x_x2 = np.zeros((k1, k2, k3))
    arr_x_x3 = np.zeros((k1, k2, k3))

    arr_y_x1 = np.zeros((k1, k2, k3))
    arr_y_x2 = np.zeros((k1, k2, k3))
    arr_y_x3 = np.zeros((k1, k2, k3))

    arr_z_x1 = np.zeros((k1, k2, k3))
    arr_z_x2 = np.zeros((k1, k2, k3))
    arr_z_x3 = np.zeros((k1, k2, k3))

    arr_weights = np.zeros((k1, k2, k3))
    arr_weights_x1 = np.zeros((k1, k2, k3))
    arr_weights_x2 = np.zeros((k1, k2, k3))
    arr_weights_x3 = np.zeros((k1, k2, k3))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]
        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]
            for i_cell_3 in range(nc3):
                span_3 = global_spans_3[i_cell_3]

                arr_x[:, :, :] = 0.0
                arr_y[:, :, :] = 0.0
                arr_z[:, :, :] = 0.0

                arr_x_x1[:, :, :] = 0.0
                arr_x_x2[:, :, :] = 0.0
                arr_x_x3[:, :, :] = 0.0

                arr_y_x1[:, :, :] = 0.0
                arr_y_x2[:, :, :] = 0.0
                arr_y_x3[:, :, :] = 0.0

                arr_z_x1[:, :, :] = 0.0
                arr_z_x2[:, :, :] = 0.0
                arr_z_x3[:, :, :] = 0.0

                arr_weights[:, :, :] = 0.0
                arr_weights_x1[:, :, :] = 0.0
                arr_weights_x2[:, :, :] = 0.0
                arr_weights_x3[:, :, :] = 0.0

                # Coefficients of the basis functions active on this cell,
                # extracted from the padded global arrays.
                arr_coeffs_x[:, :, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_y[:, :, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_z[:, :, :] = global_arr_coeff_z[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeff_weights[:, :, :] = global_arr_coeff_weights[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                                     pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                                     pad3 + span_3 - f_p3:1 + pad3 + span_3]

                for i_quad_1 in range(k1):
                    for i_quad_2 in range(k2):
                        for i_quad_3 in range(k3):
                            for i_basis_1 in range(1 + f_p1):
                                spline_1  = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                                spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]
                                for i_basis_2 in range(1 + f_p2):
                                    spline_2  = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                                    spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]
                                    for i_basis_3 in range(1 + f_p3):
                                        spline_3  = global_basis_3[i_cell_3, i_basis_3, 0, i_quad_3]
                                        spline_x3 = global_basis_3[i_cell_3, i_basis_3, 1, i_quad_3]

                                        # Value and partial derivatives of the
                                        # tensor-product basis function.
                                        mapping    = spline_1 * spline_2 * spline_3
                                        mapping_x1 = spline_x1 * spline_2 * spline_3
                                        mapping_x2 = spline_1 * spline_x2 * spline_3
                                        mapping_x3 = spline_1 * spline_2 * spline_x3

                                        coeff_x = arr_coeffs_x[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_y = arr_coeffs_y[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_z = arr_coeffs_z[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_weight = arr_coeff_weights[i_basis_1, i_basis_2, i_basis_3]

                                        arr_x[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_x
                                        arr_y[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_y
                                        arr_z[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_z

                                        arr_x_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_x
                                        arr_x_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_x
                                        arr_x_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_x

                                        arr_y_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_y
                                        arr_y_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_y
                                        arr_y_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_y

                                        arr_z_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_z
                                        arr_z_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_z
                                        arr_z_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_z

                                        arr_weights[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_weight
                                        arr_weights_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_weight
                                        arr_weights_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_weight
                                        arr_weights_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_weight

                            x = arr_x[i_quad_1, i_quad_2, i_quad_3]
                            y = arr_y[i_quad_1, i_quad_2, i_quad_3]
                            z = arr_z[i_quad_1, i_quad_2, i_quad_3]

                            x_x1 = arr_x_x1[i_quad_1, i_quad_2, i_quad_3]
                            x_x2 = arr_x_x2[i_quad_1, i_quad_2, i_quad_3]
                            x_x3 = arr_x_x3[i_quad_1, i_quad_2, i_quad_3]

                            y_x1 = arr_y_x1[i_quad_1, i_quad_2, i_quad_3]
                            y_x2 = arr_y_x2[i_quad_1, i_quad_2, i_quad_3]
                            y_x3 = arr_y_x3[i_quad_1, i_quad_2, i_quad_3]

                            z_x1 = arr_z_x1[i_quad_1, i_quad_2, i_quad_3]
                            z_x2 = arr_z_x2[i_quad_1, i_quad_2, i_quad_3]
                            z_x3 = arr_z_x3[i_quad_1, i_quad_2, i_quad_3]

                            weight    = arr_weights[i_quad_1, i_quad_2, i_quad_3]
                            weight_x1 = arr_weights_x1[i_quad_1, i_quad_2, i_quad_3]
                            weight_x2 = arr_weights_x2[i_quad_1, i_quad_2, i_quad_3]
                            weight_x3 = arr_weights_x3[i_quad_1, i_quad_2, i_quad_3]

                            # Quotient rule: derivatives of the rational
                            # (weighted) fields d(F/w) = (dF - (dw) F/w) / w.
                            inv_weight = 1.0 / weight

                            x_x1 = (x_x1 - weight_x1 * x * inv_weight) * inv_weight
                            x_x2 = (x_x2 - weight_x2 * x * inv_weight) * inv_weight
                            x_x3 = (x_x3 - weight_x3 * x * inv_weight) * inv_weight

                            y_x1 = (y_x1 - weight_x1 * y * inv_weight) * inv_weight
                            y_x2 = (y_x2 - weight_x2 * y * inv_weight) * inv_weight
                            y_x3 = (y_x3 - weight_x3 * y * inv_weight) * inv_weight

                            z_x1 = (z_x1 - weight_x1 * z * inv_weight) * inv_weight
                            z_x2 = (z_x2 - weight_x2 * z * inv_weight) * inv_weight
                            z_x3 = (z_x3 - weight_x3 * z * inv_weight) * inv_weight

                            # 3x3 determinant of the Jacobian matrix
                            # (rule of Sarrus).
                            jac_det[i_cell_1 * k1 + i_quad_1,
                                    i_cell_2 * k2 + i_quad_2,
                                    i_cell_3 * k3 + i_quad_3] = (+ x_x1 * y_x2 * z_x3
                                                                 + x_x2 * y_x3 * z_x1
                                                                 + x_x3 * y_x1 * z_x2
                                                                 - x_x1 * y_x3 * z_x2
                                                                 - x_x2 * y_x1 * z_x3
                                                                 - x_x3 * y_x2 * z_x1)
def eval_jac_det_3d_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', global_arr_coeff_weights: 'float[:,:,:]', jac_det: 'float[:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weight field\n\n jac_det: ndarray of floats\n Jacobian determinant on the grid\n 
' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x = np.zeros((k1, k2, k3)) arr_y = np.zeros((k1, k2, k3)) arr_z = np.zeros((k1, k2, k3)) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) arr_weights = np.zeros((k1, k2, k3)) arr_weights_x1 = np.zeros((k1, k2, k3)) arr_weights_x2 = np.zeros((k1, k2, k3)) arr_weights_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x[(:, :, :)] = 0.0 arr_y[(:, :, :)] = 0.0 arr_z[(:, :, :)] = 0.0 arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 arr_weights_x1[(:, :, :)] = 0.0 arr_weights_x2[(:, :, :)] = 0.0 arr_weights_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] 
arr_coeff_weights[(:, :, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping = ((spline_1 * spline_2) * spline_3) mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_x[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_y) arr_z[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_z) arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) 
arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_weight) arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_weight) arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_weight) arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_weight) x = arr_x[(i_quad_1, i_quad_2, i_quad_3)] y = arr_y[(i_quad_1, i_quad_2, i_quad_3)] z = arr_z[(i_quad_1, i_quad_2, i_quad_3)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] weight_x3 = arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) x_x3 = ((x_x3 - ((weight_x3 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) y_x3 = ((y_x3 - ((weight_x3 * y) * inv_weight)) * inv_weight) z_x1 = ((z_x1 - ((weight_x1 * z) * inv_weight)) * inv_weight) z_x2 = ((z_x2 - ((weight_x2 * z) * inv_weight)) * inv_weight) z_x3 = ((z_x3 - ((weight_x3 * z) * inv_weight)) * inv_weight) jac_det[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3))] = ((((((((+ x_x1) * y_x2) * z_x3) + ((x_x2 * y_x3) * z_x1)) + ((x_x3 * y_x1) * z_x2)) - ((x_x1 * y_x3) * z_x2)) - ((x_x2 * y_x1) * z_x3)) - ((x_x3 * y_x2) * z_x1))<|docstring|>Parameters ---------- nc1: int Number of 
cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field global_arr_coeff_weights: ndarray of floats Coefficients of the weight field jac_det: ndarray of floats Jacobian determinant on the grid<|endoftext|>
21ad3d768a5883c214b26e387d7056737bc7abb4e61a87f65cadf4c367399be5
def eval_jac_det_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', global_arr_coeff_weights: 'float[:,:]', jac_det: 'float[:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weights field\n\n jac_det: ndarray of floats\n Jacobian determinant on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x = np.zeros((k1, k2)) arr_y = np.zeros((k1, k2)) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) arr_weights = np.zeros((k1, k2)) arr_weights_x1 = np.zeros((k1, k2)) arr_weights_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x[(:, :)] = 
0.0 arr_y[(:, :)] = 0.0 arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_weights[(:, :)] = 0.0 arr_weights_x1[(:, :)] = 0.0 arr_weights_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeff_weights[(:, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping = (spline_1 * spline_2) mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] coeff_weights = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_x[(i_quad_1, i_quad_2)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2)] += (mapping * coeff_y) arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) arr_weights[(i_quad_1, i_quad_2)] += (mapping * coeff_weights) arr_weights_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_weights) arr_weights_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_weights) x = arr_x[(i_quad_1, i_quad_2)] y = arr_y[(i_quad_1, i_quad_2)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, 
i_quad_2)] weight = arr_weights[(i_quad_1, i_quad_2)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) jac_det[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2))] = ((x_x1 * y_x2) - (x_x2 * y_x1))
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_weights: ndarray of floats Coefficients of the weights field jac_det: ndarray of floats Jacobian determinant on the grid
psydac/core/kernels.py
eval_jac_det_2d_weights
mayuri-dhote/psydac
0
python
def eval_jac_det_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', global_arr_coeff_weights: 'float[:,:]', jac_det: 'float[:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weights field\n\n jac_det: ndarray of floats\n Jacobian determinant on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x = np.zeros((k1, k2)) arr_y = np.zeros((k1, k2)) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) arr_weights = np.zeros((k1, k2)) arr_weights_x1 = np.zeros((k1, k2)) arr_weights_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x[(:, :)] = 
0.0 arr_y[(:, :)] = 0.0 arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_weights[(:, :)] = 0.0 arr_weights_x1[(:, :)] = 0.0 arr_weights_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeff_weights[(:, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping = (spline_1 * spline_2) mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] coeff_weights = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_x[(i_quad_1, i_quad_2)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2)] += (mapping * coeff_y) arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) arr_weights[(i_quad_1, i_quad_2)] += (mapping * coeff_weights) arr_weights_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_weights) arr_weights_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_weights) x = arr_x[(i_quad_1, i_quad_2)] y = arr_y[(i_quad_1, i_quad_2)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, 
i_quad_2)] weight = arr_weights[(i_quad_1, i_quad_2)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) jac_det[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2))] = ((x_x1 * y_x2) - (x_x2 * y_x1))
def eval_jac_det_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', global_arr_coeff_weights: 'float[:,:]', jac_det: 'float[:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weights field\n\n jac_det: ndarray of floats\n Jacobian determinant on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x = np.zeros((k1, k2)) arr_y = np.zeros((k1, k2)) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) arr_weights = np.zeros((k1, k2)) arr_weights_x1 = np.zeros((k1, k2)) arr_weights_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x[(:, :)] = 
0.0 arr_y[(:, :)] = 0.0 arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_weights[(:, :)] = 0.0 arr_weights_x1[(:, :)] = 0.0 arr_weights_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeff_weights[(:, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping = (spline_1 * spline_2) mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] coeff_weights = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_x[(i_quad_1, i_quad_2)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2)] += (mapping * coeff_y) arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) arr_weights[(i_quad_1, i_quad_2)] += (mapping * coeff_weights) arr_weights_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_weights) arr_weights_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_weights) x = arr_x[(i_quad_1, i_quad_2)] y = arr_y[(i_quad_1, i_quad_2)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, 
i_quad_2)] weight = arr_weights[(i_quad_1, i_quad_2)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) jac_det[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2))] = ((x_x1 * y_x2) - (x_x2 * y_x1))<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_weights: ndarray of floats Coefficients of the weights field jac_det: ndarray of floats Jacobian determinant on the grid<|endoftext|>
9c986a6c80559f3b4e0f592679678754515908c1f241c3ae13a7a4fa44df7216
def eval_jacobians_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jacobians: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jacobians: ndarray of floats\n Jacobian matrix on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) 
arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * 
spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = np.array([[x_x1, x_x2, x_x3], [y_x1, y_x2, y_x3], [z_x1, z_x2, z_x3]])
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field jacobians: ndarray of floats Jacobian matrix on the grid
psydac/core/kernels.py
eval_jacobians_3d
mayuri-dhote/psydac
0
python
def eval_jacobians_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jacobians: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jacobians: ndarray of floats\n Jacobian matrix on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) 
arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * 
spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = np.array([[x_x1, x_x2, x_x3], [y_x1, y_x2, y_x3], [z_x1, z_x2, z_x3]])
def eval_jacobians_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jacobians: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jacobians: ndarray of floats\n Jacobian matrix on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) 
arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * 
spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = np.array([[x_x1, x_x2, x_x3], [y_x1, y_x2, y_x3], [z_x1, z_x2, z_x3]])<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the 
Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field jacobians: ndarray of floats Jacobian matrix on the grid<|endoftext|>
5fbda78a4a41482981de66de0138ff6a29dc327ba8ffd9e0d9c415a9a2430f2e
def eval_jacobians_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', jacobians: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n jacobians: ndarray of floats\n Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - 
f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = np.array([[x_x1, x_x2], [y_x1, y_x2]])
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field jacobians: ndarray of floats Jacobian matrix at every point of the grid
psydac/core/kernels.py
eval_jacobians_2d
mayuri-dhote/psydac
0
python
def eval_jacobians_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', jacobians: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n jacobians: ndarray of floats\n Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - 
f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = np.array([[x_x1, x_x2], [y_x1, y_x2]])
def eval_jacobians_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', jacobians: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n jacobians: ndarray of floats\n Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - 
f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = np.array([[x_x1, x_x2], [y_x1, y_x2]])<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field jacobians: ndarray of floats Jacobian matrix at every point of the 
grid<|endoftext|>
73183c99ef542e464b6766f2ca2ca132c10a86926fc2a529b80f3249b6ff0e22
def eval_jacobians_3d_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', global_arr_coeff_weights: 'float[:,:,:]', jacobians: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weight field\n\n jacobians: ndarray of floats\n Jacobian matrix on the 
grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x = np.zeros((k1, k2, k3)) arr_y = np.zeros((k1, k2, k3)) arr_z = np.zeros((k1, k2, k3)) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) arr_weights = np.zeros((k1, k2, k3)) arr_weights_x1 = np.zeros((k1, k2, k3)) arr_weights_x2 = np.zeros((k1, k2, k3)) arr_weights_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x[(:, :, :)] = 0.0 arr_y[(:, :, :)] = 0.0 arr_z[(:, :, :)] = 0.0 arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 arr_weights_x1[(:, :, :)] = 0.0 arr_weights_x2[(:, :, :)] = 0.0 arr_weights_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + 
span_3))] arr_coeff_weights[(:, :, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping = ((spline_1 * spline_2) * spline_3) mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_x[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_y) arr_z[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_z) arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) 
arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_weight) arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_weight) arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_weight) arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_weight) x = arr_x[(i_quad_1, i_quad_2, i_quad_3)] y = arr_y[(i_quad_1, i_quad_2, i_quad_3)] z = arr_z[(i_quad_1, i_quad_2, i_quad_3)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] weight_x3 = arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) x_x3 = ((x_x3 - ((weight_x3 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) y_x3 = ((y_x3 - ((weight_x3 * y) * inv_weight)) * inv_weight) z_x1 = ((z_x1 - ((weight_x1 * z) * inv_weight)) * inv_weight) z_x2 = ((z_x2 - ((weight_x2 * z) * inv_weight)) * inv_weight) z_x3 = ((z_x3 - ((weight_x3 * z) * inv_weight)) * inv_weight) jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = np.array([[x_x1, x_x2, x_x3], [y_x1, y_x2, y_x3], [z_x1, z_x2, z_x3]])
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field global_arr_coeff_weights: ndarray of floats Coefficients of the weight field jacobians: ndarray of floats Jacobian matrix on the grid
psydac/core/kernels.py
eval_jacobians_3d_weights
mayuri-dhote/psydac
0
python
def eval_jacobians_3d_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', global_arr_coeff_weights: 'float[:,:,:]', jacobians: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weight field\n\n jacobians: ndarray of floats\n Jacobian matrix on the 
grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x = np.zeros((k1, k2, k3)) arr_y = np.zeros((k1, k2, k3)) arr_z = np.zeros((k1, k2, k3)) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) arr_weights = np.zeros((k1, k2, k3)) arr_weights_x1 = np.zeros((k1, k2, k3)) arr_weights_x2 = np.zeros((k1, k2, k3)) arr_weights_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x[(:, :, :)] = 0.0 arr_y[(:, :, :)] = 0.0 arr_z[(:, :, :)] = 0.0 arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 arr_weights_x1[(:, :, :)] = 0.0 arr_weights_x2[(:, :, :)] = 0.0 arr_weights_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + 
span_3))] arr_coeff_weights[(:, :, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping = ((spline_1 * spline_2) * spline_3) mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_x[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_y) arr_z[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_z) arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) 
arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_weight) arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_weight) arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_weight) arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_weight) x = arr_x[(i_quad_1, i_quad_2, i_quad_3)] y = arr_y[(i_quad_1, i_quad_2, i_quad_3)] z = arr_z[(i_quad_1, i_quad_2, i_quad_3)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] weight_x3 = arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) x_x3 = ((x_x3 - ((weight_x3 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) y_x3 = ((y_x3 - ((weight_x3 * y) * inv_weight)) * inv_weight) z_x1 = ((z_x1 - ((weight_x1 * z) * inv_weight)) * inv_weight) z_x2 = ((z_x2 - ((weight_x2 * z) * inv_weight)) * inv_weight) z_x3 = ((z_x3 - ((weight_x3 * z) * inv_weight)) * inv_weight) jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = np.array([[x_x1, x_x2, x_x3], [y_x1, y_x2, y_x3], [z_x1, z_x2, z_x3]])
def eval_jacobians_3d_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', global_arr_coeff_weights: 'float[:,:,:]', jacobians: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weight field\n\n jacobians: ndarray of floats\n Jacobian matrix on the 
grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x = np.zeros((k1, k2, k3)) arr_y = np.zeros((k1, k2, k3)) arr_z = np.zeros((k1, k2, k3)) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) arr_weights = np.zeros((k1, k2, k3)) arr_weights_x1 = np.zeros((k1, k2, k3)) arr_weights_x2 = np.zeros((k1, k2, k3)) arr_weights_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x[(:, :, :)] = 0.0 arr_y[(:, :, :)] = 0.0 arr_z[(:, :, :)] = 0.0 arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 arr_weights_x1[(:, :, :)] = 0.0 arr_weights_x2[(:, :, :)] = 0.0 arr_weights_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + 
span_3))] arr_coeff_weights[(:, :, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping = ((spline_1 * spline_2) * spline_3) mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_x[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_y) arr_z[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_z) arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) 
arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_weight) arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_weight) arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_weight) arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_weight) x = arr_x[(i_quad_1, i_quad_2, i_quad_3)] y = arr_y[(i_quad_1, i_quad_2, i_quad_3)] z = arr_z[(i_quad_1, i_quad_2, i_quad_3)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] weight_x3 = arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) x_x3 = ((x_x3 - ((weight_x3 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) y_x3 = ((y_x3 - ((weight_x3 * y) * inv_weight)) * inv_weight) z_x1 = ((z_x1 - ((weight_x1 * z) * inv_weight)) * inv_weight) z_x2 = ((z_x2 - ((weight_x2 * z) * inv_weight)) * inv_weight) z_x3 = ((z_x3 - ((weight_x3 * z) * inv_weight)) * inv_weight) jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = np.array([[x_x1, x_x2, x_x3], [y_x1, y_x2, y_x3], [z_x1, z_x2, z_x3]])<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number 
of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field global_arr_coeff_weights: ndarray of floats Coefficients of the weight field jacobians: ndarray of floats Jacobian matrix on the grid<|endoftext|>
fc7ee821b452d0d3afe01008a7e56a2a74e26b66f23d3f12e094ee1c46e0f1e8
def eval_jacobians_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int,
                              global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                              global_spans_1: 'int[:]', global_spans_2: 'int[:]',
                              global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]',
                              global_arr_coeff_weights: 'float[:,:]', jacobians: 'float[:,:,:,:]'):
    """
    Evaluate the Jacobian matrix of a 2D weighted (rational) spline mapping
    at every quadrature point of the grid, writing the result into ``jacobians``.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field

    global_arr_coeff_weights: ndarray of floats
        Coefficients of the weights field

    jacobians: ndarray of floats
        Jacobian matrix at every point of the grid (output, filled in place)
    """
    # Scratch windows holding the control data of the current span.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2))
    arr_coeff_weights = np.zeros((1 + f_p1, 1 + f_p2))

    # Per-cell accumulators: field values, their logical derivatives, and the
    # weight function with its derivatives, at each quadrature point.
    arr_x = np.zeros((k1, k2))
    arr_y = np.zeros((k1, k2))

    arr_x_x1 = np.zeros((k1, k2))
    arr_x_x2 = np.zeros((k1, k2))

    arr_y_x1 = np.zeros((k1, k2))
    arr_y_x2 = np.zeros((k1, k2))

    arr_weights = np.zeros((k1, k2))
    arr_weights_x1 = np.zeros((k1, k2))
    arr_weights_x2 = np.zeros((k1, k2))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]

        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]

            # Reset the per-cell accumulators.
            arr_x[:, :] = 0.0
            arr_y[:, :] = 0.0

            arr_x_x1[:, :] = 0.0
            arr_x_x2[:, :] = 0.0

            arr_y_x1[:, :] = 0.0
            arr_y_x2[:, :] = 0.0

            arr_weights[:, :] = 0.0
            arr_weights_x1[:, :] = 0.0
            arr_weights_x2[:, :] = 0.0

            # Copy the (1+f_p1) x (1+f_p2) window of coefficients touched by
            # the current span out of the padded global arrays.
            arr_coeffs_x[:, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]
            arr_coeffs_y[:, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                    pad2 + span_2 - f_p2:1 + pad2 + span_2]
            arr_coeff_weights[:, :] = global_arr_coeff_weights[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                               pad2 + span_2 - f_p2:1 + pad2 + span_2]

            for i_quad_1 in range(k1):
                for i_quad_2 in range(k2):
                    # Accumulate tensor-product basis contributions.
                    for i_basis_1 in range(1 + f_p1):
                        spline_1 = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                        spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]

                        for i_basis_2 in range(1 + f_p2):
                            spline_2 = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                            spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]

                            mapping = spline_1 * spline_2
                            mapping_x1 = spline_x1 * spline_2
                            mapping_x2 = spline_1 * spline_x2

                            coeff_x = arr_coeffs_x[i_basis_1, i_basis_2]
                            coeff_y = arr_coeffs_y[i_basis_1, i_basis_2]
                            coeff_weights = arr_coeff_weights[i_basis_1, i_basis_2]

                            arr_x[i_quad_1, i_quad_2] += mapping * coeff_x
                            arr_y[i_quad_1, i_quad_2] += mapping * coeff_y

                            arr_x_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_x
                            arr_x_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_x

                            arr_y_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_y
                            arr_y_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_y

                            arr_weights[i_quad_1, i_quad_2] += mapping * coeff_weights
                            arr_weights_x1[i_quad_1, i_quad_2] += mapping_x1 * coeff_weights
                            arr_weights_x2[i_quad_1, i_quad_2] += mapping_x2 * coeff_weights

                    x = arr_x[i_quad_1, i_quad_2]
                    y = arr_y[i_quad_1, i_quad_2]

                    x_x1 = arr_x_x1[i_quad_1, i_quad_2]
                    x_x2 = arr_x_x2[i_quad_1, i_quad_2]

                    y_x1 = arr_y_x1[i_quad_1, i_quad_2]
                    y_x2 = arr_y_x2[i_quad_1, i_quad_2]

                    weight = arr_weights[i_quad_1, i_quad_2]
                    weight_x1 = arr_weights_x1[i_quad_1, i_quad_2]
                    weight_x2 = arr_weights_x2[i_quad_1, i_quad_2]

                    # Quotient rule: d(x/w) = (dx - dw * (x/w)) / w.
                    # NOTE(review): this assumes the x/y coefficients are stored in
                    # homogeneous (weight-multiplied) form — confirm against the caller.
                    inv_weight = 1.0 / weight

                    x_x1 = (x_x1 - weight_x1 * x * inv_weight) * inv_weight
                    x_x2 = (x_x2 - weight_x2 * x * inv_weight) * inv_weight

                    y_x1 = (y_x1 - weight_x1 * y * inv_weight) * inv_weight
                    y_x2 = (y_x2 - weight_x2 * y * inv_weight) * inv_weight

                    jacobians[i_cell_1 * k1 + i_quad_1,
                              i_cell_2 * k2 + i_quad_2, :, :] = np.array([[x_x1, x_x2],
                                                                          [y_x1, y_x2]])
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_weights: ndarray of floats Coefficients of the weights field jacobians: ndarray of floats Jacobian matrix at every point of the grid
psydac/core/kernels.py
eval_jacobians_2d_weights
mayuri-dhote/psydac
0
python
def eval_jacobians_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', global_arr_coeff_weights: 'float[:,:]', jacobians: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weights field\n\n jacobians: ndarray of floats\n Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x = np.zeros((k1, k2)) arr_y = np.zeros((k1, k2)) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) arr_weights = np.zeros((k1, k2)) arr_weights_x1 = np.zeros((k1, k2)) arr_weights_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = 
global_spans_2[i_cell_2] arr_x[(:, :)] = 0.0 arr_y[(:, :)] = 0.0 arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_weights[(:, :)] = 0.0 arr_weights_x1[(:, :)] = 0.0 arr_weights_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeff_weights[(:, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping = (spline_1 * spline_2) mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] coeff_weights = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_x[(i_quad_1, i_quad_2)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2)] += (mapping * coeff_y) arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) arr_weights[(i_quad_1, i_quad_2)] += (mapping * coeff_weights) arr_weights_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_weights) arr_weights_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_weights) x = arr_x[(i_quad_1, i_quad_2)] y = arr_y[(i_quad_1, i_quad_2)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = 
arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] weight = arr_weights[(i_quad_1, i_quad_2)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = np.array([[x_x1, x_x2], [y_x1, y_x2]])
def eval_jacobians_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', global_arr_coeff_weights: 'float[:,:]', jacobians: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weights field\n\n jacobians: ndarray of floats\n Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x = np.zeros((k1, k2)) arr_y = np.zeros((k1, k2)) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) arr_weights = np.zeros((k1, k2)) arr_weights_x1 = np.zeros((k1, k2)) arr_weights_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = 
global_spans_2[i_cell_2] arr_x[(:, :)] = 0.0 arr_y[(:, :)] = 0.0 arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_weights[(:, :)] = 0.0 arr_weights_x1[(:, :)] = 0.0 arr_weights_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeff_weights[(:, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping = (spline_1 * spline_2) mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] coeff_weights = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_x[(i_quad_1, i_quad_2)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2)] += (mapping * coeff_y) arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) arr_weights[(i_quad_1, i_quad_2)] += (mapping * coeff_weights) arr_weights_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_weights) arr_weights_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_weights) x = arr_x[(i_quad_1, i_quad_2)] y = arr_y[(i_quad_1, i_quad_2)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = 
arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] weight = arr_weights[(i_quad_1, i_quad_2)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) jacobians[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = np.array([[x_x1, x_x2], [y_x1, y_x2]])<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_weights: ndarray of floats Coefficients of the weights field jacobians: ndarray of floats Jacobian matrix at every point of the grid<|endoftext|>
cbd7e4954d9d9212e091d15aafc4235bdd9a8b8b1e444b0b6864839e2ef92664
def eval_jacobians_inv_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int,
                          f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int,
                          global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                          global_basis_3: 'float[:,:,:,:]',
                          global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]',
                          global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]',
                          global_arr_coeff_z: 'float[:,:,:]', jacobians_inv: 'float[:,:,:,:,:]'):
    """
    Evaluate the inverse of the Jacobian matrix of a 3D spline mapping at
    every quadrature point of the grid, writing the result into ``jacobians_inv``.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction
    nc3: int
        Number of cells in the Z direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction
    pad3: int
        Padding in the Z direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction
    f_p3: int
        Degree in the Z direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction
    k3: int
        Number of evaluation points in the Z direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction
    global_basis_3: ndarray of floats
        Basis functions values at each cell and quadrature points in the Z direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction
    global_spans_3: ndarray of ints
        Spans in the Z direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field
    global_arr_coeff_z: ndarray of floats
        Coefficients of the Z field

    jacobians_inv: ndarray of floats
        Inverse of the Jacobian matrix on the grid (output, filled in place)
    """
    # Scratch windows holding the control data of the current span.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_z = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))

    # Per-cell accumulators for the nine partial derivatives of the mapping.
    arr_x_x1 = np.zeros((k1, k2, k3))
    arr_x_x2 = np.zeros((k1, k2, k3))
    arr_x_x3 = np.zeros((k1, k2, k3))

    arr_y_x1 = np.zeros((k1, k2, k3))
    arr_y_x2 = np.zeros((k1, k2, k3))
    arr_y_x3 = np.zeros((k1, k2, k3))

    arr_z_x1 = np.zeros((k1, k2, k3))
    arr_z_x2 = np.zeros((k1, k2, k3))
    arr_z_x3 = np.zeros((k1, k2, k3))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]

        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]

            for i_cell_3 in range(nc3):
                span_3 = global_spans_3[i_cell_3]

                # Reset the per-cell accumulators.
                arr_x_x1[:, :, :] = 0.0
                arr_x_x2[:, :, :] = 0.0
                arr_x_x3[:, :, :] = 0.0

                arr_y_x1[:, :, :] = 0.0
                arr_y_x2[:, :, :] = 0.0
                arr_y_x3[:, :, :] = 0.0

                arr_z_x1[:, :, :] = 0.0
                arr_z_x2[:, :, :] = 0.0
                arr_z_x3[:, :, :] = 0.0

                # Copy the coefficient window touched by the current span out
                # of the padded global arrays.
                arr_coeffs_x[:, :, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_y[:, :, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_z[:, :, :] = global_arr_coeff_z[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]

                for i_quad_1 in range(k1):
                    for i_quad_2 in range(k2):
                        for i_quad_3 in range(k3):
                            # Accumulate tensor-product basis contributions.
                            for i_basis_1 in range(1 + f_p1):
                                spline_1 = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                                spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]

                                for i_basis_2 in range(1 + f_p2):
                                    spline_2 = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                                    spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]

                                    for i_basis_3 in range(1 + f_p3):
                                        spline_3 = global_basis_3[i_cell_3, i_basis_3, 0, i_quad_3]
                                        spline_x3 = global_basis_3[i_cell_3, i_basis_3, 1, i_quad_3]

                                        mapping_x1 = spline_x1 * spline_2 * spline_3
                                        mapping_x2 = spline_1 * spline_x2 * spline_3
                                        mapping_x3 = spline_1 * spline_2 * spline_x3

                                        coeff_x = arr_coeffs_x[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_y = arr_coeffs_y[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_z = arr_coeffs_z[i_basis_1, i_basis_2, i_basis_3]

                                        arr_x_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_x
                                        arr_x_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_x
                                        arr_x_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_x

                                        arr_y_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_y
                                        arr_y_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_y
                                        arr_y_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_y

                                        arr_z_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_z
                                        arr_z_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_z
                                        arr_z_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_z

                            x_x1 = arr_x_x1[i_quad_1, i_quad_2, i_quad_3]
                            x_x2 = arr_x_x2[i_quad_1, i_quad_2, i_quad_3]
                            x_x3 = arr_x_x3[i_quad_1, i_quad_2, i_quad_3]

                            y_x1 = arr_y_x1[i_quad_1, i_quad_2, i_quad_3]
                            y_x2 = arr_y_x2[i_quad_1, i_quad_2, i_quad_3]
                            y_x3 = arr_y_x3[i_quad_1, i_quad_2, i_quad_3]

                            z_x1 = arr_z_x1[i_quad_1, i_quad_2, i_quad_3]
                            z_x2 = arr_z_x2[i_quad_1, i_quad_2, i_quad_3]
                            z_x3 = arr_z_x3[i_quad_1, i_quad_2, i_quad_3]

                            # Determinant of the 3x3 Jacobian (Sarrus expansion).
                            det = (x_x1 * y_x2 * z_x3 + x_x2 * y_x3 * z_x1 + x_x3 * y_x1 * z_x2
                                   - x_x1 * y_x3 * z_x2 - x_x2 * y_x1 * z_x3 - x_x3 * y_x2 * z_x1)

                            # Cofactors of the Jacobian; the inverse is the
                            # transposed cofactor matrix (adjugate) over det.
                            a_11 = y_x2 * z_x3 - y_x3 * z_x2
                            a_12 = -y_x1 * z_x3 + y_x3 * z_x1
                            a_13 = y_x1 * z_x2 - y_x2 * z_x1

                            a_21 = -x_x2 * z_x3 + x_x3 * z_x2
                            a_22 = x_x1 * z_x3 - x_x3 * z_x1
                            a_23 = -x_x1 * z_x2 + x_x2 * z_x1

                            a_31 = x_x2 * y_x3 - x_x3 * y_x2
                            a_32 = -x_x1 * y_x3 + x_x3 * y_x1
                            a_33 = x_x1 * y_x2 - x_x2 * y_x1

                            jacobians_inv[i_cell_1 * k1 + i_quad_1,
                                          i_cell_2 * k2 + i_quad_2,
                                          i_cell_3 * k3 + i_quad_3, :, :] = (np.array([[a_11, a_21, a_31],
                                                                                       [a_12, a_22, a_32],
                                                                                       [a_13, a_23, a_33]]) / det)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field jacobians_inv: ndarray of floats Inverse of the Jacobian matrix on the grid
psydac/core/kernels.py
eval_jacobians_inv_3d
mayuri-dhote/psydac
0
python
def eval_jacobians_inv_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jacobians_inv: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jacobians_inv: ndarray of floats\n Inverse of the Jacobian matrix on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), 
(1 + f_p2), (1 + f_p3))) arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) 
mapping_x2 = ((spline_1 * spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] det = (((((((x_x1 * y_x2) * z_x3) + ((x_x2 * y_x3) * z_x1)) + ((x_x3 * y_x1) * z_x2)) - ((x_x1 * y_x3) * z_x2)) - ((x_x2 * y_x1) * z_x3)) - ((x_x3 * y_x2) * z_x1)) a_11 = ((y_x2 * z_x3) - (y_x3 * z_x2)) a_12 = (((- y_x1) * z_x3) + (y_x3 * z_x1)) a_13 = ((y_x1 * z_x2) - (y_x2 * z_x1)) a_21 = (((- x_x2) * z_x3) + (x_x3 * z_x2)) a_22 = ((x_x1 * z_x3) - (x_x3 * z_x1)) a_23 = (((- x_x1) * z_x2) + (x_x2 * z_x1)) a_31 = ((x_x2 * y_x3) - (x_x3 * y_x2)) a_32 = (((- x_x1) * y_x3) + (x_x3 * y_x1)) a_33 = ((x_x1 * y_x2) - (x_x2 * y_x1)) jacobians_inv[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = (np.array([[a_11, a_21, a_31], [a_12, a_22, 
a_32], [a_13, a_23, a_33]]) / det)
def eval_jacobians_inv_3d(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', jacobians_inv: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n jacobians_inv: ndarray of floats\n Inverse of the Jacobian matrix on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), 
(1 + f_p2), (1 + f_p3))) arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x2[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping_x1 = ((spline_x1 * spline_2) * spline_3) 
mapping_x2 = ((spline_1 * spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_z) x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] det = (((((((x_x1 * y_x2) * z_x3) + ((x_x2 * y_x3) * z_x1)) + ((x_x3 * y_x1) * z_x2)) - ((x_x1 * y_x3) * z_x2)) - ((x_x2 * y_x1) * z_x3)) - ((x_x3 * y_x2) * z_x1)) a_11 = ((y_x2 * z_x3) - (y_x3 * z_x2)) a_12 = (((- y_x1) * z_x3) + (y_x3 * z_x1)) a_13 = ((y_x1 * z_x2) - (y_x2 * z_x1)) a_21 = (((- x_x2) * z_x3) + (x_x3 * z_x2)) a_22 = ((x_x1 * z_x3) - (x_x3 * z_x1)) a_23 = (((- x_x1) * z_x2) + (x_x2 * z_x1)) a_31 = ((x_x2 * y_x3) - (x_x3 * y_x2)) a_32 = (((- x_x1) * y_x3) + (x_x3 * y_x1)) a_33 = ((x_x1 * y_x2) - (x_x2 * y_x1)) jacobians_inv[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = (np.array([[a_11, a_21, a_31], [a_12, a_22, 
a_32], [a_13, a_23, a_33]]) / det)<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field jacobians_inv: ndarray of floats Inverse of the Jacobian matrix on the grid<|endoftext|>
3469014c653485fba3ee3e1ff66e8450f97de0a5e53f6b01a9fb4f84dc4e9907
def eval_jacobians_inv_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', jacobians_inv: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n jacobians_inv: ndarray of floats\n Inverse of the Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + 
span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] det = ((x_x1 * y_x2) - (x_x2 * y_x1)) jacobians_inv[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = (np.array([[y_x2, (- x_x2)], [(- y_x1), x_x1]]) / det)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field jacobians_inv: ndarray of floats Inverse of the Jacobian matrix at every point of the grid
psydac/core/kernels.py
eval_jacobians_inv_2d
mayuri-dhote/psydac
0
python
def eval_jacobians_inv_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', jacobians_inv: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n jacobians_inv: ndarray of floats\n Inverse of the Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + 
span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] det = ((x_x1 * y_x2) - (x_x2 * y_x1)) jacobians_inv[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = (np.array([[y_x2, (- x_x2)], [(- y_x1), x_x1]]) / det)
def eval_jacobians_inv_2d(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', jacobians_inv: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n jacobians_inv: ndarray of floats\n Inverse of the Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + 
span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] det = ((x_x1 * y_x2) - (x_x2 * y_x1)) jacobians_inv[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = (np.array([[y_x2, (- x_x2)], [(- y_x1), x_x1]]) / det)<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y 
field jacobians_inv: ndarray of floats Inverse of the Jacobian matrix at every point of the grid<|endoftext|>
05bb51661e2b69c31c28ca57a6de4d171d22bb943211e69236dc8d1c81a86f6b
def eval_jacobians_inv_3d_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int, f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_spans_3: 'int[:]', global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]', global_arr_coeff_z: 'float[:,:,:]', global_arr_coeff_weigths: 'float[:,:,:]', jacobians_inv: 'float[:,:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n nc3: int\n Number of cells in the Z direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n pad3: int\n Padding in the Z direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n f_p3: int\n Degree in the Z direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n k3: int\n Number of evaluation points in the Z direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n global_basis_3: ndarray of floats\n Basis functions values at each cell and quadrature points in the Z direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n global_spans_3: ndarray of ints\n Spans in the Z direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n global_arr_coeff_z: ndarray of floats\n Coefficients of the Z field\n\n global_arr_coeff_weigths: ndarray of floats\n Coefficients of the weight field\n\n jacobians_inv: ndarray of floats\n Inverse of 
the Jacobian matrix on the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeffs_z = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2), (1 + f_p3))) arr_x = np.zeros((k1, k2, k3)) arr_y = np.zeros((k1, k2, k3)) arr_z = np.zeros((k1, k2, k3)) arr_x_x1 = np.zeros((k1, k2, k3)) arr_x_x2 = np.zeros((k1, k2, k3)) arr_x_x3 = np.zeros((k1, k2, k3)) arr_y_x1 = np.zeros((k1, k2, k3)) arr_y_x2 = np.zeros((k1, k2, k3)) arr_y_x3 = np.zeros((k1, k2, k3)) arr_z_x1 = np.zeros((k1, k2, k3)) arr_z_x2 = np.zeros((k1, k2, k3)) arr_z_x3 = np.zeros((k1, k2, k3)) arr_weights = np.zeros((k1, k2, k3)) arr_weights_x1 = np.zeros((k1, k2, k3)) arr_weights_x2 = np.zeros((k1, k2, k3)) arr_weights_x3 = np.zeros((k1, k2, k3)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): span_2 = global_spans_2[i_cell_2] for i_cell_3 in range(nc3): span_3 = global_spans_3[i_cell_3] arr_x[(:, :, :)] = 0.0 arr_y[(:, :, :)] = 0.0 arr_z[(:, :, :)] = 0.0 arr_x_x1[(:, :, :)] = 0.0 arr_x_x2[(:, :, :)] = 0.0 arr_x_x3[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x1[(:, :, :)] = 0.0 arr_y_x3[(:, :, :)] = 0.0 arr_z_x1[(:, :, :)] = 0.0 arr_z_x2[(:, :, :)] = 0.0 arr_z_x3[(:, :, :)] = 0.0 arr_weights[(:, :, :)] = 0.0 arr_weights_x1[(:, :, :)] = 0.0 arr_weights_x2[(:, :, :)] = 0.0 arr_weights_x3[(:, :, :)] = 0.0 arr_coeffs_x[(:, :, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_y[(:, :, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] arr_coeffs_z[(:, :, :)] = global_arr_coeff_z[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) 
- f_p3):((1 + pad3) + span_3))] arr_coeff_weights[(:, :, :)] = global_arr_coeff_weigths[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2), ((pad3 + span_3) - f_p3):((1 + pad3) + span_3))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_quad_3 in range(k3): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] for i_basis_3 in range((1 + f_p3)): spline_3 = global_basis_3[(i_cell_3, i_basis_3, 0, i_quad_3)] spline_x3 = global_basis_3[(i_cell_3, i_basis_3, 1, i_quad_3)] mapping = ((spline_1 * spline_2) * spline_3) mapping_x1 = ((spline_x1 * spline_2) * spline_3) mapping_x2 = ((spline_1 * spline_x2) * spline_3) mapping_x3 = ((spline_1 * spline_2) * spline_x3) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2, i_basis_3)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2, i_basis_3)] coeff_z = arr_coeffs_z[(i_basis_1, i_basis_2, i_basis_3)] coeff_weight = arr_coeff_weights[(i_basis_1, i_basis_2, i_basis_3)] arr_x[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_y) arr_z[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_z) arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_x) arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_y) arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_y) arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_z) arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_z) arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] += 
(mapping_x3 * coeff_z) arr_weights[(i_quad_1, i_quad_2, i_quad_3)] += (mapping * coeff_weight) arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x1 * coeff_weight) arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x2 * coeff_weight) arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] += (mapping_x3 * coeff_weight) x = arr_x[(i_quad_1, i_quad_2, i_quad_3)] y = arr_y[(i_quad_1, i_quad_2, i_quad_3)] z = arr_z[(i_quad_1, i_quad_2, i_quad_3)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2, i_quad_3)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2, i_quad_3)] x_x3 = arr_x_x3[(i_quad_1, i_quad_2, i_quad_3)] y_x1 = arr_y_x1[(i_quad_1, i_quad_2, i_quad_3)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2, i_quad_3)] y_x3 = arr_y_x3[(i_quad_1, i_quad_2, i_quad_3)] z_x1 = arr_z_x1[(i_quad_1, i_quad_2, i_quad_3)] z_x2 = arr_z_x2[(i_quad_1, i_quad_2, i_quad_3)] z_x3 = arr_z_x3[(i_quad_1, i_quad_2, i_quad_3)] weight = arr_weights[(i_quad_1, i_quad_2, i_quad_3)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2, i_quad_3)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2, i_quad_3)] weight_x3 = arr_weights_x3[(i_quad_1, i_quad_2, i_quad_3)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) x_x3 = ((x_x3 - ((weight_x3 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) y_x3 = ((y_x3 - ((weight_x3 * y) * inv_weight)) * inv_weight) z_x1 = ((z_x1 - ((weight_x1 * z) * inv_weight)) * inv_weight) z_x2 = ((z_x2 - ((weight_x2 * z) * inv_weight)) * inv_weight) z_x3 = ((z_x3 - ((weight_x3 * z) * inv_weight)) * inv_weight) det = (((((((x_x1 * y_x2) * z_x3) + ((x_x2 * y_x3) * z_x1)) + ((x_x3 * y_x1) * z_x2)) - ((x_x1 * y_x3) * z_x2)) - ((x_x2 * y_x1) * z_x3)) - ((x_x3 * y_x2) * z_x1)) a_11 = ((y_x2 * z_x3) - (y_x3 * z_x2)) a_12 = (((- y_x1) * z_x3) + (y_x3 * z_x1)) a_13 = ((y_x1 * z_x2) - (y_x2 * z_x1)) a_21 = (((- 
x_x2) * z_x3) + (x_x3 * z_x2)) a_22 = ((x_x1 * z_x3) - (x_x3 * z_x1)) a_23 = (((- x_x1) * z_x2) + (x_x2 * z_x1)) a_31 = ((x_x2 * y_x3) - (x_x3 * y_x2)) a_32 = (((- x_x1) * y_x3) + (x_x3 * y_x1)) a_33 = ((x_x1 * y_x2) - (x_x2 * y_x1)) jacobians_inv[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), ((i_cell_3 * k3) + i_quad_3), :, :)] = (np.array([[a_11, a_21, a_31], [a_12, a_22, a_32], [a_13, a_23, a_33]]) / det)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction nc3: int Number of cells in the Z direction pad1: int Padding in the X direction pad2: int Padding in the Y direction pad3: int Padding in the Z direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction f_p3: int Degree in the Z direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction k3: int Number of evaluation points in the Z direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_basis_3: ndarray of floats Basis functions values at each cell and quadrature points in the Z direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_spans_3: ndarray of ints Spans in the Z direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_z: ndarray of floats Coefficients of the Z field global_arr_coeff_weigths: ndarray of floats Coefficients of the weight field jacobians_inv: ndarray of floats Inverse of the Jacobian matrix on the grid
psydac/core/kernels.py
eval_jacobians_inv_3d_weights
mayuri-dhote/psydac
0
python
def eval_jacobians_inv_3d_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int,
                                  f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int,
                                  global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                                  global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]',
                                  global_spans_2: 'int[:]', global_spans_3: 'int[:]',
                                  global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]',
                                  global_arr_coeff_z: 'float[:,:,:]',
                                  global_arr_coeff_weigths: 'float[:,:,:]',
                                  jacobians_inv: 'float[:,:,:,:,:]'):
    """
    Evaluate the inverse of the Jacobian matrix of a 3D weighted (NURBS-style)
    spline mapping at every evaluation point of the tensor-product grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction
    nc3: int
        Number of cells in the Z direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction
    pad3: int
        Padding in the Z direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction
    f_p3: int
        Degree in the Z direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction
    k3: int
        Number of evaluation points in the Z direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction
    global_basis_3: ndarray of floats
        Basis functions values at each cell and quadrature points in the Z direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction
    global_spans_3: ndarray of ints
        Spans in the Z direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field
    global_arr_coeff_z: ndarray of floats
        Coefficients of the Z field

    global_arr_coeff_weigths: ndarray of floats
        Coefficients of the weight field

    jacobians_inv: ndarray of floats
        Inverse of the Jacobian matrix on the grid
    """
    # Local (per-cell) copies of the relevant spline coefficients.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_z = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeff_weights = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))

    # Per-cell accumulators: field values and their logical-coordinate derivatives.
    arr_x = np.zeros((k1, k2, k3))
    arr_y = np.zeros((k1, k2, k3))
    arr_z = np.zeros((k1, k2, k3))

    arr_x_x1 = np.zeros((k1, k2, k3))
    arr_x_x2 = np.zeros((k1, k2, k3))
    arr_x_x3 = np.zeros((k1, k2, k3))

    arr_y_x1 = np.zeros((k1, k2, k3))
    arr_y_x2 = np.zeros((k1, k2, k3))
    arr_y_x3 = np.zeros((k1, k2, k3))

    arr_z_x1 = np.zeros((k1, k2, k3))
    arr_z_x2 = np.zeros((k1, k2, k3))
    arr_z_x3 = np.zeros((k1, k2, k3))

    arr_weights = np.zeros((k1, k2, k3))
    arr_weights_x1 = np.zeros((k1, k2, k3))
    arr_weights_x2 = np.zeros((k1, k2, k3))
    arr_weights_x3 = np.zeros((k1, k2, k3))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]
        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]
            for i_cell_3 in range(nc3):
                span_3 = global_spans_3[i_cell_3]

                # Reset ALL accumulators for this cell.
                # BUGFIX: arr_y_x2 was previously never reset (arr_y_x1 was
                # cleared twice instead), so d(y)/d(x2) leaked across cells.
                arr_x[:, :, :] = 0.0
                arr_y[:, :, :] = 0.0
                arr_z[:, :, :] = 0.0

                arr_x_x1[:, :, :] = 0.0
                arr_x_x2[:, :, :] = 0.0
                arr_x_x3[:, :, :] = 0.0

                arr_y_x1[:, :, :] = 0.0
                arr_y_x2[:, :, :] = 0.0
                arr_y_x3[:, :, :] = 0.0

                arr_z_x1[:, :, :] = 0.0
                arr_z_x2[:, :, :] = 0.0
                arr_z_x3[:, :, :] = 0.0

                arr_weights[:, :, :] = 0.0
                arr_weights_x1[:, :, :] = 0.0
                arr_weights_x2[:, :, :] = 0.0
                arr_weights_x3[:, :, :] = 0.0

                # Gather the coefficients with non-zero support on this cell.
                arr_coeffs_x[:, :, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_y[:, :, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_z[:, :, :] = global_arr_coeff_z[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeff_weights[:, :, :] = global_arr_coeff_weigths[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                                     pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                                     pad3 + span_3 - f_p3:1 + pad3 + span_3]

                for i_quad_1 in range(k1):
                    for i_quad_2 in range(k2):
                        for i_quad_3 in range(k3):
                            # Accumulate the weighted fields and their derivatives.
                            for i_basis_1 in range(1 + f_p1):
                                spline_1 = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                                spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]
                                for i_basis_2 in range(1 + f_p2):
                                    spline_2 = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                                    spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]
                                    for i_basis_3 in range(1 + f_p3):
                                        spline_3 = global_basis_3[i_cell_3, i_basis_3, 0, i_quad_3]
                                        spline_x3 = global_basis_3[i_cell_3, i_basis_3, 1, i_quad_3]

                                        mapping = spline_1 * spline_2 * spline_3
                                        mapping_x1 = spline_x1 * spline_2 * spline_3
                                        mapping_x2 = spline_1 * spline_x2 * spline_3
                                        mapping_x3 = spline_1 * spline_2 * spline_x3

                                        coeff_x = arr_coeffs_x[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_y = arr_coeffs_y[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_z = arr_coeffs_z[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_weight = arr_coeff_weights[i_basis_1, i_basis_2, i_basis_3]

                                        arr_x[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_x
                                        arr_y[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_y
                                        arr_z[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_z

                                        arr_x_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_x
                                        arr_x_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_x
                                        arr_x_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_x

                                        arr_y_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_y
                                        arr_y_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_y
                                        arr_y_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_y

                                        arr_z_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_z
                                        arr_z_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_z
                                        arr_z_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_z

                                        arr_weights[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_weight
                                        arr_weights_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_weight
                                        arr_weights_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_weight
                                        arr_weights_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_weight

                            x = arr_x[i_quad_1, i_quad_2, i_quad_3]
                            y = arr_y[i_quad_1, i_quad_2, i_quad_3]
                            z = arr_z[i_quad_1, i_quad_2, i_quad_3]

                            x_x1 = arr_x_x1[i_quad_1, i_quad_2, i_quad_3]
                            x_x2 = arr_x_x2[i_quad_1, i_quad_2, i_quad_3]
                            x_x3 = arr_x_x3[i_quad_1, i_quad_2, i_quad_3]

                            y_x1 = arr_y_x1[i_quad_1, i_quad_2, i_quad_3]
                            y_x2 = arr_y_x2[i_quad_1, i_quad_2, i_quad_3]
                            y_x3 = arr_y_x3[i_quad_1, i_quad_2, i_quad_3]

                            z_x1 = arr_z_x1[i_quad_1, i_quad_2, i_quad_3]
                            z_x2 = arr_z_x2[i_quad_1, i_quad_2, i_quad_3]
                            z_x3 = arr_z_x3[i_quad_1, i_quad_2, i_quad_3]

                            weight = arr_weights[i_quad_1, i_quad_2, i_quad_3]
                            weight_x1 = arr_weights_x1[i_quad_1, i_quad_2, i_quad_3]
                            weight_x2 = arr_weights_x2[i_quad_1, i_quad_2, i_quad_3]
                            weight_x3 = arr_weights_x3[i_quad_1, i_quad_2, i_quad_3]

                            # Quotient rule: derivatives of the rational map
                            # (field * weight) / weight.
                            inv_weight = 1.0 / weight

                            x_x1 = (x_x1 - weight_x1 * x * inv_weight) * inv_weight
                            x_x2 = (x_x2 - weight_x2 * x * inv_weight) * inv_weight
                            x_x3 = (x_x3 - weight_x3 * x * inv_weight) * inv_weight

                            y_x1 = (y_x1 - weight_x1 * y * inv_weight) * inv_weight
                            y_x2 = (y_x2 - weight_x2 * y * inv_weight) * inv_weight
                            y_x3 = (y_x3 - weight_x3 * y * inv_weight) * inv_weight

                            z_x1 = (z_x1 - weight_x1 * z * inv_weight) * inv_weight
                            z_x2 = (z_x2 - weight_x2 * z * inv_weight) * inv_weight
                            z_x3 = (z_x3 - weight_x3 * z * inv_weight) * inv_weight

                            # Inverse via the adjugate: inv(J) = adj(J) / det(J).
                            det = (x_x1 * y_x2 * z_x3 + x_x2 * y_x3 * z_x1 + x_x3 * y_x1 * z_x2
                                   - x_x1 * y_x3 * z_x2 - x_x2 * y_x1 * z_x3 - x_x3 * y_x2 * z_x1)

                            a_11 = y_x2 * z_x3 - y_x3 * z_x2
                            a_12 = -y_x1 * z_x3 + y_x3 * z_x1
                            a_13 = y_x1 * z_x2 - y_x2 * z_x1
                            a_21 = -x_x2 * z_x3 + x_x3 * z_x2
                            a_22 = x_x1 * z_x3 - x_x3 * z_x1
                            a_23 = -x_x1 * z_x2 + x_x2 * z_x1
                            a_31 = x_x2 * y_x3 - x_x3 * y_x2
                            a_32 = -x_x1 * y_x3 + x_x3 * y_x1
                            a_33 = x_x1 * y_x2 - x_x2 * y_x1

                            jacobians_inv[i_cell_1 * k1 + i_quad_1,
                                          i_cell_2 * k2 + i_quad_2,
                                          i_cell_3 * k3 + i_quad_3, :, :] = (
                                np.array([[a_11, a_21, a_31],
                                          [a_12, a_22, a_32],
                                          [a_13, a_23, a_33]]) / det)
def eval_jacobians_inv_3d_weights(nc1: int, nc2: int, nc3: int, pad1: int, pad2: int, pad3: int,
                                  f_p1: int, f_p2: int, f_p3: int, k1: int, k2: int, k3: int,
                                  global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                                  global_basis_3: 'float[:,:,:,:]', global_spans_1: 'int[:]',
                                  global_spans_2: 'int[:]', global_spans_3: 'int[:]',
                                  global_arr_coeff_x: 'float[:,:,:]', global_arr_coeff_y: 'float[:,:,:]',
                                  global_arr_coeff_z: 'float[:,:,:]',
                                  global_arr_coeff_weigths: 'float[:,:,:]',
                                  jacobians_inv: 'float[:,:,:,:,:]'):
    """
    Evaluate the inverse of the Jacobian matrix of a 3D weighted (NURBS-style)
    spline mapping at every evaluation point of the tensor-product grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction
    nc3: int
        Number of cells in the Z direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction
    pad3: int
        Padding in the Z direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction
    f_p3: int
        Degree in the Z direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction
    k3: int
        Number of evaluation points in the Z direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction
    global_basis_3: ndarray of floats
        Basis functions values at each cell and quadrature points in the Z direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction
    global_spans_3: ndarray of ints
        Spans in the Z direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field
    global_arr_coeff_z: ndarray of floats
        Coefficients of the Z field

    global_arr_coeff_weigths: ndarray of floats
        Coefficients of the weight field

    jacobians_inv: ndarray of floats
        Inverse of the Jacobian matrix on the grid
    """
    # Local (per-cell) copies of the relevant spline coefficients.
    arr_coeffs_x = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_y = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeffs_z = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))
    arr_coeff_weights = np.zeros((1 + f_p1, 1 + f_p2, 1 + f_p3))

    # Per-cell accumulators: field values and their logical-coordinate derivatives.
    arr_x = np.zeros((k1, k2, k3))
    arr_y = np.zeros((k1, k2, k3))
    arr_z = np.zeros((k1, k2, k3))

    arr_x_x1 = np.zeros((k1, k2, k3))
    arr_x_x2 = np.zeros((k1, k2, k3))
    arr_x_x3 = np.zeros((k1, k2, k3))

    arr_y_x1 = np.zeros((k1, k2, k3))
    arr_y_x2 = np.zeros((k1, k2, k3))
    arr_y_x3 = np.zeros((k1, k2, k3))

    arr_z_x1 = np.zeros((k1, k2, k3))
    arr_z_x2 = np.zeros((k1, k2, k3))
    arr_z_x3 = np.zeros((k1, k2, k3))

    arr_weights = np.zeros((k1, k2, k3))
    arr_weights_x1 = np.zeros((k1, k2, k3))
    arr_weights_x2 = np.zeros((k1, k2, k3))
    arr_weights_x3 = np.zeros((k1, k2, k3))

    for i_cell_1 in range(nc1):
        span_1 = global_spans_1[i_cell_1]
        for i_cell_2 in range(nc2):
            span_2 = global_spans_2[i_cell_2]
            for i_cell_3 in range(nc3):
                span_3 = global_spans_3[i_cell_3]

                # Reset ALL accumulators for this cell.
                # BUGFIX: arr_y_x2 was previously never reset (arr_y_x1 was
                # cleared twice instead), so d(y)/d(x2) leaked across cells.
                arr_x[:, :, :] = 0.0
                arr_y[:, :, :] = 0.0
                arr_z[:, :, :] = 0.0

                arr_x_x1[:, :, :] = 0.0
                arr_x_x2[:, :, :] = 0.0
                arr_x_x3[:, :, :] = 0.0

                arr_y_x1[:, :, :] = 0.0
                arr_y_x2[:, :, :] = 0.0
                arr_y_x3[:, :, :] = 0.0

                arr_z_x1[:, :, :] = 0.0
                arr_z_x2[:, :, :] = 0.0
                arr_z_x3[:, :, :] = 0.0

                arr_weights[:, :, :] = 0.0
                arr_weights_x1[:, :, :] = 0.0
                arr_weights_x2[:, :, :] = 0.0
                arr_weights_x3[:, :, :] = 0.0

                # Gather the coefficients with non-zero support on this cell.
                arr_coeffs_x[:, :, :] = global_arr_coeff_x[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_y[:, :, :] = global_arr_coeff_y[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeffs_z[:, :, :] = global_arr_coeff_z[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                           pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                           pad3 + span_3 - f_p3:1 + pad3 + span_3]
                arr_coeff_weights[:, :, :] = global_arr_coeff_weigths[pad1 + span_1 - f_p1:1 + pad1 + span_1,
                                                                     pad2 + span_2 - f_p2:1 + pad2 + span_2,
                                                                     pad3 + span_3 - f_p3:1 + pad3 + span_3]

                for i_quad_1 in range(k1):
                    for i_quad_2 in range(k2):
                        for i_quad_3 in range(k3):
                            # Accumulate the weighted fields and their derivatives.
                            for i_basis_1 in range(1 + f_p1):
                                spline_1 = global_basis_1[i_cell_1, i_basis_1, 0, i_quad_1]
                                spline_x1 = global_basis_1[i_cell_1, i_basis_1, 1, i_quad_1]
                                for i_basis_2 in range(1 + f_p2):
                                    spline_2 = global_basis_2[i_cell_2, i_basis_2, 0, i_quad_2]
                                    spline_x2 = global_basis_2[i_cell_2, i_basis_2, 1, i_quad_2]
                                    for i_basis_3 in range(1 + f_p3):
                                        spline_3 = global_basis_3[i_cell_3, i_basis_3, 0, i_quad_3]
                                        spline_x3 = global_basis_3[i_cell_3, i_basis_3, 1, i_quad_3]

                                        mapping = spline_1 * spline_2 * spline_3
                                        mapping_x1 = spline_x1 * spline_2 * spline_3
                                        mapping_x2 = spline_1 * spline_x2 * spline_3
                                        mapping_x3 = spline_1 * spline_2 * spline_x3

                                        coeff_x = arr_coeffs_x[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_y = arr_coeffs_y[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_z = arr_coeffs_z[i_basis_1, i_basis_2, i_basis_3]
                                        coeff_weight = arr_coeff_weights[i_basis_1, i_basis_2, i_basis_3]

                                        arr_x[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_x
                                        arr_y[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_y
                                        arr_z[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_z

                                        arr_x_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_x
                                        arr_x_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_x
                                        arr_x_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_x

                                        arr_y_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_y
                                        arr_y_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_y
                                        arr_y_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_y

                                        arr_z_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_z
                                        arr_z_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_z
                                        arr_z_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_z

                                        arr_weights[i_quad_1, i_quad_2, i_quad_3] += mapping * coeff_weight
                                        arr_weights_x1[i_quad_1, i_quad_2, i_quad_3] += mapping_x1 * coeff_weight
                                        arr_weights_x2[i_quad_1, i_quad_2, i_quad_3] += mapping_x2 * coeff_weight
                                        arr_weights_x3[i_quad_1, i_quad_2, i_quad_3] += mapping_x3 * coeff_weight

                            x = arr_x[i_quad_1, i_quad_2, i_quad_3]
                            y = arr_y[i_quad_1, i_quad_2, i_quad_3]
                            z = arr_z[i_quad_1, i_quad_2, i_quad_3]

                            x_x1 = arr_x_x1[i_quad_1, i_quad_2, i_quad_3]
                            x_x2 = arr_x_x2[i_quad_1, i_quad_2, i_quad_3]
                            x_x3 = arr_x_x3[i_quad_1, i_quad_2, i_quad_3]

                            y_x1 = arr_y_x1[i_quad_1, i_quad_2, i_quad_3]
                            y_x2 = arr_y_x2[i_quad_1, i_quad_2, i_quad_3]
                            y_x3 = arr_y_x3[i_quad_1, i_quad_2, i_quad_3]

                            z_x1 = arr_z_x1[i_quad_1, i_quad_2, i_quad_3]
                            z_x2 = arr_z_x2[i_quad_1, i_quad_2, i_quad_3]
                            z_x3 = arr_z_x3[i_quad_1, i_quad_2, i_quad_3]

                            weight = arr_weights[i_quad_1, i_quad_2, i_quad_3]
                            weight_x1 = arr_weights_x1[i_quad_1, i_quad_2, i_quad_3]
                            weight_x2 = arr_weights_x2[i_quad_1, i_quad_2, i_quad_3]
                            weight_x3 = arr_weights_x3[i_quad_1, i_quad_2, i_quad_3]

                            # Quotient rule: derivatives of the rational map
                            # (field * weight) / weight.
                            inv_weight = 1.0 / weight

                            x_x1 = (x_x1 - weight_x1 * x * inv_weight) * inv_weight
                            x_x2 = (x_x2 - weight_x2 * x * inv_weight) * inv_weight
                            x_x3 = (x_x3 - weight_x3 * x * inv_weight) * inv_weight

                            y_x1 = (y_x1 - weight_x1 * y * inv_weight) * inv_weight
                            y_x2 = (y_x2 - weight_x2 * y * inv_weight) * inv_weight
                            y_x3 = (y_x3 - weight_x3 * y * inv_weight) * inv_weight

                            z_x1 = (z_x1 - weight_x1 * z * inv_weight) * inv_weight
                            z_x2 = (z_x2 - weight_x2 * z * inv_weight) * inv_weight
                            z_x3 = (z_x3 - weight_x3 * z * inv_weight) * inv_weight

                            # Inverse via the adjugate: inv(J) = adj(J) / det(J).
                            det = (x_x1 * y_x2 * z_x3 + x_x2 * y_x3 * z_x1 + x_x3 * y_x1 * z_x2
                                   - x_x1 * y_x3 * z_x2 - x_x2 * y_x1 * z_x3 - x_x3 * y_x2 * z_x1)

                            a_11 = y_x2 * z_x3 - y_x3 * z_x2
                            a_12 = -y_x1 * z_x3 + y_x3 * z_x1
                            a_13 = y_x1 * z_x2 - y_x2 * z_x1
                            a_21 = -x_x2 * z_x3 + x_x3 * z_x2
                            a_22 = x_x1 * z_x3 - x_x3 * z_x1
                            a_23 = -x_x1 * z_x2 + x_x2 * z_x1
                            a_31 = x_x2 * y_x3 - x_x3 * y_x2
                            a_32 = -x_x1 * y_x3 + x_x3 * y_x1
                            a_33 = x_x1 * y_x2 - x_x2 * y_x1

                            jacobians_inv[i_cell_1 * k1 + i_quad_1,
                                          i_cell_2 * k2 + i_quad_2,
                                          i_cell_3 * k3 + i_quad_3, :, :] = (
                                np.array([[a_11, a_21, a_31],
                                          [a_12, a_22, a_32],
                                          [a_13, a_23, a_33]]) / det)
246b08b03c90f60c0f2ef0fa94ec2b1250e9d26e5cc46c9045b813eecc47c731
def eval_jacobians_inv_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int,
                                  f_p1: int, f_p2: int, k1: int, k2: int,
                                  global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                                  global_spans_1: 'int[:]', global_spans_2: 'int[:]',
                                  global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]',
                                  global_arr_coeff_weights: 'float[:,:]',
                                  jacobians_inv: 'float[:,:,:,:]'):
    """
    Evaluate the inverse of the Jacobian matrix of a 2D weighted (NURBS-style)
    spline mapping at every evaluation point of the tensor-product grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field

    global_arr_coeff_weights: ndarray of floats
        Coefficients of the weights field

    jacobians_inv: ndarray of floats
        Inverse of the Jacobian matrix at every point of the grid
    """
    n_b1 = 1 + f_p1
    n_b2 = 1 + f_p2

    # Per-cell coefficient windows.
    loc_cx = np.zeros((n_b1, n_b2))
    loc_cy = np.zeros((n_b1, n_b2))
    loc_cw = np.zeros((n_b1, n_b2))

    # Per-cell accumulators: values and first derivatives of x, y and the weight.
    vx = np.zeros((k1, k2))
    vy = np.zeros((k1, k2))
    vx1 = np.zeros((k1, k2))
    vx2 = np.zeros((k1, k2))
    vy1 = np.zeros((k1, k2))
    vy2 = np.zeros((k1, k2))
    vw = np.zeros((k1, k2))
    vw1 = np.zeros((k1, k2))
    vw2 = np.zeros((k1, k2))

    for c1 in range(nc1):
        sp1 = global_spans_1[c1]
        rows = slice(pad1 + sp1 - f_p1, 1 + pad1 + sp1)
        for c2 in range(nc2):
            sp2 = global_spans_2[c2]
            cols = slice(pad2 + sp2 - f_p2, 1 + pad2 + sp2)

            # Clear every accumulator before working on this cell.
            vx.fill(0.0)
            vy.fill(0.0)
            vx1.fill(0.0)
            vx2.fill(0.0)
            vy1.fill(0.0)
            vy2.fill(0.0)
            vw.fill(0.0)
            vw1.fill(0.0)
            vw2.fill(0.0)

            # Pull in the coefficients with support on this cell.
            loc_cx[:, :] = global_arr_coeff_x[rows, cols]
            loc_cy[:, :] = global_arr_coeff_y[rows, cols]
            loc_cw[:, :] = global_arr_coeff_weights[rows, cols]

            for q1 in range(k1):
                for q2 in range(k2):
                    # Accumulate the weighted fields and their derivatives.
                    for b1 in range(n_b1):
                        s1 = global_basis_1[c1, b1, 0, q1]
                        d1 = global_basis_1[c1, b1, 1, q1]
                        for b2 in range(n_b2):
                            s2 = global_basis_2[c2, b2, 0, q2]
                            d2 = global_basis_2[c2, b2, 1, q2]

                            phi = s1 * s2
                            phi_1 = d1 * s2
                            phi_2 = s1 * d2

                            cx = loc_cx[b1, b2]
                            cy = loc_cy[b1, b2]
                            cw = loc_cw[b1, b2]

                            vx[q1, q2] += phi * cx
                            vy[q1, q2] += phi * cy
                            vx1[q1, q2] += phi_1 * cx
                            vx2[q1, q2] += phi_2 * cx
                            vy1[q1, q2] += phi_1 * cy
                            vy2[q1, q2] += phi_2 * cy
                            vw[q1, q2] += phi * cw
                            vw1[q1, q2] += phi_1 * cw
                            vw2[q1, q2] += phi_2 * cw

                    xq = vx[q1, q2]
                    yq = vy[q1, q2]
                    w = vw[q1, q2]
                    w1 = vw1[q1, q2]
                    w2 = vw2[q1, q2]
                    iw = 1.0 / w

                    # Quotient rule for the rational (weighted) mapping.
                    dx1 = (vx1[q1, q2] - w1 * xq * iw) * iw
                    dx2 = (vx2[q1, q2] - w2 * xq * iw) * iw
                    dy1 = (vy1[q1, q2] - w1 * yq * iw) * iw
                    dy2 = (vy2[q1, q2] - w2 * yq * iw) * iw

                    # 2x2 inverse: swap the diagonal, negate the off-diagonal,
                    # divide by the determinant.
                    det = dx1 * dy2 - dx2 * dy1
                    jacobians_inv[c1 * k1 + q1, c2 * k2 + q2, :, :] = (
                        np.array([[dy2, -dx2],
                                  [-dy1, dx1]]) / det)
Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_weights: ndarray of floats Coefficients of the weights field jacobians_inv: ndarray of floats Inverse of the Jacobian matrix at every point of the grid
psydac/core/kernels.py
eval_jacobians_inv_2d_weights
mayuri-dhote/psydac
0
python
def eval_jacobians_inv_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int,
                                  f_p1: int, f_p2: int, k1: int, k2: int,
                                  global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]',
                                  global_spans_1: 'int[:]', global_spans_2: 'int[:]',
                                  global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]',
                                  global_arr_coeff_weights: 'float[:,:]',
                                  jacobians_inv: 'float[:,:,:,:]'):
    """
    Evaluate the inverse of the Jacobian matrix of a 2D weighted (NURBS-style)
    spline mapping at every evaluation point of the tensor-product grid.

    Parameters
    ----------
    nc1: int
        Number of cells in the X direction
    nc2: int
        Number of cells in the Y direction

    pad1: int
        Padding in the X direction
    pad2: int
        Padding in the Y direction

    f_p1: int
        Degree in the X direction
    f_p2: int
        Degree in the Y direction

    k1: int
        Number of evaluation points in the X direction
    k2: int
        Number of evaluation points in the Y direction

    global_basis_1: ndarray of floats
        Basis functions values at each cell and quadrature points in the X direction
    global_basis_2: ndarray of floats
        Basis functions values at each cell and quadrature points in the Y direction

    global_spans_1: ndarray of ints
        Spans in the X direction
    global_spans_2: ndarray of ints
        Spans in the Y direction

    global_arr_coeff_x: ndarray of floats
        Coefficients of the X field
    global_arr_coeff_y: ndarray of floats
        Coefficients of the Y field

    global_arr_coeff_weights: ndarray of floats
        Coefficients of the weights field

    jacobians_inv: ndarray of floats
        Inverse of the Jacobian matrix at every point of the grid
    """
    n_b1 = 1 + f_p1
    n_b2 = 1 + f_p2

    # Per-cell coefficient windows.
    loc_cx = np.zeros((n_b1, n_b2))
    loc_cy = np.zeros((n_b1, n_b2))
    loc_cw = np.zeros((n_b1, n_b2))

    # Per-cell accumulators: values and first derivatives of x, y and the weight.
    vx = np.zeros((k1, k2))
    vy = np.zeros((k1, k2))
    vx1 = np.zeros((k1, k2))
    vx2 = np.zeros((k1, k2))
    vy1 = np.zeros((k1, k2))
    vy2 = np.zeros((k1, k2))
    vw = np.zeros((k1, k2))
    vw1 = np.zeros((k1, k2))
    vw2 = np.zeros((k1, k2))

    for c1 in range(nc1):
        sp1 = global_spans_1[c1]
        rows = slice(pad1 + sp1 - f_p1, 1 + pad1 + sp1)
        for c2 in range(nc2):
            sp2 = global_spans_2[c2]
            cols = slice(pad2 + sp2 - f_p2, 1 + pad2 + sp2)

            # Clear every accumulator before working on this cell.
            vx.fill(0.0)
            vy.fill(0.0)
            vx1.fill(0.0)
            vx2.fill(0.0)
            vy1.fill(0.0)
            vy2.fill(0.0)
            vw.fill(0.0)
            vw1.fill(0.0)
            vw2.fill(0.0)

            # Pull in the coefficients with support on this cell.
            loc_cx[:, :] = global_arr_coeff_x[rows, cols]
            loc_cy[:, :] = global_arr_coeff_y[rows, cols]
            loc_cw[:, :] = global_arr_coeff_weights[rows, cols]

            for q1 in range(k1):
                for q2 in range(k2):
                    # Accumulate the weighted fields and their derivatives.
                    for b1 in range(n_b1):
                        s1 = global_basis_1[c1, b1, 0, q1]
                        d1 = global_basis_1[c1, b1, 1, q1]
                        for b2 in range(n_b2):
                            s2 = global_basis_2[c2, b2, 0, q2]
                            d2 = global_basis_2[c2, b2, 1, q2]

                            phi = s1 * s2
                            phi_1 = d1 * s2
                            phi_2 = s1 * d2

                            cx = loc_cx[b1, b2]
                            cy = loc_cy[b1, b2]
                            cw = loc_cw[b1, b2]

                            vx[q1, q2] += phi * cx
                            vy[q1, q2] += phi * cy
                            vx1[q1, q2] += phi_1 * cx
                            vx2[q1, q2] += phi_2 * cx
                            vy1[q1, q2] += phi_1 * cy
                            vy2[q1, q2] += phi_2 * cy
                            vw[q1, q2] += phi * cw
                            vw1[q1, q2] += phi_1 * cw
                            vw2[q1, q2] += phi_2 * cw

                    xq = vx[q1, q2]
                    yq = vy[q1, q2]
                    w = vw[q1, q2]
                    w1 = vw1[q1, q2]
                    w2 = vw2[q1, q2]
                    iw = 1.0 / w

                    # Quotient rule for the rational (weighted) mapping.
                    dx1 = (vx1[q1, q2] - w1 * xq * iw) * iw
                    dx2 = (vx2[q1, q2] - w2 * xq * iw) * iw
                    dy1 = (vy1[q1, q2] - w1 * yq * iw) * iw
                    dy2 = (vy2[q1, q2] - w2 * yq * iw) * iw

                    # 2x2 inverse: swap the diagonal, negate the off-diagonal,
                    # divide by the determinant.
                    det = dx1 * dy2 - dx2 * dy1
                    jacobians_inv[c1 * k1 + q1, c2 * k2 + q2, :, :] = (
                        np.array([[dy2, -dx2],
                                  [-dy1, dx1]]) / det)
def eval_jacobians_inv_2d_weights(nc1: int, nc2: int, pad1: int, pad2: int, f_p1: int, f_p2: int, k1: int, k2: int, global_basis_1: 'float[:,:,:,:]', global_basis_2: 'float[:,:,:,:]', global_spans_1: 'int[:]', global_spans_2: 'int[:]', global_arr_coeff_x: 'float[:,:]', global_arr_coeff_y: 'float[:,:]', global_arr_coeff_weights: 'float[:,:]', jacobians_inv: 'float[:,:,:,:]'): '\n Parameters\n ----------\n nc1: int\n Number of cells in the X direction\n nc2: int\n Number of cells in the Y direction\n\n pad1: int\n Padding in the X direction\n pad2: int\n Padding in the Y direction\n\n f_p1: int\n Degree in the X direction\n f_p2: int\n Degree in the Y direction\n\n k1: int\n Number of evaluation points in the X direction\n k2: int\n Number of evaluation points in the Y direction\n\n global_basis_1: ndarray of floats\n Basis functions values at each cell and quadrature points in the X direction\n global_basis_2: ndarray of floats\n Basis functions values at each cell and quadrature points in the Y direction\n\n global_spans_1: ndarray of ints\n Spans in the X direction\n global_spans_2: ndarray of ints\n Spans in the Y direction\n\n global_arr_coeff_x: ndarray of floats\n Coefficients of the X field\n global_arr_coeff_y: ndarray of floats\n Coefficients of the Y field\n\n global_arr_coeff_weights: ndarray of floats\n Coefficients of the weights field\n\n jacobians_inv: ndarray of floats\n Inverse of the Jacobian matrix at every point of the grid\n ' arr_coeffs_x = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeffs_y = np.zeros(((1 + f_p1), (1 + f_p2))) arr_coeff_weights = np.zeros(((1 + f_p1), (1 + f_p2))) arr_x = np.zeros((k1, k2)) arr_y = np.zeros((k1, k2)) arr_x_x1 = np.zeros((k1, k2)) arr_x_x2 = np.zeros((k1, k2)) arr_y_x1 = np.zeros((k1, k2)) arr_y_x2 = np.zeros((k1, k2)) arr_weights = np.zeros((k1, k2)) arr_weights_x1 = np.zeros((k1, k2)) arr_weights_x2 = np.zeros((k1, k2)) for i_cell_1 in range(nc1): span_1 = global_spans_1[i_cell_1] for i_cell_2 in range(nc2): 
span_2 = global_spans_2[i_cell_2] arr_x[(:, :)] = 0.0 arr_y[(:, :)] = 0.0 arr_x_x1[(:, :)] = 0.0 arr_x_x2[(:, :)] = 0.0 arr_y_x1[(:, :)] = 0.0 arr_y_x2[(:, :)] = 0.0 arr_weights[(:, :)] = 0.0 arr_weights_x1[(:, :)] = 0.0 arr_weights_x2[(:, :)] = 0.0 arr_coeffs_x[(:, :)] = global_arr_coeff_x[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeffs_y[(:, :)] = global_arr_coeff_y[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] arr_coeff_weights[(:, :)] = global_arr_coeff_weights[(((pad1 + span_1) - f_p1):((1 + pad1) + span_1), ((pad2 + span_2) - f_p2):((1 + pad2) + span_2))] for i_quad_1 in range(k1): for i_quad_2 in range(k2): for i_basis_1 in range((1 + f_p1)): spline_1 = global_basis_1[(i_cell_1, i_basis_1, 0, i_quad_1)] spline_x1 = global_basis_1[(i_cell_1, i_basis_1, 1, i_quad_1)] for i_basis_2 in range((1 + f_p2)): spline_2 = global_basis_2[(i_cell_2, i_basis_2, 0, i_quad_2)] spline_x2 = global_basis_2[(i_cell_2, i_basis_2, 1, i_quad_2)] mapping = (spline_1 * spline_2) mapping_x1 = (spline_x1 * spline_2) mapping_x2 = (spline_1 * spline_x2) coeff_x = arr_coeffs_x[(i_basis_1, i_basis_2)] coeff_y = arr_coeffs_y[(i_basis_1, i_basis_2)] coeff_weights = arr_coeff_weights[(i_basis_1, i_basis_2)] arr_x[(i_quad_1, i_quad_2)] += (mapping * coeff_x) arr_y[(i_quad_1, i_quad_2)] += (mapping * coeff_y) arr_x_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_x) arr_x_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_x) arr_y_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_y) arr_y_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_y) arr_weights[(i_quad_1, i_quad_2)] += (mapping * coeff_weights) arr_weights_x1[(i_quad_1, i_quad_2)] += (mapping_x1 * coeff_weights) arr_weights_x2[(i_quad_1, i_quad_2)] += (mapping_x2 * coeff_weights) x = arr_x[(i_quad_1, i_quad_2)] y = arr_y[(i_quad_1, i_quad_2)] x_x1 = arr_x_x1[(i_quad_1, i_quad_2)] x_x2 = arr_x_x2[(i_quad_1, i_quad_2)] y_x1 = 
arr_y_x1[(i_quad_1, i_quad_2)] y_x2 = arr_y_x2[(i_quad_1, i_quad_2)] weight = arr_weights[(i_quad_1, i_quad_2)] weight_x1 = arr_weights_x1[(i_quad_1, i_quad_2)] weight_x2 = arr_weights_x2[(i_quad_1, i_quad_2)] inv_weight = (1.0 / weight) x_x1 = ((x_x1 - ((weight_x1 * x) * inv_weight)) * inv_weight) x_x2 = ((x_x2 - ((weight_x2 * x) * inv_weight)) * inv_weight) y_x1 = ((y_x1 - ((weight_x1 * y) * inv_weight)) * inv_weight) y_x2 = ((y_x2 - ((weight_x2 * y) * inv_weight)) * inv_weight) det = ((x_x1 * y_x2) - (x_x2 * y_x1)) jacobians_inv[(((i_cell_1 * k1) + i_quad_1), ((i_cell_2 * k2) + i_quad_2), :, :)] = (np.array([[y_x2, (- x_x2)], [(- y_x1), x_x1]]) / det)<|docstring|>Parameters ---------- nc1: int Number of cells in the X direction nc2: int Number of cells in the Y direction pad1: int Padding in the X direction pad2: int Padding in the Y direction f_p1: int Degree in the X direction f_p2: int Degree in the Y direction k1: int Number of evaluation points in the X direction k2: int Number of evaluation points in the Y direction global_basis_1: ndarray of floats Basis functions values at each cell and quadrature points in the X direction global_basis_2: ndarray of floats Basis functions values at each cell and quadrature points in the Y direction global_spans_1: ndarray of ints Spans in the X direction global_spans_2: ndarray of ints Spans in the Y direction global_arr_coeff_x: ndarray of floats Coefficients of the X field global_arr_coeff_y: ndarray of floats Coefficients of the Y field global_arr_coeff_weights: ndarray of floats Coefficients of the weights field jacobians_inv: ndarray of floats Inverse of the Jacobian matrix at every point of the grid<|endoftext|>
5478164fcab40a880ecee12ab22ffef6f5d7cd94d6a112c88a8a18aa4dd609bf
def pushforward_2d_l2(fields_to_push: 'float[:,:,:]', jac_dets: 'float[:,:]', pushed_fields: 'float[:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (n_x1, n_x2, n_f) where:\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the L2 space.\n\n jac_dets: ndarray\n Values of the Jacobian determinant of the Mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[2]): pushed_fields[(:, :, i_f)] = (fields_to_push[(:, :, i_f)] / jac_dets[(:, :)])
Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (n_x1, n_x2, n_f) where: * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_f is the number of fields to push-forward in the L2 space. jac_dets: ndarray Values of the Jacobian determinant of the Mapping pushed_fields: ndarray Push forwarded fields
psydac/core/kernels.py
pushforward_2d_l2
mayuri-dhote/psydac
0
python
def pushforward_2d_l2(fields_to_push: 'float[:,:,:]', jac_dets: 'float[:,:]', pushed_fields: 'float[:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (n_x1, n_x2, n_f) where:\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the L2 space.\n\n jac_dets: ndarray\n Values of the Jacobian determinant of the Mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[2]): pushed_fields[(:, :, i_f)] = (fields_to_push[(:, :, i_f)] / jac_dets[(:, :)])
def pushforward_2d_l2(fields_to_push: 'float[:,:,:]', jac_dets: 'float[:,:]', pushed_fields: 'float[:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (n_x1, n_x2, n_f) where:\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the L2 space.\n\n jac_dets: ndarray\n Values of the Jacobian determinant of the Mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[2]): pushed_fields[(:, :, i_f)] = (fields_to_push[(:, :, i_f)] / jac_dets[(:, :)])<|docstring|>Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (n_x1, n_x2, n_f) where: * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_f is the number of fields to push-forward in the L2 space. jac_dets: ndarray Values of the Jacobian determinant of the Mapping pushed_fields: ndarray Push forwarded fields<|endoftext|>
0ee9aac1fb956f0957be9cd043136c014e948370a1957776e9aaf08d81fadd0a
def pushforward_3d_l2(fields_to_push: 'float[:,:,:,:]', jac_dets: 'float[:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (n_x1, n_x2, n_x3, n_f) where:\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid.\n * n_f is the number of fields to push-forward in the L2 space.\n\n jac_dets: ndarray\n Values of the Jacobian determinant of the Mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, :, i_f)] = (fields_to_push[(:, :, :, i_f)] / jac_dets[(:, :, :)])
Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (n_x1, n_x2, n_x3, n_f) where: * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_x3 is the number of points in direction 3 of the implicit grid. * n_f is the number of fields to push-forward in the L2 space. jac_dets: ndarray Values of the Jacobian determinant of the Mapping pushed_fields: ndarray Push forwarded fields
psydac/core/kernels.py
pushforward_3d_l2
mayuri-dhote/psydac
0
python
def pushforward_3d_l2(fields_to_push: 'float[:,:,:,:]', jac_dets: 'float[:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (n_x1, n_x2, n_x3, n_f) where:\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid.\n * n_f is the number of fields to push-forward in the L2 space.\n\n jac_dets: ndarray\n Values of the Jacobian determinant of the Mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, :, i_f)] = (fields_to_push[(:, :, :, i_f)] / jac_dets[(:, :, :)])
def pushforward_3d_l2(fields_to_push: 'float[:,:,:,:]', jac_dets: 'float[:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (n_x1, n_x2, n_x3, n_f) where:\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid.\n * n_f is the number of fields to push-forward in the L2 space.\n\n jac_dets: ndarray\n Values of the Jacobian determinant of the Mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, :, i_f)] = (fields_to_push[(:, :, :, i_f)] / jac_dets[(:, :, :)])<|docstring|>Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (n_x1, n_x2, n_x3, n_f) where: * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_x3 is the number of points in direction 3 of the implicit grid. * n_f is the number of fields to push-forward in the L2 space. jac_dets: ndarray Values of the Jacobian determinant of the Mapping pushed_fields: ndarray Push forwarded fields<|endoftext|>
e036acc054f5234109be8a7e4b8f4a70fc6a7d402fb16f0b05ffb5f003f92d44
def pushforward_2d_hcurl(fields_to_push: 'float[:,:,:,:]', inv_jac_mats: 'float[:,:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (2, n_x1, n_x2, n_f) where:\n * 2 is the logical dimension of the problem (2 here)\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the Hcurl space.\n\n inv_jac_mats: ndarray\n Inverses of the Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, 0, i_f)] = (((+ inv_jac_mats[(:, :, 0, 0)]) * fields_to_push[(0, :, :, i_f)]) + (inv_jac_mats[(:, :, 1, 0)] * fields_to_push[(1, :, :, i_f)])) pushed_fields[(:, :, 1, i_f)] = (((+ inv_jac_mats[(:, :, 0, 1)]) * fields_to_push[(0, :, :, i_f)]) + (inv_jac_mats[(:, :, 1, 1)] * fields_to_push[(1, :, :, i_f)]))
Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (2, n_x1, n_x2, n_f) where: * 2 is the logical dimension of the problem (2 here) * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_f is the number of fields to push-forward in the Hcurl space. inv_jac_mats: ndarray Inverses of the Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields
psydac/core/kernels.py
pushforward_2d_hcurl
mayuri-dhote/psydac
0
python
def pushforward_2d_hcurl(fields_to_push: 'float[:,:,:,:]', inv_jac_mats: 'float[:,:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (2, n_x1, n_x2, n_f) where:\n * 2 is the logical dimension of the problem (2 here)\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the Hcurl space.\n\n inv_jac_mats: ndarray\n Inverses of the Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, 0, i_f)] = (((+ inv_jac_mats[(:, :, 0, 0)]) * fields_to_push[(0, :, :, i_f)]) + (inv_jac_mats[(:, :, 1, 0)] * fields_to_push[(1, :, :, i_f)])) pushed_fields[(:, :, 1, i_f)] = (((+ inv_jac_mats[(:, :, 0, 1)]) * fields_to_push[(0, :, :, i_f)]) + (inv_jac_mats[(:, :, 1, 1)] * fields_to_push[(1, :, :, i_f)]))
def pushforward_2d_hcurl(fields_to_push: 'float[:,:,:,:]', inv_jac_mats: 'float[:,:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (2, n_x1, n_x2, n_f) where:\n * 2 is the logical dimension of the problem (2 here)\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the Hcurl space.\n\n inv_jac_mats: ndarray\n Inverses of the Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, 0, i_f)] = (((+ inv_jac_mats[(:, :, 0, 0)]) * fields_to_push[(0, :, :, i_f)]) + (inv_jac_mats[(:, :, 1, 0)] * fields_to_push[(1, :, :, i_f)])) pushed_fields[(:, :, 1, i_f)] = (((+ inv_jac_mats[(:, :, 0, 1)]) * fields_to_push[(0, :, :, i_f)]) + (inv_jac_mats[(:, :, 1, 1)] * fields_to_push[(1, :, :, i_f)]))<|docstring|>Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (2, n_x1, n_x2, n_f) where: * 2 is the logical dimension of the problem (2 here) * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_f is the number of fields to push-forward in the Hcurl space. inv_jac_mats: ndarray Inverses of the Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields<|endoftext|>
a8bfc125fc1271ebf9bfa3c0ea58cb52e176877b58eceb256a745b12f8b6c718
def pushforward_3d_hcurl(fields_to_push: 'float[:,:,:,:,:]', inv_jac_mats: 'float[:,:,:,:,:]', pushed_fields: 'float[:,:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (3, n_x1, n_x2, n_x3, n_f) where:\n * 3 is the logical dimension of the problem\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid\n * n_f is the number of fields to push-forward in the Hcurl space.\n\n inv_jac_mats: ndarray\n Inverses of the Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[4]): x = fields_to_push[(0, :, :, :, i_f)] y = fields_to_push[(1, :, :, :, i_f)] z = fields_to_push[(2, :, :, :, i_f)] pushed_fields[(:, :, :, 0, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 0)]) * x) + (inv_jac_mats[(:, :, :, 1, 0)] * y)) + (inv_jac_mats[(:, :, :, 2, 0)] * z)) pushed_fields[(:, :, :, 1, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 1)]) * x) + (inv_jac_mats[(:, :, :, 1, 1)] * y)) + (inv_jac_mats[(:, :, :, 2, 1)] * z)) pushed_fields[(:, :, :, 2, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 2)]) * x) + (inv_jac_mats[(:, :, :, 1, 2)] * y)) + (inv_jac_mats[(:, :, :, 2, 2)] * z))
Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (3, n_x1, n_x2, n_x3, n_f) where: * 3 is the logical dimension of the problem * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_x3 is the number of points in direction 3 of the implicit grid * n_f is the number of fields to push-forward in the Hcurl space. inv_jac_mats: ndarray Inverses of the Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields
psydac/core/kernels.py
pushforward_3d_hcurl
mayuri-dhote/psydac
0
python
def pushforward_3d_hcurl(fields_to_push: 'float[:,:,:,:,:]', inv_jac_mats: 'float[:,:,:,:,:]', pushed_fields: 'float[:,:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (3, n_x1, n_x2, n_x3, n_f) where:\n * 3 is the logical dimension of the problem\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid\n * n_f is the number of fields to push-forward in the Hcurl space.\n\n inv_jac_mats: ndarray\n Inverses of the Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[4]): x = fields_to_push[(0, :, :, :, i_f)] y = fields_to_push[(1, :, :, :, i_f)] z = fields_to_push[(2, :, :, :, i_f)] pushed_fields[(:, :, :, 0, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 0)]) * x) + (inv_jac_mats[(:, :, :, 1, 0)] * y)) + (inv_jac_mats[(:, :, :, 2, 0)] * z)) pushed_fields[(:, :, :, 1, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 1)]) * x) + (inv_jac_mats[(:, :, :, 1, 1)] * y)) + (inv_jac_mats[(:, :, :, 2, 1)] * z)) pushed_fields[(:, :, :, 2, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 2)]) * x) + (inv_jac_mats[(:, :, :, 1, 2)] * y)) + (inv_jac_mats[(:, :, :, 2, 2)] * z))
def pushforward_3d_hcurl(fields_to_push: 'float[:,:,:,:,:]', inv_jac_mats: 'float[:,:,:,:,:]', pushed_fields: 'float[:,:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (3, n_x1, n_x2, n_x3, n_f) where:\n * 3 is the logical dimension of the problem\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid\n * n_f is the number of fields to push-forward in the Hcurl space.\n\n inv_jac_mats: ndarray\n Inverses of the Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[4]): x = fields_to_push[(0, :, :, :, i_f)] y = fields_to_push[(1, :, :, :, i_f)] z = fields_to_push[(2, :, :, :, i_f)] pushed_fields[(:, :, :, 0, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 0)]) * x) + (inv_jac_mats[(:, :, :, 1, 0)] * y)) + (inv_jac_mats[(:, :, :, 2, 0)] * z)) pushed_fields[(:, :, :, 1, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 1)]) * x) + (inv_jac_mats[(:, :, :, 1, 1)] * y)) + (inv_jac_mats[(:, :, :, 2, 1)] * z)) pushed_fields[(:, :, :, 2, i_f)] = ((((+ inv_jac_mats[(:, :, :, 0, 2)]) * x) + (inv_jac_mats[(:, :, :, 1, 2)] * y)) + (inv_jac_mats[(:, :, :, 2, 2)] * z))<|docstring|>Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (3, n_x1, n_x2, n_x3, n_f) where: * 3 is the logical dimension of the problem * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_x3 is the number of points in direction 3 of the implicit grid * n_f is the number of fields to push-forward in the Hcurl space. inv_jac_mats: ndarray Inverses of the Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields<|endoftext|>
303a12cbd3d9176551d57d926d553b05e2bab2860dd50c56090798c1c0fceeb1
def pushforward_2d_hdiv(fields_to_push: 'float[:,:,:,:]', jac_mats: 'float[:,:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (2, n_x1, n_x2, n_f) where:\n * 2 is the logical dimension of the problem (2 here)\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the Hdiv space.\n\n jac_mats: ndarray\n Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, 0, i_f)] = (((+ jac_mats[(:, :, 0, 0)]) * fields_to_push[(0, :, :, i_f)]) + (jac_mats[(:, :, 0, 1)] * fields_to_push[(1, :, :, i_f)])) pushed_fields[(:, :, 1, i_f)] = (((+ jac_mats[(:, :, 1, 0)]) * fields_to_push[(0, :, :, i_f)]) + (jac_mats[(:, :, 1, 1)] * fields_to_push[(1, :, :, i_f)]))
Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (2, n_x1, n_x2, n_f) where: * 2 is the logical dimension of the problem (2 here) * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_f is the number of fields to push-forward in the Hdiv space. jac_mats: ndarray Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields
psydac/core/kernels.py
pushforward_2d_hdiv
mayuri-dhote/psydac
0
python
def pushforward_2d_hdiv(fields_to_push: 'float[:,:,:,:]', jac_mats: 'float[:,:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (2, n_x1, n_x2, n_f) where:\n * 2 is the logical dimension of the problem (2 here)\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the Hdiv space.\n\n jac_mats: ndarray\n Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, 0, i_f)] = (((+ jac_mats[(:, :, 0, 0)]) * fields_to_push[(0, :, :, i_f)]) + (jac_mats[(:, :, 0, 1)] * fields_to_push[(1, :, :, i_f)])) pushed_fields[(:, :, 1, i_f)] = (((+ jac_mats[(:, :, 1, 0)]) * fields_to_push[(0, :, :, i_f)]) + (jac_mats[(:, :, 1, 1)] * fields_to_push[(1, :, :, i_f)]))
def pushforward_2d_hdiv(fields_to_push: 'float[:,:,:,:]', jac_mats: 'float[:,:,:,:]', pushed_fields: 'float[:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (2, n_x1, n_x2, n_f) where:\n * 2 is the logical dimension of the problem (2 here)\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_f is the number of fields to push-forward in the Hdiv space.\n\n jac_mats: ndarray\n Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[3]): pushed_fields[(:, :, 0, i_f)] = (((+ jac_mats[(:, :, 0, 0)]) * fields_to_push[(0, :, :, i_f)]) + (jac_mats[(:, :, 0, 1)] * fields_to_push[(1, :, :, i_f)])) pushed_fields[(:, :, 1, i_f)] = (((+ jac_mats[(:, :, 1, 0)]) * fields_to_push[(0, :, :, i_f)]) + (jac_mats[(:, :, 1, 1)] * fields_to_push[(1, :, :, i_f)]))<|docstring|>Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (2, n_x1, n_x2, n_f) where: * 2 is the logical dimension of the problem (2 here) * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_f is the number of fields to push-forward in the Hdiv space. jac_mats: ndarray Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields<|endoftext|>
e6ef1442fa8b22830e674ca23c8a4ebb9d96c88c50771f459b16ca2aa4f76f34
def pushforward_3d_hdiv(fields_to_push: 'float[:,:,:,:,:]', jac_mats: 'float[:,:,:,:,:]', pushed_fields: 'float[:,:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (3, n_x1, n_x2, n_x3, n_f) where:\n * 3 is the logical dimension of the problem\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid\n * n_f is the number of fields to push-forward in the Hdiv space.\n\n jac_mats: ndarray\n Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[4]): x = fields_to_push[(0, :, :, :, i_f)] y = fields_to_push[(1, :, :, :, i_f)] z = fields_to_push[(2, :, :, :, i_f)] pushed_fields[(:, :, :, 0, i_f)] = ((((+ jac_mats[(:, :, :, 0, 0)]) * x) + (jac_mats[(:, :, :, 0, 1)] * y)) + (jac_mats[(:, :, :, 0, 2)] * z)) pushed_fields[(:, :, :, 1, i_f)] = ((((+ jac_mats[(:, :, :, 1, 0)]) * x) + (jac_mats[(:, :, :, 1, 1)] * y)) + (jac_mats[(:, :, :, 1, 2)] * z)) pushed_fields[(:, :, :, 2, i_f)] = ((((+ jac_mats[(:, :, :, 2, 0)]) * x) + (jac_mats[(:, :, :, 2, 1)] * y)) + (jac_mats[(:, :, :, 2, 2)] * z))
Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (3, n_x1, n_x2, n_x3, n_f) where: * 3 is the logical dimension of the problem * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_x3 is the number of points in direction 3 of the implicit grid * n_f is the number of fields to push-forward in the Hdiv space. jac_mats: ndarray Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields
psydac/core/kernels.py
pushforward_3d_hdiv
mayuri-dhote/psydac
0
python
def pushforward_3d_hdiv(fields_to_push: 'float[:,:,:,:,:]', jac_mats: 'float[:,:,:,:,:]', pushed_fields: 'float[:,:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (3, n_x1, n_x2, n_x3, n_f) where:\n * 3 is the logical dimension of the problem\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid\n * n_f is the number of fields to push-forward in the Hdiv space.\n\n jac_mats: ndarray\n Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[4]): x = fields_to_push[(0, :, :, :, i_f)] y = fields_to_push[(1, :, :, :, i_f)] z = fields_to_push[(2, :, :, :, i_f)] pushed_fields[(:, :, :, 0, i_f)] = ((((+ jac_mats[(:, :, :, 0, 0)]) * x) + (jac_mats[(:, :, :, 0, 1)] * y)) + (jac_mats[(:, :, :, 0, 2)] * z)) pushed_fields[(:, :, :, 1, i_f)] = ((((+ jac_mats[(:, :, :, 1, 0)]) * x) + (jac_mats[(:, :, :, 1, 1)] * y)) + (jac_mats[(:, :, :, 1, 2)] * z)) pushed_fields[(:, :, :, 2, i_f)] = ((((+ jac_mats[(:, :, :, 2, 0)]) * x) + (jac_mats[(:, :, :, 2, 1)] * y)) + (jac_mats[(:, :, :, 2, 2)] * z))
def pushforward_3d_hdiv(fields_to_push: 'float[:,:,:,:,:]', jac_mats: 'float[:,:,:,:,:]', pushed_fields: 'float[:,:,:,:,:]'): '\n\n Parameters\n ----------\n fields_to_push: ndarray\n Field values to push forward on the mapping\n This array as shape (3, n_x1, n_x2, n_x3, n_f) where:\n * 3 is the logical dimension of the problem\n * n_x1 is the number of points in direction 1 of the implicit grid.\n * n_x2 is the number of points in direction 2 of the implicit grid.\n * n_x3 is the number of points in direction 3 of the implicit grid\n * n_f is the number of fields to push-forward in the Hdiv space.\n\n jac_mats: ndarray\n Jacobian matrix of the mapping\n\n pushed_fields: ndarray\n Push forwarded fields\n ' for i_f in range(pushed_fields.shape[4]): x = fields_to_push[(0, :, :, :, i_f)] y = fields_to_push[(1, :, :, :, i_f)] z = fields_to_push[(2, :, :, :, i_f)] pushed_fields[(:, :, :, 0, i_f)] = ((((+ jac_mats[(:, :, :, 0, 0)]) * x) + (jac_mats[(:, :, :, 0, 1)] * y)) + (jac_mats[(:, :, :, 0, 2)] * z)) pushed_fields[(:, :, :, 1, i_f)] = ((((+ jac_mats[(:, :, :, 1, 0)]) * x) + (jac_mats[(:, :, :, 1, 1)] * y)) + (jac_mats[(:, :, :, 1, 2)] * z)) pushed_fields[(:, :, :, 2, i_f)] = ((((+ jac_mats[(:, :, :, 2, 0)]) * x) + (jac_mats[(:, :, :, 2, 1)] * y)) + (jac_mats[(:, :, :, 2, 2)] * z))<|docstring|>Parameters ---------- fields_to_push: ndarray Field values to push forward on the mapping This array as shape (3, n_x1, n_x2, n_x3, n_f) where: * 3 is the logical dimension of the problem * n_x1 is the number of points in direction 1 of the implicit grid. * n_x2 is the number of points in direction 2 of the implicit grid. * n_x3 is the number of points in direction 3 of the implicit grid * n_f is the number of fields to push-forward in the Hdiv space. jac_mats: ndarray Jacobian matrix of the mapping pushed_fields: ndarray Push forwarded fields<|endoftext|>
aefc9a4718a37d026dc989f2dc1776bb949ec473bdadf4e5d6aa501f1acbd9b3
def test_sqpdfo_truncated_cg(self): '\n Test comparing matlab results with python results\n ' (u, info_t) = sqpdfo_truncated_cg_(self.A, self.b, self.delta, self.max_iter, self.tol) self.assertEqual(u, 0.816496580927725) self.assertEqual(info_t.flag, 0) self.assertEqual(info_t.iter, 2) self.assertEqual(info_t.prec, 0) self.assertEqual(info_t.curv, 1)
Test comparing matlab results with python results
tests/sqpdfo_truncated_cg_test.py
test_sqpdfo_truncated_cg
DLR-SC/sqpdfo
10
python
def test_sqpdfo_truncated_cg(self): '\n \n ' (u, info_t) = sqpdfo_truncated_cg_(self.A, self.b, self.delta, self.max_iter, self.tol) self.assertEqual(u, 0.816496580927725) self.assertEqual(info_t.flag, 0) self.assertEqual(info_t.iter, 2) self.assertEqual(info_t.prec, 0) self.assertEqual(info_t.curv, 1)
def test_sqpdfo_truncated_cg(self): '\n \n ' (u, info_t) = sqpdfo_truncated_cg_(self.A, self.b, self.delta, self.max_iter, self.tol) self.assertEqual(u, 0.816496580927725) self.assertEqual(info_t.flag, 0) self.assertEqual(info_t.iter, 2) self.assertEqual(info_t.prec, 0) self.assertEqual(info_t.curv, 1)<|docstring|>Test comparing matlab results with python results<|endoftext|>
4791d8d39aa70310f1b8d298d60014f5df48dc0a41722f87775b8c61a0c1354c
def vgg16(pretrained=False, progress=True, **kwargs): 'VGG 16-layer model (configuration "D")\n `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>\'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n ' return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
VGG 16-layer model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
CDS_pretraining/models/vgg.py
vgg16
VisionLearningGroup/CDS
7
python
def vgg16(pretrained=False, progress=True, **kwargs): 'VGG 16-layer model (configuration "D")\n `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>\'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n ' return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs): 'VGG 16-layer model (configuration "D")\n `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>\'_\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n ' return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)<|docstring|>VGG 16-layer model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>'_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr<|endoftext|>
4671c165072e6e9da11c8ef778b5ef1c7eb659cfcf98a40db2db757ab8bdf351
def load_templates(self): '\n Loads templates from configuration templates file.\n ' with open(SETTINGS['TEMPLATESFILE'], 'r') as stream: try: self.templates = yaml.load(stream) except yaml.YAMLError as exc: print(exc)
Loads templates from configuration templates file.
virtapi/model/template.py
load_templates
spiperac/virtapi
11
python
def load_templates(self): '\n \n ' with open(SETTINGS['TEMPLATESFILE'], 'r') as stream: try: self.templates = yaml.load(stream) except yaml.YAMLError as exc: print(exc)
def load_templates(self): '\n \n ' with open(SETTINGS['TEMPLATESFILE'], 'r') as stream: try: self.templates = yaml.load(stream) except yaml.YAMLError as exc: print(exc)<|docstring|>Loads templates from configuration templates file.<|endoftext|>
7546d8919afdfbc73cd6a5de39d1ce0950728e3613245fa7494d21b21063d0b6
def save_templates(self): '\n Save templates from templates object.\n ' with open(SETTINGS['TEMPLATESFILE'], 'w') as stream: try: yaml.dump(self.templates, stream) except yaml.YAMLError as exc: print(exc)
Save templates from templates object.
virtapi/model/template.py
save_templates
spiperac/virtapi
11
python
def save_templates(self): '\n \n ' with open(SETTINGS['TEMPLATESFILE'], 'w') as stream: try: yaml.dump(self.templates, stream) except yaml.YAMLError as exc: print(exc)
def save_templates(self): '\n \n ' with open(SETTINGS['TEMPLATESFILE'], 'w') as stream: try: yaml.dump(self.templates, stream) except yaml.YAMLError as exc: print(exc)<|docstring|>Save templates from templates object.<|endoftext|>
7fc7ef37fed9c4307f3e0fe22235a41caab7a71aaadb203430fb39d5d1dc358a
def load_operating_systems(self): '\n Loading operating systems from templates.\n ' if (self.templates == None): pass else: for template in self.templates: self.operating_systems.append(str(template['os']))
Loading operating systems from templates.
virtapi/model/template.py
load_operating_systems
spiperac/virtapi
11
python
def load_operating_systems(self): '\n \n ' if (self.templates == None): pass else: for template in self.templates: self.operating_systems.append(str(template['os']))
def load_operating_systems(self): '\n \n ' if (self.templates == None): pass else: for template in self.templates: self.operating_systems.append(str(template['os']))<|docstring|>Loading operating systems from templates.<|endoftext|>
7b6bb83a53d5ff641471cb0c9c9dc13fc1eb079919822a039025a41672a2e91c
def get_os_list(self): '\n Returns just loaded list of operating systems.\n ' return self.operating_systems
Returns just loaded list of operating systems.
virtapi/model/template.py
get_os_list
spiperac/virtapi
11
python
def get_os_list(self): '\n \n ' return self.operating_systems
def get_os_list(self): '\n \n ' return self.operating_systems<|docstring|>Returns just loaded list of operating systems.<|endoftext|>
b2f4a9fa6e0dc6cbaa27b33093ebf2244d554ebde83253c24137c3edec28a592
def get_os_versions(self, os): '\n Returns a list of versions for operating system.\n ' versions = [] for template in self.templates: if (template['os'] == os): versions.append(template['version']) return versions
Returns a list of versions for operating system.
virtapi/model/template.py
get_os_versions
spiperac/virtapi
11
python
def get_os_versions(self, os): '\n \n ' versions = [] for template in self.templates: if (template['os'] == os): versions.append(template['version']) return versions
def get_os_versions(self, os): '\n \n ' versions = [] for template in self.templates: if (template['os'] == os): versions.append(template['version']) return versions<|docstring|>Returns a list of versions for operating system.<|endoftext|>
d1c70d04b13ae12e3e993d11827b74e770d2b3ba7631334ef1296a99563bf433
def get_iso_link(self, os, version): '\n Returns iso path for selected os and version.\n ' iso_link = None for template in self.templates: if ((template['os'] == os) and (str(template['version']) == str(version))): iso_link = template['iso'] return iso_link
Returns iso path for selected os and version.
virtapi/model/template.py
get_iso_link
spiperac/virtapi
11
python
def get_iso_link(self, os, version): '\n \n ' iso_link = None for template in self.templates: if ((template['os'] == os) and (str(template['version']) == str(version))): iso_link = template['iso'] return iso_link
def get_iso_link(self, os, version): '\n \n ' iso_link = None for template in self.templates: if ((template['os'] == os) and (str(template['version']) == str(version))): iso_link = template['iso'] return iso_link<|docstring|>Returns iso path for selected os and version.<|endoftext|>
abe5ecd75bb1563eaca36b7f332071f186942573604920ca5efea91d535b615c
def fetch_template(self, os, version): '\n Checks if template exist and if not, download and save it in the standard templates path.\n ' link = self.get_iso_link(os, version) save_path = SETTINGS['TEMPLATESPATH'] if self.check_exists(link): print('Template already exists.') return os.path.basename(link) try: filename = download(link, save_path) return filename except Exception as e: print('Download of the template failed.') raise e
Checks if template exist and if not, download and save it in the standard templates path.
virtapi/model/template.py
fetch_template
spiperac/virtapi
11
python
def fetch_template(self, os, version): '\n \n ' link = self.get_iso_link(os, version) save_path = SETTINGS['TEMPLATESPATH'] if self.check_exists(link): print('Template already exists.') return os.path.basename(link) try: filename = download(link, save_path) return filename except Exception as e: print('Download of the template failed.') raise e
def fetch_template(self, os, version): '\n \n ' link = self.get_iso_link(os, version) save_path = SETTINGS['TEMPLATESPATH'] if self.check_exists(link): print('Template already exists.') return os.path.basename(link) try: filename = download(link, save_path) return filename except Exception as e: print('Download of the template failed.') raise e<|docstring|>Checks if template exist and if not, download and save it in the standard templates path.<|endoftext|>
bd2a77b643eac32f49fe6f49cddf93cdd3020974eeebc196b21d11fce7752604
def check_exists(self, template_iso): '\n Check if template exists.\n ' template_file = '{}/{}'.format(SETTINGS['TEMPLATESPATH'], os.path.basename(template_iso)) exists = os.path.exists(template_file) return exists
Check if template exists.
virtapi/model/template.py
check_exists
spiperac/virtapi
11
python
def check_exists(self, template_iso): '\n \n ' template_file = '{}/{}'.format(SETTINGS['TEMPLATESPATH'], os.path.basename(template_iso)) exists = os.path.exists(template_file) return exists
def check_exists(self, template_iso): '\n \n ' template_file = '{}/{}'.format(SETTINGS['TEMPLATESPATH'], os.path.basename(template_iso)) exists = os.path.exists(template_file) return exists<|docstring|>Check if template exists.<|endoftext|>
da4015206e4c77c296b18f3cad705e565f26e8b6137117faf99aa35a47334e40
def uninstall(project_list): "Uninstall a list of projects.\n\n Returns a dictionary with the following keys and values:\n\n * `'uninstalled'` - a list of strings containing the names of the projects\n that were successfully uninstalled.\n\n * `'failed'` - a list of strings containing the names of the projects that\n failed to uninstall.\n\n :param project_list: the names of the projects to uninstall.\n :type project_list: iterable of strings\n :rtype: dictionary\n\n " result = {'uninstalled': [], 'failed': []} for project in project_list: try: success = packaging.install.remove(project) except Exception as e: logger.exception(e) raise if success: result['uninstalled'].append(project) else: result['failed'].append(project) return result
Uninstall a list of projects. Returns a dictionary with the following keys and values: * `'uninstalled'` - a list of strings containing the names of the projects that were successfully uninstalled. * `'failed'` - a list of strings containing the names of the projects that failed to uninstall. :param project_list: the names of the projects to uninstall. :type project_list: iterable of strings :rtype: dictionary
pip2/commands/uninstall.py
uninstall
osupython/pip2
4
python
def uninstall(project_list): "Uninstall a list of projects.\n\n Returns a dictionary with the following keys and values:\n\n * `'uninstalled'` - a list of strings containing the names of the projects\n that were successfully uninstalled.\n\n * `'failed'` - a list of strings containing the names of the projects that\n failed to uninstall.\n\n :param project_list: the names of the projects to uninstall.\n :type project_list: iterable of strings\n :rtype: dictionary\n\n " result = {'uninstalled': [], 'failed': []} for project in project_list: try: success = packaging.install.remove(project) except Exception as e: logger.exception(e) raise if success: result['uninstalled'].append(project) else: result['failed'].append(project) return result
def uninstall(project_list): "Uninstall a list of projects.\n\n Returns a dictionary with the following keys and values:\n\n * `'uninstalled'` - a list of strings containing the names of the projects\n that were successfully uninstalled.\n\n * `'failed'` - a list of strings containing the names of the projects that\n failed to uninstall.\n\n :param project_list: the names of the projects to uninstall.\n :type project_list: iterable of strings\n :rtype: dictionary\n\n " result = {'uninstalled': [], 'failed': []} for project in project_list: try: success = packaging.install.remove(project) except Exception as e: logger.exception(e) raise if success: result['uninstalled'].append(project) else: result['failed'].append(project) return result<|docstring|>Uninstall a list of projects. Returns a dictionary with the following keys and values: * `'uninstalled'` - a list of strings containing the names of the projects that were successfully uninstalled. * `'failed'` - a list of strings containing the names of the projects that failed to uninstall. :param project_list: the names of the projects to uninstall. :type project_list: iterable of strings :rtype: dictionary<|endoftext|>
850534c40c30e2162d03710742cc7aa43c7263cd86fac3a93bfd4408b8507364
def get_predicate_subject_complement_phrases(doc, sent): '\n extract predicates with:\n -subject phrase\n -complement phrase\n\n :param spacy.tokens.Sent sent: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, complement)\n ' output = [] predicates = {} for token in sent: if (token.dep_ == 'nsubj'): predicates[token.head.i] = token.i for (pred_token, pred_info) in predicates.items(): subject_id = pred_info subject_tokens = get_dependent_tokens(subject_id, pred_token, sent) subject_tokens.extend([subject_id]) subject_tokens.sort() subject_phrase = '' for token in subject_tokens: subject_phrase += (' ' + doc[token].text) complement_tokens = get_dependent_tokens(pred_token, subject_id, sent) complement_tokens.sort() if complement_tokens: complement_phrase = '' for token in complement_tokens: complement_phrase += (' ' + doc[token].text) one_row = (doc[pred_token].lemma_, subject_phrase, complement_phrase) output.append(one_row) return output
extract predicates with: -subject phrase -complement phrase :param spacy.tokens.Sent sent: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, complement)
src/cltl/triple_extraction/spacy_triples/dep_to_triple.py
get_predicate_subject_complement_phrases
leolani/cltl-knowledgeextraction
0
python
def get_predicate_subject_complement_phrases(doc, sent): '\n extract predicates with:\n -subject phrase\n -complement phrase\n\n :param spacy.tokens.Sent sent: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, complement)\n ' output = [] predicates = {} for token in sent: if (token.dep_ == 'nsubj'): predicates[token.head.i] = token.i for (pred_token, pred_info) in predicates.items(): subject_id = pred_info subject_tokens = get_dependent_tokens(subject_id, pred_token, sent) subject_tokens.extend([subject_id]) subject_tokens.sort() subject_phrase = for token in subject_tokens: subject_phrase += (' ' + doc[token].text) complement_tokens = get_dependent_tokens(pred_token, subject_id, sent) complement_tokens.sort() if complement_tokens: complement_phrase = for token in complement_tokens: complement_phrase += (' ' + doc[token].text) one_row = (doc[pred_token].lemma_, subject_phrase, complement_phrase) output.append(one_row) return output
def get_predicate_subject_complement_phrases(doc, sent): '\n extract predicates with:\n -subject phrase\n -complement phrase\n\n :param spacy.tokens.Sent sent: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, complement)\n ' output = [] predicates = {} for token in sent: if (token.dep_ == 'nsubj'): predicates[token.head.i] = token.i for (pred_token, pred_info) in predicates.items(): subject_id = pred_info subject_tokens = get_dependent_tokens(subject_id, pred_token, sent) subject_tokens.extend([subject_id]) subject_tokens.sort() subject_phrase = for token in subject_tokens: subject_phrase += (' ' + doc[token].text) complement_tokens = get_dependent_tokens(pred_token, subject_id, sent) complement_tokens.sort() if complement_tokens: complement_phrase = for token in complement_tokens: complement_phrase += (' ' + doc[token].text) one_row = (doc[pred_token].lemma_, subject_phrase, complement_phrase) output.append(one_row) return output<|docstring|>extract predicates with: -subject phrase -complement phrase :param spacy.tokens.Sent sent: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, complement)<|endoftext|>
63a0bccf23d109b81348716082facc6aea56979a062ef9c63a11b1a0bd440723
def get_subj_obj_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_obj_triples_with_spacy') rels = {'nsubj', 'dobj', 'xcomp'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if (head_id not in predicates): predicates[head_id] = dict() predicates[head_id]['head'] = None predicates[head_id]['tail'] = None if (token.dep_ == 'nsubj'): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'dobj') or (token.dep == 'xcomp')): if (token.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN') or (token.pos_ == 'ADJ')): predicates[head_id]['tail'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ triple = 
predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - pred - obj', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)
src/cltl/triple_extraction/spacy_triples/dep_to_triple.py
get_subj_obj_triples_with_spacy
leolani/cltl-knowledgeextraction
0
python
def get_subj_obj_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_obj_triples_with_spacy') rels = {'nsubj', 'dobj', 'xcomp'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if (head_id not in predicates): predicates[head_id] = dict() predicates[head_id]['head'] = None predicates[head_id]['tail'] = None if (token.dep_ == 'nsubj'): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'dobj') or (token.dep == 'xcomp')): if (token.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN') or (token.pos_ == 'ADJ')): predicates[head_id]['tail'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ triple = 
predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - pred - obj', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
def get_subj_obj_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_obj_triples_with_spacy') rels = {'nsubj', 'dobj', 'xcomp'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if (head_id not in predicates): predicates[head_id] = dict() predicates[head_id]['head'] = None predicates[head_id]['tail'] = None if (token.dep_ == 'nsubj'): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'dobj') or (token.dep == 'xcomp')): if (token.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN') or (token.pos_ == 'ADJ')): predicates[head_id]['tail'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ triple = 
predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - pred - obj', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))<|docstring|>extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)<|endoftext|>
d5ab5e0908fd639e259adcf9351bf82480039c2bd19212f932ee7611d08dbec3
def get_subj_amod_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_amod_triples_with_spacy') rels = {'nsubj', 'nsubjpass', 'acomp'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if ((token.dep_ == 'nsubj') or (token.dep_ == 'nsubjpass')): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif (token.pos_ == 'PROPN'): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'acomp') or (token.dep == 'auxpass')): predicates[head_id]['tail'] = token.lemma_ for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - aux - amod', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)
src/cltl/triple_extraction/spacy_triples/dep_to_triple.py
get_subj_amod_triples_with_spacy
leolani/cltl-knowledgeextraction
0
python
def get_subj_amod_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_amod_triples_with_spacy') rels = {'nsubj', 'nsubjpass', 'acomp'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if ((token.dep_ == 'nsubj') or (token.dep_ == 'nsubjpass')): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif (token.pos_ == 'PROPN'): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'acomp') or (token.dep == 'auxpass')): predicates[head_id]['tail'] = token.lemma_ for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - aux - amod', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
def get_subj_amod_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_amod_triples_with_spacy') rels = {'nsubj', 'nsubjpass', 'acomp'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if ((token.dep_ == 'nsubj') or (token.dep_ == 'nsubjpass')): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif (token.pos_ == 'PROPN'): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'acomp') or (token.dep == 'auxpass')): predicates[head_id]['tail'] = token.lemma_ for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - aux - amod', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))<|docstring|>extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)<|endoftext|>
3f46ac1daacfbdf0c8a0b1b238849695b5580746022b1f806b0548e0df556865
def get_subj_attr_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_attr_triples_with_spacy') rels = {'nsubj', 'intj', 'apposattr'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if ((token.dep_ == 'nsubj') or (token.dep_ == 'intj')): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'attr') or (token.dep_ == 'appos')): if (token.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN') or (token.pos_ == 'ADJ')): predicates[head_id]['tail'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ print(predicate, pred_info) triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not 
(triple in triples))): triples.append(triple) print('Triples subj - pred - attr', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)
src/cltl/triple_extraction/spacy_triples/dep_to_triple.py
get_subj_attr_triples_with_spacy
leolani/cltl-knowledgeextraction
0
python
def get_subj_attr_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_attr_triples_with_spacy') rels = {'nsubj', 'intj', 'apposattr'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if ((token.dep_ == 'nsubj') or (token.dep_ == 'intj')): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'attr') or (token.dep_ == 'appos')): if (token.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN') or (token.pos_ == 'ADJ')): predicates[head_id]['tail'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ print(predicate, pred_info) triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not 
(triple in triples))): triples.append(triple) print('Triples subj - pred - attr', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
def get_subj_attr_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_attr_triples_with_spacy') rels = {'nsubj', 'intj', 'apposattr'} doc = nlp(utterance) triples = [] predicates = {} subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if ((token.dep_ == 'nsubj') or (token.dep_ == 'intj')): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) if ((token.dep_ == 'attr') or (token.dep_ == 'appos')): if (token.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN') or (token.pos_ == 'ADJ')): predicates[head_id]['tail'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) for (pred_token, pred_info) in predicates.items(): predicate = doc[pred_token].lemma_ print(predicate, pred_info) triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not 
(triple in triples))): triples.append(triple) print('Triples subj - pred - attr', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))<|docstring|>extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)<|endoftext|>
66a043b964881eee94c84606e4dc1e4e9bfcb79bc8bd727de9d23d57c9871674
def get_subj_prep_pobj_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_prep_pobj_triples_with_spacy') rels = {'nsubj', 'nsubjpass', 'prep', 'pobj'} doc = nlp(utterance) triples = [] predicates = {} acomp = [] subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if (token.dep_ == 'nsubj'): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) elif (token.dep_ == 'prep'): predicates[head_id]['prep'] = token.lemma_ for token_dep in doc: if ((token_dep.dep_ == 'pobj') and (token_dep.head.i == token.i)): if (token_dep.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token_dep) speaker_mentions.append(token_dep.text) elif (token_dep.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token_dep) hearer_mentions.append(token_dep.text) elif ((token_dep.pos_ == 'PROPN') or (token_dep.pos_ == 'NOUN') or (token_dep.pos_ == 'ADJ')): predicates[head_id]['tail'] = token_dep.lemma_ subject_tokens.append(token_dep) subject_mentions.append(token_dep.text) for (pred_token, pred_info) in 
predicates.items(): predicate = ((doc[pred_token].lemma_ + '-') + pred_info.get('prep', str(None))) triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - pred - prep-obj', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)
src/cltl/triple_extraction/spacy_triples/dep_to_triple.py
get_subj_prep_pobj_triples_with_spacy
leolani/cltl-knowledgeextraction
0
python
def get_subj_prep_pobj_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_prep_pobj_triples_with_spacy') rels = {'nsubj', 'nsubjpass', 'prep', 'pobj'} doc = nlp(utterance) triples = [] predicates = {} acomp = [] subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if (token.dep_ == 'nsubj'): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) elif (token.dep_ == 'prep'): predicates[head_id]['prep'] = token.lemma_ for token_dep in doc: if ((token_dep.dep_ == 'pobj') and (token_dep.head.i == token.i)): if (token_dep.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token_dep) speaker_mentions.append(token_dep.text) elif (token_dep.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token_dep) hearer_mentions.append(token_dep.text) elif ((token_dep.pos_ == 'PROPN') or (token_dep.pos_ == 'NOUN') or (token_dep.pos_ == 'ADJ')): predicates[head_id]['tail'] = token_dep.lemma_ subject_tokens.append(token_dep) subject_mentions.append(token_dep.text) for (pred_token, pred_info) in 
predicates.items(): predicate = ((doc[pred_token].lemma_ + '-') + pred_info.get('prep', str(None))) triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - pred - prep-obj', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))
def get_subj_prep_pobj_triples_with_spacy(nlp, utterance: str, SPEAKER: str, HEARER: str): '\n extract predicates with:\n -subject\n -object\n\n :param spacy.tokens.doc.Doc doc: spaCy object after processing text\n\n :rtype: list\n :return: list of tuples (predicate, subject, object)\n ' print('get_subj_prep_pobj_triples_with_spacy') rels = {'nsubj', 'nsubjpass', 'prep', 'pobj'} doc = nlp(utterance) triples = [] predicates = {} acomp = [] subject_tokens = [] subject_mentions = [] object_tokens = [] object_mentions = [] speaker_mentions = [] hearer_mentions = [] speaker_tokens = [] hearer_tokens = [] for token in doc: if (token.dep_ == 'ROOT'): predicates[token.i] = dict() predicates[token.i]['head'] = None predicates[token.i]['tail'] = None for token in doc: if predicates.get(token.head.i): head_id = token.head.i if (token.dep_ == 'nsubj'): if (token.text.lower() == 'i'): predicates[head_id]['head'] = SPEAKER speaker_tokens.append(token) speaker_mentions.append(token.text) elif (token.text.lower() == 'you'): predicates[head_id]['head'] = HEARER hearer_tokens.append(token) hearer_mentions.append(token.text) elif ((token.pos_ == 'PROPN') or (token.pos_ == 'NOUN')): predicates[head_id]['head'] = token.lemma_ subject_tokens.append(token) subject_mentions.append(token.text) elif (token.dep_ == 'prep'): predicates[head_id]['prep'] = token.lemma_ for token_dep in doc: if ((token_dep.dep_ == 'pobj') and (token_dep.head.i == token.i)): if (token_dep.text.lower() == 'i'): predicates[head_id]['tail'] = SPEAKER speaker_tokens.append(token_dep) speaker_mentions.append(token_dep.text) elif (token_dep.text.lower() == 'you'): predicates[head_id]['tail'] = HEARER hearer_tokens.append(token_dep) hearer_mentions.append(token_dep.text) elif ((token_dep.pos_ == 'PROPN') or (token_dep.pos_ == 'NOUN') or (token_dep.pos_ == 'ADJ')): predicates[head_id]['tail'] = token_dep.lemma_ subject_tokens.append(token_dep) subject_mentions.append(token_dep.text) for (pred_token, pred_info) in 
predicates.items(): predicate = ((doc[pred_token].lemma_ + '-') + pred_info.get('prep', str(None))) triple = predicateInfoToTriple(pred_info, predicate) if (triple and (not (triple in triples))): triples.append(triple) print('Triples subj - pred - prep-obj', triples) return (triples, zip(speaker_tokens, speaker_mentions), zip(hearer_tokens, hearer_mentions), zip(subject_tokens, subject_mentions), zip(object_tokens, object_mentions))<|docstring|>extract predicates with: -subject -object :param spacy.tokens.doc.Doc doc: spaCy object after processing text :rtype: list :return: list of tuples (predicate, subject, object)<|endoftext|>
ac8fd136bb68e15a4a8fa229cb06291029ea1ea3d8e0e1507a39d83c77b93647
def get(self, spider, **params): "Get a spider object for a given spider name.\n\n The method gets/sets spider id (and checks if spider exists).\n\n :param spider: a string spider name.\n :return: a spider object.\n :rtype: :class:`scrapinghub.client.spiders.Spider`\n\n Usage::\n\n >>> project.spiders.get('spider2')\n <scrapinghub.client.spiders.Spider at 0x106ee3748>\n >>> project.spiders.get('non-existing')\n NotFound: Spider non-existing doesn't exist.\n " project = self._client._hsclient.get_project(self.project_id) spider_id = project.ids.spider(spider, **params) if (spider_id is None): raise NotFound("Spider {} doesn't exist.".format(spider)) return Spider(self._client, self.project_id, spider_id, spider)
Get a spider object for a given spider name. The method gets/sets spider id (and checks if spider exists). :param spider: a string spider name. :return: a spider object. :rtype: :class:`scrapinghub.client.spiders.Spider` Usage:: >>> project.spiders.get('spider2') <scrapinghub.client.spiders.Spider at 0x106ee3748> >>> project.spiders.get('non-existing') NotFound: Spider non-existing doesn't exist.
scrapinghub/client/spiders.py
get
noviluni/python-scrapinghub
163
python
def get(self, spider, **params): "Get a spider object for a given spider name.\n\n The method gets/sets spider id (and checks if spider exists).\n\n :param spider: a string spider name.\n :return: a spider object.\n :rtype: :class:`scrapinghub.client.spiders.Spider`\n\n Usage::\n\n >>> project.spiders.get('spider2')\n <scrapinghub.client.spiders.Spider at 0x106ee3748>\n >>> project.spiders.get('non-existing')\n NotFound: Spider non-existing doesn't exist.\n " project = self._client._hsclient.get_project(self.project_id) spider_id = project.ids.spider(spider, **params) if (spider_id is None): raise NotFound("Spider {} doesn't exist.".format(spider)) return Spider(self._client, self.project_id, spider_id, spider)
def get(self, spider, **params): "Get a spider object for a given spider name.\n\n The method gets/sets spider id (and checks if spider exists).\n\n :param spider: a string spider name.\n :return: a spider object.\n :rtype: :class:`scrapinghub.client.spiders.Spider`\n\n Usage::\n\n >>> project.spiders.get('spider2')\n <scrapinghub.client.spiders.Spider at 0x106ee3748>\n >>> project.spiders.get('non-existing')\n NotFound: Spider non-existing doesn't exist.\n " project = self._client._hsclient.get_project(self.project_id) spider_id = project.ids.spider(spider, **params) if (spider_id is None): raise NotFound("Spider {} doesn't exist.".format(spider)) return Spider(self._client, self.project_id, spider_id, spider)<|docstring|>Get a spider object for a given spider name. The method gets/sets spider id (and checks if spider exists). :param spider: a string spider name. :return: a spider object. :rtype: :class:`scrapinghub.client.spiders.Spider` Usage:: >>> project.spiders.get('spider2') <scrapinghub.client.spiders.Spider at 0x106ee3748> >>> project.spiders.get('non-existing') NotFound: Spider non-existing doesn't exist.<|endoftext|>
c41375c4222812872d9b01acdfdbb71e59202a2b6a56d60914c980470a66de76
def list(self): "Get a list of spiders for a project.\n\n :return: a list of dictionaries with spiders metadata.\n :rtype: :class:`list[dict]`\n\n Usage::\n\n >>> project.spiders.list()\n [{'id': 'spider1', 'tags': [], 'type': 'manual', 'version': '123'},\n {'id': 'spider2', 'tags': [], 'type': 'manual', 'version': '123'}]\n " project = self._client._connection[self.project_id] return project.spiders()
Get a list of spiders for a project. :return: a list of dictionaries with spiders metadata. :rtype: :class:`list[dict]` Usage:: >>> project.spiders.list() [{'id': 'spider1', 'tags': [], 'type': 'manual', 'version': '123'}, {'id': 'spider2', 'tags': [], 'type': 'manual', 'version': '123'}]
scrapinghub/client/spiders.py
list
noviluni/python-scrapinghub
163
python
def list(self): "Get a list of spiders for a project.\n\n :return: a list of dictionaries with spiders metadata.\n :rtype: :class:`list[dict]`\n\n Usage::\n\n >>> project.spiders.list()\n [{'id': 'spider1', 'tags': [], 'type': 'manual', 'version': '123'},\n {'id': 'spider2', 'tags': [], 'type': 'manual', 'version': '123'}]\n " project = self._client._connection[self.project_id] return project.spiders()
def list(self): "Get a list of spiders for a project.\n\n :return: a list of dictionaries with spiders metadata.\n :rtype: :class:`list[dict]`\n\n Usage::\n\n >>> project.spiders.list()\n [{'id': 'spider1', 'tags': [], 'type': 'manual', 'version': '123'},\n {'id': 'spider2', 'tags': [], 'type': 'manual', 'version': '123'}]\n " project = self._client._connection[self.project_id] return project.spiders()<|docstring|>Get a list of spiders for a project. :return: a list of dictionaries with spiders metadata. :rtype: :class:`list[dict]` Usage:: >>> project.spiders.list() [{'id': 'spider1', 'tags': [], 'type': 'manual', 'version': '123'}, {'id': 'spider2', 'tags': [], 'type': 'manual', 'version': '123'}]<|endoftext|>
8bd57f0c3e3e73e378561e363cc5441d62ceaf6f96e5184b7d9e6600e614c5d8
def iter(self): 'Iterate through a list of spiders for a project.\n\n :return: an iterator over spiders list where each spider is represented\n as a dict containing its metadata.\n :rtype: :class:`collection.Iterable[dict]`\n\n Provided for the sake of API consistency.\n ' return iter(self.list())
Iterate through a list of spiders for a project. :return: an iterator over spiders list where each spider is represented as a dict containing its metadata. :rtype: :class:`collection.Iterable[dict]` Provided for the sake of API consistency.
scrapinghub/client/spiders.py
iter
noviluni/python-scrapinghub
163
python
def iter(self): 'Iterate through a list of spiders for a project.\n\n :return: an iterator over spiders list where each spider is represented\n as a dict containing its metadata.\n :rtype: :class:`collection.Iterable[dict]`\n\n Provided for the sake of API consistency.\n ' return iter(self.list())
def iter(self): 'Iterate through a list of spiders for a project.\n\n :return: an iterator over spiders list where each spider is represented\n as a dict containing its metadata.\n :rtype: :class:`collection.Iterable[dict]`\n\n Provided for the sake of API consistency.\n ' return iter(self.list())<|docstring|>Iterate through a list of spiders for a project. :return: an iterator over spiders list where each spider is represented as a dict containing its metadata. :rtype: :class:`collection.Iterable[dict]` Provided for the sake of API consistency.<|endoftext|>
64ba6fcf4d4ff7351ae67e294e61c6e076e2280764dbefbb6fef38ebf1201914
@_wrap_http_errors def update_tags(self, add=None, remove=None): 'Update tags for the spider.\n\n :param add: (optional) a list of string tags to add.\n :param remove: (optional) a list of string tags to remove.\n ' params = get_tags_for_update(add=add, remove=remove) path = 'v2/projects/{}/spiders/{}/tags'.format(self.project_id, self._id) url = urljoin(self._client._connection.url, path) response = self._client._connection._session.patch(url, json=params) response.raise_for_status()
Update tags for the spider. :param add: (optional) a list of string tags to add. :param remove: (optional) a list of string tags to remove.
scrapinghub/client/spiders.py
update_tags
noviluni/python-scrapinghub
163
python
@_wrap_http_errors def update_tags(self, add=None, remove=None): 'Update tags for the spider.\n\n :param add: (optional) a list of string tags to add.\n :param remove: (optional) a list of string tags to remove.\n ' params = get_tags_for_update(add=add, remove=remove) path = 'v2/projects/{}/spiders/{}/tags'.format(self.project_id, self._id) url = urljoin(self._client._connection.url, path) response = self._client._connection._session.patch(url, json=params) response.raise_for_status()
@_wrap_http_errors def update_tags(self, add=None, remove=None): 'Update tags for the spider.\n\n :param add: (optional) a list of string tags to add.\n :param remove: (optional) a list of string tags to remove.\n ' params = get_tags_for_update(add=add, remove=remove) path = 'v2/projects/{}/spiders/{}/tags'.format(self.project_id, self._id) url = urljoin(self._client._connection.url, path) response = self._client._connection._session.patch(url, json=params) response.raise_for_status()<|docstring|>Update tags for the spider. :param add: (optional) a list of string tags to add. :param remove: (optional) a list of string tags to remove.<|endoftext|>
27c2426c9006902fb585e482b6c61aafb8f0feed41778f3000a2a3e17b34e3c3
@_wrap_http_errors def list_tags(self): 'List spider tags.\n\n :return: a list of spider tags.\n :rtype: :class:`list[str]`\n ' path = 'v2/projects/{}/spiders/{}'.format(self.project_id, self._id) url = urljoin(self._client._connection.url, path) response = self._client._connection._session.get(url) response.raise_for_status() return response.json().get('tags', [])
List spider tags. :return: a list of spider tags. :rtype: :class:`list[str]`
scrapinghub/client/spiders.py
list_tags
noviluni/python-scrapinghub
163
python
@_wrap_http_errors def list_tags(self): 'List spider tags.\n\n :return: a list of spider tags.\n :rtype: :class:`list[str]`\n ' path = 'v2/projects/{}/spiders/{}'.format(self.project_id, self._id) url = urljoin(self._client._connection.url, path) response = self._client._connection._session.get(url) response.raise_for_status() return response.json().get('tags', [])
@_wrap_http_errors def list_tags(self): 'List spider tags.\n\n :return: a list of spider tags.\n :rtype: :class:`list[str]`\n ' path = 'v2/projects/{}/spiders/{}'.format(self.project_id, self._id) url = urljoin(self._client._connection.url, path) response = self._client._connection._session.get(url) response.raise_for_status() return response.json().get('tags', [])<|docstring|>List spider tags. :return: a list of spider tags. :rtype: :class:`list[str]`<|endoftext|>
5be92139adfbb5db2af3119b7e913d9db6add8799a51dde490d0913cda81f65a
def series(power: float, units: Tuple[str], fallback: Optional[str]=None): "Define a callable to format units in a series.\n\n A series is a collection of units of values increasing linearly from an\n initial unit. For example there're 1024 bytes in 1 KB, 1024 KB in 1 MB,\n 1024 MB in 1 GB. In this case every time we multiply a number by 1024 we\n move to the next unit in the series.\n\n Parameters\n ----------\n power\n The power by which numbers in the series will increase.\n units\n The units in the power series from smallest to largest values.\n fallback\n The unit value to return when a given input is too large to\n fit into any of the specified units.\n\n " def series_wrapper(count: float, limit: float=power) -> Tuple[(float, str)]: "Return the value of count in the current series.\n\n Parameters\n ----------\n count\n The numerical value to reduce to a unit value in the current\n series.\n limit\n The maximum possible value in a given unit that's acceptable.\n For example if this is value is set to 100 then the returned\n count is guaranteed to be the first unit value less than 100.\n\n " for unit in units: if (count < limit): return (count, unit) count /= power return (count, fallback) return series_wrapper
Define a callable to format units in a series. A series is a collection of units of values increasing linearly from an initial unit. For example there're 1024 bytes in 1 KB, 1024 KB in 1 MB, 1024 MB in 1 GB. In this case every time we multiply a number by 1024 we move to the next unit in the series. Parameters ---------- power The power by which numbers in the series will increase. units The units in the power series from smallest to largest values. fallback The unit value to return when a given input is too large to fit into any of the specified units.
bin/lib/python/hurry.py
series
MoHKale/.dotfiles
9
python
def series(power: float, units: Tuple[str], fallback: Optional[str]=None): "Define a callable to format units in a series.\n\n A series is a collection of units of values increasing linearly from an\n initial unit. For example there're 1024 bytes in 1 KB, 1024 KB in 1 MB,\n 1024 MB in 1 GB. In this case every time we multiply a number by 1024 we\n move to the next unit in the series.\n\n Parameters\n ----------\n power\n The power by which numbers in the series will increase.\n units\n The units in the power series from smallest to largest values.\n fallback\n The unit value to return when a given input is too large to\n fit into any of the specified units.\n\n " def series_wrapper(count: float, limit: float=power) -> Tuple[(float, str)]: "Return the value of count in the current series.\n\n Parameters\n ----------\n count\n The numerical value to reduce to a unit value in the current\n series.\n limit\n The maximum possible value in a given unit that's acceptable.\n For example if this is value is set to 100 then the returned\n count is guaranteed to be the first unit value less than 100.\n\n " for unit in units: if (count < limit): return (count, unit) count /= power return (count, fallback) return series_wrapper
def series(power: float, units: Tuple[str], fallback: Optional[str]=None): "Define a callable to format units in a series.\n\n A series is a collection of units of values increasing linearly from an\n initial unit. For example there're 1024 bytes in 1 KB, 1024 KB in 1 MB,\n 1024 MB in 1 GB. In this case every time we multiply a number by 1024 we\n move to the next unit in the series.\n\n Parameters\n ----------\n power\n The power by which numbers in the series will increase.\n units\n The units in the power series from smallest to largest values.\n fallback\n The unit value to return when a given input is too large to\n fit into any of the specified units.\n\n " def series_wrapper(count: float, limit: float=power) -> Tuple[(float, str)]: "Return the value of count in the current series.\n\n Parameters\n ----------\n count\n The numerical value to reduce to a unit value in the current\n series.\n limit\n The maximum possible value in a given unit that's acceptable.\n For example if this is value is set to 100 then the returned\n count is guaranteed to be the first unit value less than 100.\n\n " for unit in units: if (count < limit): return (count, unit) count /= power return (count, fallback) return series_wrapper<|docstring|>Define a callable to format units in a series. A series is a collection of units of values increasing linearly from an initial unit. For example there're 1024 bytes in 1 KB, 1024 KB in 1 MB, 1024 MB in 1 GB. In this case every time we multiply a number by 1024 we move to the next unit in the series. Parameters ---------- power The power by which numbers in the series will increase. units The units in the power series from smallest to largest values. fallback The unit value to return when a given input is too large to fit into any of the specified units.<|endoftext|>
4a92bd8495836270eaf283e6f154ee96cbccd7bda02928daa3e4e1cc5293f802
def series_wrapper(count: float, limit: float=power) -> Tuple[(float, str)]: "Return the value of count in the current series.\n\n Parameters\n ----------\n count\n The numerical value to reduce to a unit value in the current\n series.\n limit\n The maximum possible value in a given unit that's acceptable.\n For example if this is value is set to 100 then the returned\n count is guaranteed to be the first unit value less than 100.\n\n " for unit in units: if (count < limit): return (count, unit) count /= power return (count, fallback)
Return the value of count in the current series. Parameters ---------- count The numerical value to reduce to a unit value in the current series. limit The maximum possible value in a given unit that's acceptable. For example if this is value is set to 100 then the returned count is guaranteed to be the first unit value less than 100.
bin/lib/python/hurry.py
series_wrapper
MoHKale/.dotfiles
9
python
def series_wrapper(count: float, limit: float=power) -> Tuple[(float, str)]: "Return the value of count in the current series.\n\n Parameters\n ----------\n count\n The numerical value to reduce to a unit value in the current\n series.\n limit\n The maximum possible value in a given unit that's acceptable.\n For example if this is value is set to 100 then the returned\n count is guaranteed to be the first unit value less than 100.\n\n " for unit in units: if (count < limit): return (count, unit) count /= power return (count, fallback)
def series_wrapper(count: float, limit: float=power) -> Tuple[(float, str)]: "Return the value of count in the current series.\n\n Parameters\n ----------\n count\n The numerical value to reduce to a unit value in the current\n series.\n limit\n The maximum possible value in a given unit that's acceptable.\n For example if this is value is set to 100 then the returned\n count is guaranteed to be the first unit value less than 100.\n\n " for unit in units: if (count < limit): return (count, unit) count /= power return (count, fallback)<|docstring|>Return the value of count in the current series. Parameters ---------- count The numerical value to reduce to a unit value in the current series. limit The maximum possible value in a given unit that's acceptable. For example if this is value is set to 100 then the returned count is guaranteed to be the first unit value less than 100.<|endoftext|>
4b669446e5648852a79514c09a09ff44c87b616511c2fc48b40a43fa52fcd284
def forward(self, hidden, encoder_outputs): '\n hidden : Previous hidden state of the Decoder (Num. Layers * Num. Directions x Batch Size x Hidden Size)\n encoder_outputs: Outputs from Encoder (Sequence Length x Batch Size x Hidden Size)\n\n return: Attention energies in shape (Batch Size x Sequence Length)\n ' max_len = encoder_outputs.size(0) batch_size = encoder_outputs.size(1) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) attn_energies = self.score(H, encoder_outputs.transpose(0, 1)) return F.softmax(attn_energies, dim=1).unsqueeze(1)
hidden : Previous hidden state of the Decoder (Num. Layers * Num. Directions x Batch Size x Hidden Size) encoder_outputs: Outputs from Encoder (Sequence Length x Batch Size x Hidden Size) return: Attention energies in shape (Batch Size x Sequence Length)
attention.py
forward
fionn-mac/seq2seq-PyTorch
1
python
def forward(self, hidden, encoder_outputs): '\n hidden : Previous hidden state of the Decoder (Num. Layers * Num. Directions x Batch Size x Hidden Size)\n encoder_outputs: Outputs from Encoder (Sequence Length x Batch Size x Hidden Size)\n\n return: Attention energies in shape (Batch Size x Sequence Length)\n ' max_len = encoder_outputs.size(0) batch_size = encoder_outputs.size(1) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) attn_energies = self.score(H, encoder_outputs.transpose(0, 1)) return F.softmax(attn_energies, dim=1).unsqueeze(1)
def forward(self, hidden, encoder_outputs): '\n hidden : Previous hidden state of the Decoder (Num. Layers * Num. Directions x Batch Size x Hidden Size)\n encoder_outputs: Outputs from Encoder (Sequence Length x Batch Size x Hidden Size)\n\n return: Attention energies in shape (Batch Size x Sequence Length)\n ' max_len = encoder_outputs.size(0) batch_size = encoder_outputs.size(1) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) attn_energies = self.score(H, encoder_outputs.transpose(0, 1)) return F.softmax(attn_energies, dim=1).unsqueeze(1)<|docstring|>hidden : Previous hidden state of the Decoder (Num. Layers * Num. Directions x Batch Size x Hidden Size) encoder_outputs: Outputs from Encoder (Sequence Length x Batch Size x Hidden Size) return: Attention energies in shape (Batch Size x Sequence Length)<|endoftext|>
4cb00dbb1e3b0e2c35b46665a2605a03e1037847c09be22616b13bcc4ce2e516
def loss_func(y_true, y_pred): 'Content loss based on VGG19' c_loss = content_loss_22(y_true, y_pred) l1 = tf.keras.losses.mean_absolute_error(y_true, y_pred) l2 = tf.keras.losses.mean_squared_error(y_true, y_pred) total_loss = (((loss_weights[0] * c_loss) + (loss_weights[1] * l1)) + (loss_weights[2] * l2)) return total_loss
Content loss based on VGG19
DFCAN_SR.py
loss_func
m-bizhani/Digital-rock-image-processing
0
python
def loss_func(y_true, y_pred): c_loss = content_loss_22(y_true, y_pred) l1 = tf.keras.losses.mean_absolute_error(y_true, y_pred) l2 = tf.keras.losses.mean_squared_error(y_true, y_pred) total_loss = (((loss_weights[0] * c_loss) + (loss_weights[1] * l1)) + (loss_weights[2] * l2)) return total_loss
def loss_func(y_true, y_pred): c_loss = content_loss_22(y_true, y_pred) l1 = tf.keras.losses.mean_absolute_error(y_true, y_pred) l2 = tf.keras.losses.mean_squared_error(y_true, y_pred) total_loss = (((loss_weights[0] * c_loss) + (loss_weights[1] * l1)) + (loss_weights[2] * l2)) return total_loss<|docstring|>Content loss based on VGG19<|endoftext|>
8b60aa3dd3a19ab4c9973e9c5f23cb03cfb45c9a0ab1de4b923cb4608325c8b8
def make_video(images, outvid=None, fps=5, size=None, is_color=True, format='XVID'): '\n Create a video from a list of images.\n\n @param outvid output video\n @param images list of images to use in the video\n @param fps frame per second\n @param size size of each frame\n @param is_color color\n @param format see http://www.fourcc.org/codecs.php\n @return see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html\n\n The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/.\n By default, the video will have the size of the first image.\n It will resize every image to this size before adding them to the video.\n ' from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize fourcc = VideoWriter_fourcc(*format) vid = None for image in images: if (not os.path.exists(image)): raise FileNotFoundError(image) img = cv2.imread(image) if (vid is None): if (size is None): size = (img.shape[1], img.shape[0]) vid = VideoWriter(outvid, fourcc, float(fps), size, is_color) if ((size[0] != img.shape[1]) and (size[1] != img.shape[0])): img = resize(img, size) vid.write(img) vid.release() return vid
Create a video from a list of images. @param outvid output video @param images list of images to use in the video @param fps frame per second @param size size of each frame @param is_color color @param format see http://www.fourcc.org/codecs.php @return see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/. By default, the video will have the size of the first image. It will resize every image to this size before adding them to the video.
PCA_method/Result_PCA/make_video.py
make_video
YingnanMa/Background_Subtraction_with_a_Freely_Moving_Camera
0
python
def make_video(images, outvid=None, fps=5, size=None, is_color=True, format='XVID'): '\n Create a video from a list of images.\n\n @param outvid output video\n @param images list of images to use in the video\n @param fps frame per second\n @param size size of each frame\n @param is_color color\n @param format see http://www.fourcc.org/codecs.php\n @return see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html\n\n The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/.\n By default, the video will have the size of the first image.\n It will resize every image to this size before adding them to the video.\n ' from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize fourcc = VideoWriter_fourcc(*format) vid = None for image in images: if (not os.path.exists(image)): raise FileNotFoundError(image) img = cv2.imread(image) if (vid is None): if (size is None): size = (img.shape[1], img.shape[0]) vid = VideoWriter(outvid, fourcc, float(fps), size, is_color) if ((size[0] != img.shape[1]) and (size[1] != img.shape[0])): img = resize(img, size) vid.write(img) vid.release() return vid
def make_video(images, outvid=None, fps=5, size=None, is_color=True, format='XVID'): '\n Create a video from a list of images.\n\n @param outvid output video\n @param images list of images to use in the video\n @param fps frame per second\n @param size size of each frame\n @param is_color color\n @param format see http://www.fourcc.org/codecs.php\n @return see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html\n\n The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/.\n By default, the video will have the size of the first image.\n It will resize every image to this size before adding them to the video.\n ' from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize fourcc = VideoWriter_fourcc(*format) vid = None for image in images: if (not os.path.exists(image)): raise FileNotFoundError(image) img = cv2.imread(image) if (vid is None): if (size is None): size = (img.shape[1], img.shape[0]) vid = VideoWriter(outvid, fourcc, float(fps), size, is_color) if ((size[0] != img.shape[1]) and (size[1] != img.shape[0])): img = resize(img, size) vid.write(img) vid.release() return vid<|docstring|>Create a video from a list of images. @param outvid output video @param images list of images to use in the video @param fps frame per second @param size size of each frame @param is_color color @param format see http://www.fourcc.org/codecs.php @return see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/. By default, the video will have the size of the first image. It will resize every image to this size before adding them to the video.<|endoftext|>
7b2792e9669dc7309532e11d0a6a61d29b9688307e2afbe139b678042f1fd49e
@commands.group(autohelp=True, aliases=['userlog']) @commands.guild_only() @checks.admin() async def userlogset(self, ctx: commands.Context): 'Various User Log settings.'
Various User Log settings.
userlog/userlog.py
userlogset
salazar-brodart/enclave-cog
0
python
@commands.group(autohelp=True, aliases=['userlog']) @commands.guild_only() @checks.admin() async def userlogset(self, ctx: commands.Context):
@commands.group(autohelp=True, aliases=['userlog']) @commands.guild_only() @checks.admin() async def userlogset(self, ctx: commands.Context): <|docstring|>Various User Log settings.<|endoftext|>
1f6f4b4924cb85751529a70a240bef486c313b46bd6b7c6da1e4334805385172
@userlogset.command(name='channel') async def user_channel_log(self, ctx: commands.Context, channel: typing.Optional[discord.TextChannel]): 'Set the channel for logs.\n\n If the channel is not provided, logging will be disabled.' if channel: (await self.config.guild(ctx.guild).channel.set(channel.id)) else: (await self.config.guild(ctx.guild).channel.clear()) (await ctx.tick())
Set the channel for logs. If the channel is not provided, logging will be disabled.
userlog/userlog.py
user_channel_log
salazar-brodart/enclave-cog
0
python
@userlogset.command(name='channel') async def user_channel_log(self, ctx: commands.Context, channel: typing.Optional[discord.TextChannel]): 'Set the channel for logs.\n\n If the channel is not provided, logging will be disabled.' if channel: (await self.config.guild(ctx.guild).channel.set(channel.id)) else: (await self.config.guild(ctx.guild).channel.clear()) (await ctx.tick())
@userlogset.command(name='channel') async def user_channel_log(self, ctx: commands.Context, channel: typing.Optional[discord.TextChannel]): 'Set the channel for logs.\n\n If the channel is not provided, logging will be disabled.' if channel: (await self.config.guild(ctx.guild).channel.set(channel.id)) else: (await self.config.guild(ctx.guild).channel.clear()) (await ctx.tick())<|docstring|>Set the channel for logs. If the channel is not provided, logging will be disabled.<|endoftext|>
905227055cbb32267608c906ea19dcc8ffa7d4e7959fa58d8c426b09523a46ae
@userlogset.command(name='join') async def user_join_log(self, ctx: commands.Context, on_off: typing.Optional[bool]): 'Toggle logging when users join the current server.\n\n If `on_off` is not provided, the state will be flipped.' target_state = (on_off or (not (await self.config.guild(ctx.guild).join()))) (await self.config.guild(ctx.guild).join.set(target_state)) if target_state: (await ctx.send('Logging users joining is now enabled.')) else: (await ctx.send('Logging users joining is now disabled.'))
Toggle logging when users join the current server. If `on_off` is not provided, the state will be flipped.
userlog/userlog.py
user_join_log
salazar-brodart/enclave-cog
0
python
@userlogset.command(name='join') async def user_join_log(self, ctx: commands.Context, on_off: typing.Optional[bool]): 'Toggle logging when users join the current server.\n\n If `on_off` is not provided, the state will be flipped.' target_state = (on_off or (not (await self.config.guild(ctx.guild).join()))) (await self.config.guild(ctx.guild).join.set(target_state)) if target_state: (await ctx.send('Logging users joining is now enabled.')) else: (await ctx.send('Logging users joining is now disabled.'))
@userlogset.command(name='join') async def user_join_log(self, ctx: commands.Context, on_off: typing.Optional[bool]): 'Toggle logging when users join the current server.\n\n If `on_off` is not provided, the state will be flipped.' target_state = (on_off or (not (await self.config.guild(ctx.guild).join()))) (await self.config.guild(ctx.guild).join.set(target_state)) if target_state: (await ctx.send('Logging users joining is now enabled.')) else: (await ctx.send('Logging users joining is now disabled.'))<|docstring|>Toggle logging when users join the current server. If `on_off` is not provided, the state will be flipped.<|endoftext|>