body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
0d7f47d277ff0c9e41edef0b29ec414fa622e574188a571c35abceb76e156896
def loads(data, handler=None): 'Load configuration data from C{data}.\n\n @param handler: callable or C{None} that will be invoked for\n augmenting an object. The handler will be passed a term\n that was provided. The handler must return a C{dict}.\n\n @return: The configuration data\n @rtype: C{dict}\n ' if (not isinstance(data, six.text_type)): data = six.text_type(data, 'utf-8') return _parse(data, handler)
Load configuration data from C{data}. @param handler: callable or C{None} that will be invoked for augmenting an object. The handler will be passed a term that was provided. The handler must return a C{dict}. @return: The configuration data @rtype: C{dict}
structprop/__init__.py
loads
edgeware/structprop
1
python
def loads(data, handler=None): 'Load configuration data from C{data}.\n\n @param handler: callable or C{None} that will be invoked for\n augmenting an object. The handler will be passed a term\n that was provided. The handler must return a C{dict}.\n\n @return: The configuration data\n @rtype: C{dict}\n ' if (not isinstance(data, six.text_type)): data = six.text_type(data, 'utf-8') return _parse(data, handler)
def loads(data, handler=None): 'Load configuration data from C{data}.\n\n @param handler: callable or C{None} that will be invoked for\n augmenting an object. The handler will be passed a term\n that was provided. The handler must return a C{dict}.\n\n @return: The configuration data\n @rtype: C{dict}\n ' if (not isinstance(data, six.text_type)): data = six.text_type(data, 'utf-8') return _parse(data, handler)<|docstring|>Load configuration data from C{data}. @param handler: callable or C{None} that will be invoked for augmenting an object. The handler will be passed a term that was provided. The handler must return a C{dict}. @return: The configuration data @rtype: C{dict}<|endoftext|>
412d5dc0413bdd380380a021d83a026d6269524553d67a1dae9b23a3f9a9f70a
def dumps(data): 'Dump configuration data to a string.\n\n @rtype: C{str}\n ' def _dump(d, indent=0): for (key, value) in six.iteritems(d): if isinstance(value, dict): (yield ('%s%s {\n' % ((' ' * indent), _escape(key)))) for subs in _dump(value, (indent + 2)): (yield subs) (yield ('%s}\n' % (' ' * indent))) elif isinstance(value, list): (yield ('%s%s = {\n' % ((' ' * indent), _escape(key)))) for subvalue in value: if (type(subvalue) == dict): (yield ('%s{\n' % (' ' * (indent + 2)))) for subs in _dump(subvalue, (indent + 4)): (yield subs) (yield ('%s}\n' % (' ' * (indent + 2)))) else: (yield ('%s%s\n' % ((' ' * (indent + 2)), _escape(subvalue)))) (yield ('%s}\n' % (' ' * indent))) elif (type(value) == bool): (yield ('%s%s = %s\n' % ((' ' * indent), _escape(key), _escape(str(value).lower())))) else: (yield ('%s%s = %s\n' % ((' ' * indent), _escape(key), _escape(str(value))))) return ''.join(list(_dump(data)))
Dump configuration data to a string. @rtype: C{str}
structprop/__init__.py
dumps
edgeware/structprop
1
python
def dumps(data): 'Dump configuration data to a string.\n\n @rtype: C{str}\n ' def _dump(d, indent=0): for (key, value) in six.iteritems(d): if isinstance(value, dict): (yield ('%s%s {\n' % ((' ' * indent), _escape(key)))) for subs in _dump(value, (indent + 2)): (yield subs) (yield ('%s}\n' % (' ' * indent))) elif isinstance(value, list): (yield ('%s%s = {\n' % ((' ' * indent), _escape(key)))) for subvalue in value: if (type(subvalue) == dict): (yield ('%s{\n' % (' ' * (indent + 2)))) for subs in _dump(subvalue, (indent + 4)): (yield subs) (yield ('%s}\n' % (' ' * (indent + 2)))) else: (yield ('%s%s\n' % ((' ' * (indent + 2)), _escape(subvalue)))) (yield ('%s}\n' % (' ' * indent))) elif (type(value) == bool): (yield ('%s%s = %s\n' % ((' ' * indent), _escape(key), _escape(str(value).lower())))) else: (yield ('%s%s = %s\n' % ((' ' * indent), _escape(key), _escape(str(value))))) return .join(list(_dump(data)))
def dumps(data): 'Dump configuration data to a string.\n\n @rtype: C{str}\n ' def _dump(d, indent=0): for (key, value) in six.iteritems(d): if isinstance(value, dict): (yield ('%s%s {\n' % ((' ' * indent), _escape(key)))) for subs in _dump(value, (indent + 2)): (yield subs) (yield ('%s}\n' % (' ' * indent))) elif isinstance(value, list): (yield ('%s%s = {\n' % ((' ' * indent), _escape(key)))) for subvalue in value: if (type(subvalue) == dict): (yield ('%s{\n' % (' ' * (indent + 2)))) for subs in _dump(subvalue, (indent + 4)): (yield subs) (yield ('%s}\n' % (' ' * (indent + 2)))) else: (yield ('%s%s\n' % ((' ' * (indent + 2)), _escape(subvalue)))) (yield ('%s}\n' % (' ' * indent))) elif (type(value) == bool): (yield ('%s%s = %s\n' % ((' ' * indent), _escape(key), _escape(str(value).lower())))) else: (yield ('%s%s = %s\n' % ((' ' * indent), _escape(key), _escape(str(value))))) return .join(list(_dump(data)))<|docstring|>Dump configuration data to a string. @rtype: C{str}<|endoftext|>
382ff1e81aa42b34538e9475235774a19248dedcd52a8a2bbc65482fe67f3d35
def stmts(obj, next, token): 'Process statements until EOF.' while (token is not EOF): token = assignlist(obj, next, token)
Process statements until EOF.
structprop/__init__.py
stmts
edgeware/structprop
1
python
def stmts(obj, next, token): while (token is not EOF): token = assignlist(obj, next, token)
def stmts(obj, next, token): while (token is not EOF): token = assignlist(obj, next, token)<|docstring|>Process statements until EOF.<|endoftext|>
d6ed83790a2a123df57b68f29b05fea87f8cf11e48d61c955d5e59143d1b8cdf
def handle_init(self): 'Initialization of plugin\n\n - set the periodic call back for the process monitoring (at loop_rate)\n - create the listening UDP socket\n ' self.period = ioloop.PeriodicCallback(self.look_after, (self.loop_rate * 1000)) self.period.start() self._bind_socket()
Initialization of plugin - set the periodic call back for the process monitoring (at loop_rate) - create the listening UDP socket
circus/plugins/watchdog.py
handle_init
JetDrag/circus
820
python
def handle_init(self): 'Initialization of plugin\n\n - set the periodic call back for the process monitoring (at loop_rate)\n - create the listening UDP socket\n ' self.period = ioloop.PeriodicCallback(self.look_after, (self.loop_rate * 1000)) self.period.start() self._bind_socket()
def handle_init(self): 'Initialization of plugin\n\n - set the periodic call back for the process monitoring (at loop_rate)\n - create the listening UDP socket\n ' self.period = ioloop.PeriodicCallback(self.look_after, (self.loop_rate * 1000)) self.period.start() self._bind_socket()<|docstring|>Initialization of plugin - set the periodic call back for the process monitoring (at loop_rate) - create the listening UDP socket<|endoftext|>
2cba7a65a44447b8a14b9f77700836396bec2558fddb5f1329b24da0f614cbf5
def handle_recv(self, data): 'Handle received message from circusd\n\n We need to handle two messages:\n - spawn: add a new monitored child pid\n - reap: remove a killed child pid from monitoring\n ' (watcher_name, action, msg) = self.split_data(data) logger.debug('received data from circusd: watcher.%s.%s, %s', watcher_name, action, msg) if self._match_watcher_name(watcher_name): try: message = self.load_message(msg) except ValueError: logger.error('Error while decoding json for message: %s', msg) else: if ('process_pid' not in message): logger.warning('no process_pid in message') return pid = str(message.get('process_pid')) if (action == 'spawn'): self.pid_status[pid] = dict(watcher=watcher_name, last_activity=time.time()) logger.info('added new monitored pid for %s:%s', watcher_name, pid) elif ((action == 'reap') and (pid in self.pid_status)): old_pid = self.pid_status.pop(pid) logger.info('removed monitored pid for %s:%s', old_pid['watcher'], pid)
Handle received message from circusd We need to handle two messages: - spawn: add a new monitored child pid - reap: remove a killed child pid from monitoring
circus/plugins/watchdog.py
handle_recv
JetDrag/circus
820
python
def handle_recv(self, data): 'Handle received message from circusd\n\n We need to handle two messages:\n - spawn: add a new monitored child pid\n - reap: remove a killed child pid from monitoring\n ' (watcher_name, action, msg) = self.split_data(data) logger.debug('received data from circusd: watcher.%s.%s, %s', watcher_name, action, msg) if self._match_watcher_name(watcher_name): try: message = self.load_message(msg) except ValueError: logger.error('Error while decoding json for message: %s', msg) else: if ('process_pid' not in message): logger.warning('no process_pid in message') return pid = str(message.get('process_pid')) if (action == 'spawn'): self.pid_status[pid] = dict(watcher=watcher_name, last_activity=time.time()) logger.info('added new monitored pid for %s:%s', watcher_name, pid) elif ((action == 'reap') and (pid in self.pid_status)): old_pid = self.pid_status.pop(pid) logger.info('removed monitored pid for %s:%s', old_pid['watcher'], pid)
def handle_recv(self, data): 'Handle received message from circusd\n\n We need to handle two messages:\n - spawn: add a new monitored child pid\n - reap: remove a killed child pid from monitoring\n ' (watcher_name, action, msg) = self.split_data(data) logger.debug('received data from circusd: watcher.%s.%s, %s', watcher_name, action, msg) if self._match_watcher_name(watcher_name): try: message = self.load_message(msg) except ValueError: logger.error('Error while decoding json for message: %s', msg) else: if ('process_pid' not in message): logger.warning('no process_pid in message') return pid = str(message.get('process_pid')) if (action == 'spawn'): self.pid_status[pid] = dict(watcher=watcher_name, last_activity=time.time()) logger.info('added new monitored pid for %s:%s', watcher_name, pid) elif ((action == 'reap') and (pid in self.pid_status)): old_pid = self.pid_status.pop(pid) logger.info('removed monitored pid for %s:%s', old_pid['watcher'], pid)<|docstring|>Handle received message from circusd We need to handle two messages: - spawn: add a new monitored child pid - reap: remove a killed child pid from monitoring<|endoftext|>
8a20ed738d80408ad2c012ff73961b0c546e499b514194c420891ab64abfec33
def _discover_monitored_pids(self): 'Try to discover all the monitored pids.\n\n This should be done only at startup time, because if new watchers or\n pids are created in running time, we should receive the message\n from circusd which is handled by self.handle_recv\n ' self.pid_status = dict() all_watchers = self.call('list') for watcher_name in all_watchers['watchers']: if self._match_watcher_name(watcher_name): processes = self.call('list', name=watcher_name) if ('pids' in processes): for pid in processes['pids']: pid = str(pid) self.pid_status[pid] = dict(watcher=watcher_name, last_activity=time.time()) logger.info('discovered: %s, pid:%s', watcher_name, pid)
Try to discover all the monitored pids. This should be done only at startup time, because if new watchers or pids are created in running time, we should receive the message from circusd which is handled by self.handle_recv
circus/plugins/watchdog.py
_discover_monitored_pids
JetDrag/circus
820
python
def _discover_monitored_pids(self): 'Try to discover all the monitored pids.\n\n This should be done only at startup time, because if new watchers or\n pids are created in running time, we should receive the message\n from circusd which is handled by self.handle_recv\n ' self.pid_status = dict() all_watchers = self.call('list') for watcher_name in all_watchers['watchers']: if self._match_watcher_name(watcher_name): processes = self.call('list', name=watcher_name) if ('pids' in processes): for pid in processes['pids']: pid = str(pid) self.pid_status[pid] = dict(watcher=watcher_name, last_activity=time.time()) logger.info('discovered: %s, pid:%s', watcher_name, pid)
def _discover_monitored_pids(self): 'Try to discover all the monitored pids.\n\n This should be done only at startup time, because if new watchers or\n pids are created in running time, we should receive the message\n from circusd which is handled by self.handle_recv\n ' self.pid_status = dict() all_watchers = self.call('list') for watcher_name in all_watchers['watchers']: if self._match_watcher_name(watcher_name): processes = self.call('list', name=watcher_name) if ('pids' in processes): for pid in processes['pids']: pid = str(pid) self.pid_status[pid] = dict(watcher=watcher_name, last_activity=time.time()) logger.info('discovered: %s, pid:%s', watcher_name, pid)<|docstring|>Try to discover all the monitored pids. This should be done only at startup time, because if new watchers or pids are created in running time, we should receive the message from circusd which is handled by self.handle_recv<|endoftext|>
a10ef9160558db1a85ce58e243d924b9e25f8916c751f5f15425949ea58a49a7
def _bind_socket(self): 'bind the listening socket for watchdog udp and start an event\n handler for handling udp received messages.\n ' self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: self.sock.bind((self.watchdog_ip, self.watchdog_port)) except socket.error as socket_error: logger.error('Problem while binding watchdog socket on %s:%s (err %s', self.watchdog_ip, self.watchdog_port, str(socket_error)) self.sock = None else: self.sock.settimeout(1) self.loop.add_handler(self.sock.fileno(), self.receive_udp_socket, ioloop.IOLoop.READ) logger.info('Watchdog listening UDP on %s:%s', self.watchdog_ip, self.watchdog_port)
bind the listening socket for watchdog udp and start an event handler for handling udp received messages.
circus/plugins/watchdog.py
_bind_socket
JetDrag/circus
820
python
def _bind_socket(self): 'bind the listening socket for watchdog udp and start an event\n handler for handling udp received messages.\n ' self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: self.sock.bind((self.watchdog_ip, self.watchdog_port)) except socket.error as socket_error: logger.error('Problem while binding watchdog socket on %s:%s (err %s', self.watchdog_ip, self.watchdog_port, str(socket_error)) self.sock = None else: self.sock.settimeout(1) self.loop.add_handler(self.sock.fileno(), self.receive_udp_socket, ioloop.IOLoop.READ) logger.info('Watchdog listening UDP on %s:%s', self.watchdog_ip, self.watchdog_port)
def _bind_socket(self): 'bind the listening socket for watchdog udp and start an event\n handler for handling udp received messages.\n ' self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: self.sock.bind((self.watchdog_ip, self.watchdog_port)) except socket.error as socket_error: logger.error('Problem while binding watchdog socket on %s:%s (err %s', self.watchdog_ip, self.watchdog_port, str(socket_error)) self.sock = None else: self.sock.settimeout(1) self.loop.add_handler(self.sock.fileno(), self.receive_udp_socket, ioloop.IOLoop.READ) logger.info('Watchdog listening UDP on %s:%s', self.watchdog_ip, self.watchdog_port)<|docstring|>bind the listening socket for watchdog udp and start an event handler for handling udp received messages.<|endoftext|>
74901970e87f48cc0e7387e6cc46cd02b41ad2cdda0e8e141715b39c36b6b17c
def _match_watcher_name(self, name): 'Match the given watcher name with the watcher_regex given in config\n\n :return: re.match object or None\n ' return re.match(self.watchers_regex, name)
Match the given watcher name with the watcher_regex given in config :return: re.match object or None
circus/plugins/watchdog.py
_match_watcher_name
JetDrag/circus
820
python
def _match_watcher_name(self, name): 'Match the given watcher name with the watcher_regex given in config\n\n :return: re.match object or None\n ' return re.match(self.watchers_regex, name)
def _match_watcher_name(self, name): 'Match the given watcher name with the watcher_regex given in config\n\n :return: re.match object or None\n ' return re.match(self.watchers_regex, name)<|docstring|>Match the given watcher name with the watcher_regex given in config :return: re.match object or None<|endoftext|>
fbe2a71892e2f7b3a0a7b7fb8c1e89ed393f6e13f1056f0d181ba20042791c14
def _decode_received_udp_message(self, data): 'decode the received message according to the msg_regex\n\n :return: decoded message\n :rtype: dict or None\n ' result = re.match(self.msg_regex, data.decode()) if (result is not None): return result.groupdict()
decode the received message according to the msg_regex :return: decoded message :rtype: dict or None
circus/plugins/watchdog.py
_decode_received_udp_message
JetDrag/circus
820
python
def _decode_received_udp_message(self, data): 'decode the received message according to the msg_regex\n\n :return: decoded message\n :rtype: dict or None\n ' result = re.match(self.msg_regex, data.decode()) if (result is not None): return result.groupdict()
def _decode_received_udp_message(self, data): 'decode the received message according to the msg_regex\n\n :return: decoded message\n :rtype: dict or None\n ' result = re.match(self.msg_regex, data.decode()) if (result is not None): return result.groupdict()<|docstring|>decode the received message according to the msg_regex :return: decoded message :rtype: dict or None<|endoftext|>
18be94e03a6b1421b25e51e50afa03ae2a861241226d8931a9b74ed9469387c1
def receive_udp_socket(self, fd, events): 'Check the socket for received UDP message.\n This method is periodically called by the ioloop.\n If messages are received and parsed, update the status of\n the corresponing pid.\n ' (data, _) = self.sock.recvfrom(1024) heartbeat = self._decode_received_udp_message(data) if ('pid' in heartbeat): if (heartbeat['pid'] in self.pid_status): self.pid_status[heartbeat['pid']]['last_activity'] = time.time() else: logger.warning('received watchdog for a non monitored process:%s', heartbeat) logger.debug('watchdog message: %s', heartbeat)
Check the socket for received UDP message. This method is periodically called by the ioloop. If messages are received and parsed, update the status of the corresponing pid.
circus/plugins/watchdog.py
receive_udp_socket
JetDrag/circus
820
python
def receive_udp_socket(self, fd, events): 'Check the socket for received UDP message.\n This method is periodically called by the ioloop.\n If messages are received and parsed, update the status of\n the corresponing pid.\n ' (data, _) = self.sock.recvfrom(1024) heartbeat = self._decode_received_udp_message(data) if ('pid' in heartbeat): if (heartbeat['pid'] in self.pid_status): self.pid_status[heartbeat['pid']]['last_activity'] = time.time() else: logger.warning('received watchdog for a non monitored process:%s', heartbeat) logger.debug('watchdog message: %s', heartbeat)
def receive_udp_socket(self, fd, events): 'Check the socket for received UDP message.\n This method is periodically called by the ioloop.\n If messages are received and parsed, update the status of\n the corresponing pid.\n ' (data, _) = self.sock.recvfrom(1024) heartbeat = self._decode_received_udp_message(data) if ('pid' in heartbeat): if (heartbeat['pid'] in self.pid_status): self.pid_status[heartbeat['pid']]['last_activity'] = time.time() else: logger.warning('received watchdog for a non monitored process:%s', heartbeat) logger.debug('watchdog message: %s', heartbeat)<|docstring|>Check the socket for received UDP message. This method is periodically called by the ioloop. If messages are received and parsed, update the status of the corresponing pid.<|endoftext|>
f07dd19556cbdcfd710da4d13cd257d437ad8604ba0340cd7004dbf248b8ad10
def look_after(self): 'Checks for the watchdoged watchers and restart a process if no\n received watchdog after the loop_rate * max_count period.\n ' if self.starting: self._discover_monitored_pids() self.starting = False max_timeout = (self.loop_rate * self.max_count) too_old_time = (time.time() - max_timeout) for (pid, detail) in self.pid_status.items(): if (detail['last_activity'] < too_old_time): logger.info('watcher:%s, pid:%s is not responding. Kill it !', detail['watcher'], pid) props = dict(name=detail['watcher'], pid=int(pid)) if (self.stop_signal is not None): props['signum'] = self.stop_signal if (self.graceful_timeout is not None): props['graceful_timeout'] = self.graceful_timeout self.cast('kill', **props) del self.pid_status[pid]
Checks for the watchdoged watchers and restart a process if no received watchdog after the loop_rate * max_count period.
circus/plugins/watchdog.py
look_after
JetDrag/circus
820
python
def look_after(self): 'Checks for the watchdoged watchers and restart a process if no\n received watchdog after the loop_rate * max_count period.\n ' if self.starting: self._discover_monitored_pids() self.starting = False max_timeout = (self.loop_rate * self.max_count) too_old_time = (time.time() - max_timeout) for (pid, detail) in self.pid_status.items(): if (detail['last_activity'] < too_old_time): logger.info('watcher:%s, pid:%s is not responding. Kill it !', detail['watcher'], pid) props = dict(name=detail['watcher'], pid=int(pid)) if (self.stop_signal is not None): props['signum'] = self.stop_signal if (self.graceful_timeout is not None): props['graceful_timeout'] = self.graceful_timeout self.cast('kill', **props) del self.pid_status[pid]
def look_after(self): 'Checks for the watchdoged watchers and restart a process if no\n received watchdog after the loop_rate * max_count period.\n ' if self.starting: self._discover_monitored_pids() self.starting = False max_timeout = (self.loop_rate * self.max_count) too_old_time = (time.time() - max_timeout) for (pid, detail) in self.pid_status.items(): if (detail['last_activity'] < too_old_time): logger.info('watcher:%s, pid:%s is not responding. Kill it !', detail['watcher'], pid) props = dict(name=detail['watcher'], pid=int(pid)) if (self.stop_signal is not None): props['signum'] = self.stop_signal if (self.graceful_timeout is not None): props['graceful_timeout'] = self.graceful_timeout self.cast('kill', **props) del self.pid_status[pid]<|docstring|>Checks for the watchdoged watchers and restart a process if no received watchdog after the loop_rate * max_count period.<|endoftext|>
484e2d4d1270e4381d4472fe5f03c6afa318eef82ad974e8151240e0760323f0
def expire_all_unassigned_hits(self): '\n Move through the whole hit_id list and attempt to expire the HITs\n ' for hit in view.all(): if ((not hit.complete) and (hit.hit_id in self.hit_ids)): print(hit.hit_id) mturk_utils.expire_hit(mturk_config['is_sandbox'], hit.hit_id)
Move through the whole hit_id list and attempt to expire the HITs
app/mturk/api.py
expire_all_unassigned_hits
yooli23/MTurk
0
python
def expire_all_unassigned_hits(self): '\n \n ' for hit in view.all(): if ((not hit.complete) and (hit.hit_id in self.hit_ids)): print(hit.hit_id) mturk_utils.expire_hit(mturk_config['is_sandbox'], hit.hit_id)
def expire_all_unassigned_hits(self): '\n \n ' for hit in view.all(): if ((not hit.complete) and (hit.hit_id in self.hit_ids)): print(hit.hit_id) mturk_utils.expire_hit(mturk_config['is_sandbox'], hit.hit_id)<|docstring|>Move through the whole hit_id list and attempt to expire the HITs<|endoftext|>
4e0aa83647d0586843605ed28b2ab30bc8bf982937d746f567bfd49dbc08c13f
def approve_work(self, assignment_id, override_rejection=False): '\n approve work for a given assignment through the mturk client.\n ' client = mturk_utils.get_mturk_client(mturk_config['is_sandbox']) assignment_status = None approve_attempt_num = 0 if ((assignment_status != SUBMIT_STATUS) and (approve_attempt_num < APPROVE_TIME_LIMIT)): try: response = client.get_assignment(AssignmentId=assignment_id) if response: assignment_status = response['Assignment']['AssignmentStatus'] except Exception as error: approve_attempt_num += 1 timer = Timer(10.0, self.approve_work, [assignment_id, override_rejection]) timer.start() return try: client.approve_assignment(AssignmentId=assignment_id, OverrideRejection=override_rejection) print('Assignment {} approved.'.format(assignment_id)) except Exception as error: print(error) client = mturk_utils.get_mturk_client(mturk_config['is_sandbox'])
approve work for a given assignment through the mturk client.
app/mturk/api.py
approve_work
yooli23/MTurk
0
python
def approve_work(self, assignment_id, override_rejection=False): '\n \n ' client = mturk_utils.get_mturk_client(mturk_config['is_sandbox']) assignment_status = None approve_attempt_num = 0 if ((assignment_status != SUBMIT_STATUS) and (approve_attempt_num < APPROVE_TIME_LIMIT)): try: response = client.get_assignment(AssignmentId=assignment_id) if response: assignment_status = response['Assignment']['AssignmentStatus'] except Exception as error: approve_attempt_num += 1 timer = Timer(10.0, self.approve_work, [assignment_id, override_rejection]) timer.start() return try: client.approve_assignment(AssignmentId=assignment_id, OverrideRejection=override_rejection) print('Assignment {} approved.'.format(assignment_id)) except Exception as error: print(error) client = mturk_utils.get_mturk_client(mturk_config['is_sandbox'])
def approve_work(self, assignment_id, override_rejection=False): '\n \n ' client = mturk_utils.get_mturk_client(mturk_config['is_sandbox']) assignment_status = None approve_attempt_num = 0 if ((assignment_status != SUBMIT_STATUS) and (approve_attempt_num < APPROVE_TIME_LIMIT)): try: response = client.get_assignment(AssignmentId=assignment_id) if response: assignment_status = response['Assignment']['AssignmentStatus'] except Exception as error: approve_attempt_num += 1 timer = Timer(10.0, self.approve_work, [assignment_id, override_rejection]) timer.start() return try: client.approve_assignment(AssignmentId=assignment_id, OverrideRejection=override_rejection) print('Assignment {} approved.'.format(assignment_id)) except Exception as error: print(error) client = mturk_utils.get_mturk_client(mturk_config['is_sandbox'])<|docstring|>approve work for a given assignment through the mturk client.<|endoftext|>
96983e16ce51c67dced216ed4b032b38838e83c77556a572e7b9f69c915bca3d
def pay_worker_bonus(self, worker_id, assignment_id): '\n Handles paying bonus to a turker.\n\n Returns True on success and False on failure\n ' client = mturk_utils.get_mturk_client(mturk_config['is_sandbox']) unique_request_token = str(uuid.uuid4()) try: client.send_bonus(WorkerId=worker_id, BonusAmount=str(mturk_config['bonus']), AssignmentId=assignment_id, Reason='You complete the task successfully, thank you!', UniqueRequestToken=unique_request_token) print('Paid ${} bonus to WorkerId: {}'.format(mturk_config['bonus'], worker_id)) except Exception as error: print(error) return True
Handles paying bonus to a turker. Returns True on success and False on failure
app/mturk/api.py
pay_worker_bonus
yooli23/MTurk
0
python
def pay_worker_bonus(self, worker_id, assignment_id): '\n Handles paying bonus to a turker.\n\n Returns True on success and False on failure\n ' client = mturk_utils.get_mturk_client(mturk_config['is_sandbox']) unique_request_token = str(uuid.uuid4()) try: client.send_bonus(WorkerId=worker_id, BonusAmount=str(mturk_config['bonus']), AssignmentId=assignment_id, Reason='You complete the task successfully, thank you!', UniqueRequestToken=unique_request_token) print('Paid ${} bonus to WorkerId: {}'.format(mturk_config['bonus'], worker_id)) except Exception as error: print(error) return True
def pay_worker_bonus(self, worker_id, assignment_id): '\n Handles paying bonus to a turker.\n\n Returns True on success and False on failure\n ' client = mturk_utils.get_mturk_client(mturk_config['is_sandbox']) unique_request_token = str(uuid.uuid4()) try: client.send_bonus(WorkerId=worker_id, BonusAmount=str(mturk_config['bonus']), AssignmentId=assignment_id, Reason='You complete the task successfully, thank you!', UniqueRequestToken=unique_request_token) print('Paid ${} bonus to WorkerId: {}'.format(mturk_config['bonus'], worker_id)) except Exception as error: print(error) return True<|docstring|>Handles paying bonus to a turker. Returns True on success and False on failure<|endoftext|>
f8069d53eb61341563f67eca974d20a1ccd85c04866e70f3803c6f8c648743ac
def give_worker_qualification(self, worker_id, qual_name, qual_value=None): '\n Give a worker a particular qualification.\n ' qual_id = mturk_utils.find_or_create_qualification(qual_name, 'Worker has done this task', mturk_config['is_sandbox']) if ((qual_id is False) or (qual_id is None)): print('Could not give worker {} qualification {}, as the qualification could not be found to exist.'.format(worker_id, qual_name)) return mturk_utils.give_worker_qualification(worker_id, qual_id, qual_value, mturk_config['is_sandbox']) print('gave {} qualification {}'.format(worker_id, qual_name))
Give a worker a particular qualification.
app/mturk/api.py
give_worker_qualification
yooli23/MTurk
0
python
def give_worker_qualification(self, worker_id, qual_name, qual_value=None): '\n \n ' qual_id = mturk_utils.find_or_create_qualification(qual_name, 'Worker has done this task', mturk_config['is_sandbox']) if ((qual_id is False) or (qual_id is None)): print('Could not give worker {} qualification {}, as the qualification could not be found to exist.'.format(worker_id, qual_name)) return mturk_utils.give_worker_qualification(worker_id, qual_id, qual_value, mturk_config['is_sandbox']) print('gave {} qualification {}'.format(worker_id, qual_name))
def give_worker_qualification(self, worker_id, qual_name, qual_value=None): '\n \n ' qual_id = mturk_utils.find_or_create_qualification(qual_name, 'Worker has done this task', mturk_config['is_sandbox']) if ((qual_id is False) or (qual_id is None)): print('Could not give worker {} qualification {}, as the qualification could not be found to exist.'.format(worker_id, qual_name)) return mturk_utils.give_worker_qualification(worker_id, qual_id, qual_value, mturk_config['is_sandbox']) print('gave {} qualification {}'.format(worker_id, qual_name))<|docstring|>Give a worker a particular qualification.<|endoftext|>
a6d74919c3227fcd314f2d27ce197afe596c9f22a2affd66303e84523fea8af4
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. Here, only the first dose event is registered.\n ' return self._predictive_model.get_dosing_regimen(final_time)
Returns the dosing regimen of the compound in form of a :class:`pandas.DataFrame`. The dataframe has a time, a duration, and a dose column, which indicate the time point and duration of the dose administration in the time units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The dose column specifies the amount of the compound that is being administered in units of the drug amount variable of the mechanistic model. If an indefinitely administered dosing regimen is set, i.e. a finite duration and undefined number of doses, see :meth:`set_dosing_regimen`, only the first administration of the dose will appear in the dataframe. Alternatively, a final dose time ``final_time`` can be provided, up to which the dose events are registered. If no dosing regimen has been set, ``None`` is returned. Parameters ---------- final_time Time up to which dose events are registered in the dataframe. If ``None``, all dose events are registered, except for indefinite dosing regimens. Here, only the first dose event is registered.
chi/_predictive_models.py
get_dosing_regimen
DavAug/chi
2
python
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. Here, only the first dose event is registered.\n ' return self._predictive_model.get_dosing_regimen(final_time)
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. Here, only the first dose event is registered.\n ' return self._predictive_model.get_dosing_regimen(final_time)<|docstring|>Returns the dosing regimen of the compound in form of a :class:`pandas.DataFrame`. The dataframe has a time, a duration, and a dose column, which indicate the time point and duration of the dose administration in the time units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The dose column specifies the amount of the compound that is being administered in units of the drug amount variable of the mechanistic model. If an indefinitely administered dosing regimen is set, i.e. a finite duration and undefined number of doses, see :meth:`set_dosing_regimen`, only the first administration of the dose will appear in the dataframe. Alternatively, a final dose time ``final_time`` can be provided, up to which the dose events are registered. 
If no dosing regimen has been set, ``None`` is returned. Parameters ---------- final_time Time up to which dose events are registered in the dataframe. If ``None``, all dose events are registered, except for indefinite dosing regimens. Here, only the first dose event is registered.<|endoftext|>
2e0b3e8f5a1959950ebf0808ee7f559f9b007c26393a16425c5a2d10b327ecc8
def get_n_outputs(self): '\n Returns the number of outputs.\n ' return self._predictive_model.get_n_outputs()
Returns the number of outputs.
chi/_predictive_models.py
get_n_outputs
DavAug/chi
2
python
def get_n_outputs(self): '\n \n ' return self._predictive_model.get_n_outputs()
def get_n_outputs(self): '\n \n ' return self._predictive_model.get_n_outputs()<|docstring|>Returns the number of outputs.<|endoftext|>
321ebe265017664b599be6ba28cb5e43a74a5d27dd4b4594844dbefd5e106686
def get_output_names(self): '\n Returns the output names.\n ' return self._predictive_model.get_output_names()
Returns the output names.
chi/_predictive_models.py
get_output_names
DavAug/chi
2
python
def get_output_names(self): '\n \n ' return self._predictive_model.get_output_names()
def get_output_names(self): '\n \n ' return self._predictive_model.get_output_names()<|docstring|>Returns the output names.<|endoftext|>
93c9e240dec8b495eb39eef35dc6b68e394805ff6bd6b68bbe91959e0b3f8ab4
def get_predictive_model(self): '\n Returns the predictive model.\n ' return self._predictive_model
Returns the predictive model.
chi/_predictive_models.py
get_predictive_model
DavAug/chi
2
python
def get_predictive_model(self): '\n \n ' return self._predictive_model
def get_predictive_model(self): '\n \n ' return self._predictive_model<|docstring|>Returns the predictive model.<|endoftext|>
8f971cd28f9b5b88cd67e48b473880c25092bfb1753da0b6a7b715a88f1cf18d
def sample(self, times, n_samples=None, seed=None, include_regimen=False): '\n Samples "measurements" of the biomarkers from the predictive model and\n returns them in form of a :class:`pandas.DataFrame`.\n ' raise NotImplementedError
Samples "measurements" of the biomarkers from the predictive model and returns them in form of a :class:`pandas.DataFrame`.
chi/_predictive_models.py
sample
DavAug/chi
2
python
def sample(self, times, n_samples=None, seed=None, include_regimen=False): '\n Samples "measurements" of the biomarkers from the predictive model and\n returns them in form of a :class:`pandas.DataFrame`.\n ' raise NotImplementedError
def sample(self, times, n_samples=None, seed=None, include_regimen=False): '\n Samples "measurements" of the biomarkers from the predictive model and\n returns them in form of a :class:`pandas.DataFrame`.\n ' raise NotImplementedError<|docstring|>Samples "measurements" of the biomarkers from the predictive model and returns them in form of a :class:`pandas.DataFrame`.<|endoftext|>
6dac706a67d0069e978e7b0a29d160d3ef0429bec1d558570bdb100fc530de10
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n dose administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' self._predictive_model.set_dosing_regimen(dose, start, duration, period, num)
Sets the dosing regimen with which the compound is administered. By default the dose is administered as a bolus injection (duration on a time scale that is 100 fold smaller than the basic time unit). To model an infusion of the dose over a longer time period, the ``duration`` can be adjusted to the appropriate time scale. By default the dose is administered once. To apply multiple doses provide a dose administration period. .. note:: This method requires a :class:`MechanisticModel` that supports dose administration. Parameters ---------- dose The amount of the compound that is injected at each administration. start Start time of the treatment. duration Duration of dose administration. For a bolus injection, a dose duration of 1% of the time unit should suffice. By default the duration is set to 0.01 (bolus). period Periodicity at which doses are administered. If ``None`` the dose is administered only once. num Number of administered doses. If ``None`` and the periodicity of the administration is not ``None``, doses are administered indefinitely.
chi/_predictive_models.py
set_dosing_regimen
DavAug/chi
2
python
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n dose administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' self._predictive_model.set_dosing_regimen(dose, start, duration, period, num)
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n dose administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' self._predictive_model.set_dosing_regimen(dose, start, duration, period, num)<|docstring|>Sets the dosing regimen with which the compound is administered. By default the dose is administered as a bolus injection (duration on a time scale that is 100 fold smaller than the basic time unit). To model an infusion of the dose over a longer time period, the ``duration`` can be adjusted to the appropriate time scale. By default the dose is administered once. To apply multiple doses provide a dose administration period. .. note:: This method requires a :class:`MechanisticModel` that supports dose administration. Parameters ---------- dose The amount of the compound that is injected at each administration. start Start time of the treatment. duration Duration of dose administration. 
For a bolus injection, a dose duration of 1% of the time unit should suffice. By default the duration is set to 0.01 (bolus). period Periodicity at which doses are administered. If ``None`` the dose is administered only once. num Number of administered doses. If ``None`` and the periodicity of the administration is not ``None``, doses are administered indefinitely.<|endoftext|>
0b8b5ed89ac4250d547e423b0699b01c8a604cb7f82af269bda29f936f72962b
def _check_parameters(self, posterior_samples, param_map): '\n Checks whether the parameters of the posterior exist in the dataset\n and returns them.\n ' model_names = self._predictive_model.get_parameter_names() for (param_id, name) in enumerate(model_names): try: model_names[param_id] = param_map[name] except KeyError: pass for parameter in model_names: if (parameter not in posterior_samples.data_vars): raise ValueError((('The parameter <' + str(parameter)) + '> cannot be found in the posterior.')) return model_names
Checks whether the parameters of the posterior exist in the dataset and returns them.
chi/_predictive_models.py
_check_parameters
DavAug/chi
2
python
def _check_parameters(self, posterior_samples, param_map): '\n Checks whether the parameters of the posterior exist in the dataset\n and returns them.\n ' model_names = self._predictive_model.get_parameter_names() for (param_id, name) in enumerate(model_names): try: model_names[param_id] = param_map[name] except KeyError: pass for parameter in model_names: if (parameter not in posterior_samples.data_vars): raise ValueError((('The parameter <' + str(parameter)) + '> cannot be found in the posterior.')) return model_names
def _check_parameters(self, posterior_samples, param_map): '\n Checks whether the parameters of the posterior exist in the dataset\n and returns them.\n ' model_names = self._predictive_model.get_parameter_names() for (param_id, name) in enumerate(model_names): try: model_names[param_id] = param_map[name] except KeyError: pass for parameter in model_names: if (parameter not in posterior_samples.data_vars): raise ValueError((('The parameter <' + str(parameter)) + '> cannot be found in the posterior.')) return model_names<|docstring|>Checks whether the parameters of the posterior exist in the dataset and returns them.<|endoftext|>
2b7d6c20d236ae0c0e0eeac5d66923432d23d43db3aa2d3668823d2c6831b01f
def sample(self, times, n_samples=None, individual=None, seed=None, include_regimen=False, covariates=None, covariate_map=None): '\n Samples "measurements" of the biomarkers from the posterior predictive\n model and returns them in form of a :class:`pandas.DataFrame`.\n\n For each of the ``n_samples`` a parameter set is drawn from the\n approximate posterior distribution. These paramaters are then used to\n sample from the predictive model.\n\n :param times: Times for the virtual "measurements".\n :type times: list, numpy.ndarray of shape (n,)\n :param n_samples: The number of virtual "measurements" that are\n performed at each time point. If ``None`` the biomarkers are\n measured only once at each time point.\n :type n_samples: int, optional\n :param individual: The ID of the modelled individual. If\n ``None``, either the first ID or the population is simulated.\n :type individual: str, optional\n :param seed: A seed for the pseudo-random number generator.\n :type seed: int\n :param include_regimen: A boolean flag which determines whether the\n information about the dosing regimen is included.\n :type include_regimen: bool, optional\n :param covariates: An array-like object with covariates of length c.\n Covariates are only relevant when CovariatePopulationModels are\n used.\n :type covariates: List or np.ndarray, optional\n :param covariate_map: A nested list of length n_population_models with\n indices that reference the relevant covariates for each population\n model. By default, it is assumed that all covariates are relevant\n for all population models.\n :type covariate_map: List[List[int]], optional\n ' if (n_samples is None): n_samples = 1 n_samples = int(n_samples) is_population_pred_model = isinstance(self._predictive_model, chi.PopulationPredictiveModel) if is_population_pred_model: if (individual is not None): raise ValueError("Individual ID's cannot be selected for a chi.PopulationPredictiveModel. 
To model an individual create a chi.PosteriorPredictiveModel with a chi.PredictiveModel.") else: ids = self._posterior.individual if (individual is None): individual = str(ids.data[0]) if (individual not in ids): raise ValueError((('The individual <' + str(individual)) + '> could not be found in the ID column.')) times = np.sort(times) rng = np.random.default_rng(seed=seed) n_chains = len(self._posterior.chain) n_parameters = self._predictive_model.n_parameters() try: n_draws = len(self._posterior.sel(individual=individual).dropna(dim='draw').draw) except (ValueError, KeyError): n_draws = len(self._posterior.dropna(dim='draw').draw) posterior = np.empty(shape=((n_chains * n_draws), n_parameters)) for (param_id, parameter) in enumerate(self._parameter_names): try: posterior[(:, param_id)] = self._posterior[parameter].sel(individual=individual).dropna(dim='draw').values.flatten() except (ValueError, KeyError): samples = self._posterior[parameter].dropna(dim='draw').values.flatten() try: posterior[(:, param_id)] = samples except ValueError: target = (n_chains * n_draws) now = len(samples) posterior[(:, param_id)] = samples[rng.choice(now, target)] container = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) outputs = self._predictive_model.get_output_names() sample_ids = np.arange(start=1, stop=(n_samples + 1)) for sample_id in sample_ids: parameters = rng.choice(posterior) if is_population_pred_model: sample = self._predictive_model.sample(parameters, times, n_samples, rng, return_df=False, covariates=covariates, covariate_map=covariate_map) else: sample = self._predictive_model.sample(parameters, times, n_samples, rng, return_df=False) for (output_id, name) in enumerate(outputs): container = container.append(pd.DataFrame({'ID': sample_id, 'Time': times, 'Observable': name, 'Value': sample[(output_id, :, 0)]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): container = 
container.append(regimen) return container
Samples "measurements" of the biomarkers from the posterior predictive model and returns them in form of a :class:`pandas.DataFrame`. For each of the ``n_samples`` a parameter set is drawn from the approximate posterior distribution. These paramaters are then used to sample from the predictive model. :param times: Times for the virtual "measurements". :type times: list, numpy.ndarray of shape (n,) :param n_samples: The number of virtual "measurements" that are performed at each time point. If ``None`` the biomarkers are measured only once at each time point. :type n_samples: int, optional :param individual: The ID of the modelled individual. If ``None``, either the first ID or the population is simulated. :type individual: str, optional :param seed: A seed for the pseudo-random number generator. :type seed: int :param include_regimen: A boolean flag which determines whether the information about the dosing regimen is included. :type include_regimen: bool, optional :param covariates: An array-like object with covariates of length c. Covariates are only relevant when CovariatePopulationModels are used. :type covariates: List or np.ndarray, optional :param covariate_map: A nested list of length n_population_models with indices that reference the relevant covariates for each population model. By default, it is assumed that all covariates are relevant for all population models. :type covariate_map: List[List[int]], optional
chi/_predictive_models.py
sample
DavAug/chi
2
python
def sample(self, times, n_samples=None, individual=None, seed=None, include_regimen=False, covariates=None, covariate_map=None): '\n Samples "measurements" of the biomarkers from the posterior predictive\n model and returns them in form of a :class:`pandas.DataFrame`.\n\n For each of the ``n_samples`` a parameter set is drawn from the\n approximate posterior distribution. These paramaters are then used to\n sample from the predictive model.\n\n :param times: Times for the virtual "measurements".\n :type times: list, numpy.ndarray of shape (n,)\n :param n_samples: The number of virtual "measurements" that are\n performed at each time point. If ``None`` the biomarkers are\n measured only once at each time point.\n :type n_samples: int, optional\n :param individual: The ID of the modelled individual. If\n ``None``, either the first ID or the population is simulated.\n :type individual: str, optional\n :param seed: A seed for the pseudo-random number generator.\n :type seed: int\n :param include_regimen: A boolean flag which determines whether the\n information about the dosing regimen is included.\n :type include_regimen: bool, optional\n :param covariates: An array-like object with covariates of length c.\n Covariates are only relevant when CovariatePopulationModels are\n used.\n :type covariates: List or np.ndarray, optional\n :param covariate_map: A nested list of length n_population_models with\n indices that reference the relevant covariates for each population\n model. By default, it is assumed that all covariates are relevant\n for all population models.\n :type covariate_map: List[List[int]], optional\n ' if (n_samples is None): n_samples = 1 n_samples = int(n_samples) is_population_pred_model = isinstance(self._predictive_model, chi.PopulationPredictiveModel) if is_population_pred_model: if (individual is not None): raise ValueError("Individual ID's cannot be selected for a chi.PopulationPredictiveModel. 
To model an individual create a chi.PosteriorPredictiveModel with a chi.PredictiveModel.") else: ids = self._posterior.individual if (individual is None): individual = str(ids.data[0]) if (individual not in ids): raise ValueError((('The individual <' + str(individual)) + '> could not be found in the ID column.')) times = np.sort(times) rng = np.random.default_rng(seed=seed) n_chains = len(self._posterior.chain) n_parameters = self._predictive_model.n_parameters() try: n_draws = len(self._posterior.sel(individual=individual).dropna(dim='draw').draw) except (ValueError, KeyError): n_draws = len(self._posterior.dropna(dim='draw').draw) posterior = np.empty(shape=((n_chains * n_draws), n_parameters)) for (param_id, parameter) in enumerate(self._parameter_names): try: posterior[(:, param_id)] = self._posterior[parameter].sel(individual=individual).dropna(dim='draw').values.flatten() except (ValueError, KeyError): samples = self._posterior[parameter].dropna(dim='draw').values.flatten() try: posterior[(:, param_id)] = samples except ValueError: target = (n_chains * n_draws) now = len(samples) posterior[(:, param_id)] = samples[rng.choice(now, target)] container = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) outputs = self._predictive_model.get_output_names() sample_ids = np.arange(start=1, stop=(n_samples + 1)) for sample_id in sample_ids: parameters = rng.choice(posterior) if is_population_pred_model: sample = self._predictive_model.sample(parameters, times, n_samples, rng, return_df=False, covariates=covariates, covariate_map=covariate_map) else: sample = self._predictive_model.sample(parameters, times, n_samples, rng, return_df=False) for (output_id, name) in enumerate(outputs): container = container.append(pd.DataFrame({'ID': sample_id, 'Time': times, 'Observable': name, 'Value': sample[(output_id, :, 0)]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): container = 
container.append(regimen) return container
def sample(self, times, n_samples=None, individual=None, seed=None, include_regimen=False, covariates=None, covariate_map=None): '\n Samples "measurements" of the biomarkers from the posterior predictive\n model and returns them in form of a :class:`pandas.DataFrame`.\n\n For each of the ``n_samples`` a parameter set is drawn from the\n approximate posterior distribution. These paramaters are then used to\n sample from the predictive model.\n\n :param times: Times for the virtual "measurements".\n :type times: list, numpy.ndarray of shape (n,)\n :param n_samples: The number of virtual "measurements" that are\n performed at each time point. If ``None`` the biomarkers are\n measured only once at each time point.\n :type n_samples: int, optional\n :param individual: The ID of the modelled individual. If\n ``None``, either the first ID or the population is simulated.\n :type individual: str, optional\n :param seed: A seed for the pseudo-random number generator.\n :type seed: int\n :param include_regimen: A boolean flag which determines whether the\n information about the dosing regimen is included.\n :type include_regimen: bool, optional\n :param covariates: An array-like object with covariates of length c.\n Covariates are only relevant when CovariatePopulationModels are\n used.\n :type covariates: List or np.ndarray, optional\n :param covariate_map: A nested list of length n_population_models with\n indices that reference the relevant covariates for each population\n model. By default, it is assumed that all covariates are relevant\n for all population models.\n :type covariate_map: List[List[int]], optional\n ' if (n_samples is None): n_samples = 1 n_samples = int(n_samples) is_population_pred_model = isinstance(self._predictive_model, chi.PopulationPredictiveModel) if is_population_pred_model: if (individual is not None): raise ValueError("Individual ID's cannot be selected for a chi.PopulationPredictiveModel. 
To model an individual create a chi.PosteriorPredictiveModel with a chi.PredictiveModel.") else: ids = self._posterior.individual if (individual is None): individual = str(ids.data[0]) if (individual not in ids): raise ValueError((('The individual <' + str(individual)) + '> could not be found in the ID column.')) times = np.sort(times) rng = np.random.default_rng(seed=seed) n_chains = len(self._posterior.chain) n_parameters = self._predictive_model.n_parameters() try: n_draws = len(self._posterior.sel(individual=individual).dropna(dim='draw').draw) except (ValueError, KeyError): n_draws = len(self._posterior.dropna(dim='draw').draw) posterior = np.empty(shape=((n_chains * n_draws), n_parameters)) for (param_id, parameter) in enumerate(self._parameter_names): try: posterior[(:, param_id)] = self._posterior[parameter].sel(individual=individual).dropna(dim='draw').values.flatten() except (ValueError, KeyError): samples = self._posterior[parameter].dropna(dim='draw').values.flatten() try: posterior[(:, param_id)] = samples except ValueError: target = (n_chains * n_draws) now = len(samples) posterior[(:, param_id)] = samples[rng.choice(now, target)] container = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) outputs = self._predictive_model.get_output_names() sample_ids = np.arange(start=1, stop=(n_samples + 1)) for sample_id in sample_ids: parameters = rng.choice(posterior) if is_population_pred_model: sample = self._predictive_model.sample(parameters, times, n_samples, rng, return_df=False, covariates=covariates, covariate_map=covariate_map) else: sample = self._predictive_model.sample(parameters, times, n_samples, rng, return_df=False) for (output_id, name) in enumerate(outputs): container = container.append(pd.DataFrame({'ID': sample_id, 'Time': times, 'Observable': name, 'Value': sample[(output_id, :, 0)]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): container = 
container.append(regimen) return container<|docstring|>Samples "measurements" of the biomarkers from the posterior predictive model and returns them in form of a :class:`pandas.DataFrame`. For each of the ``n_samples`` a parameter set is drawn from the approximate posterior distribution. These paramaters are then used to sample from the predictive model. :param times: Times for the virtual "measurements". :type times: list, numpy.ndarray of shape (n,) :param n_samples: The number of virtual "measurements" that are performed at each time point. If ``None`` the biomarkers are measured only once at each time point. :type n_samples: int, optional :param individual: The ID of the modelled individual. If ``None``, either the first ID or the population is simulated. :type individual: str, optional :param seed: A seed for the pseudo-random number generator. :type seed: int :param include_regimen: A boolean flag which determines whether the information about the dosing regimen is included. :type include_regimen: bool, optional :param covariates: An array-like object with covariates of length c. Covariates are only relevant when CovariatePopulationModels are used. :type covariates: List or np.ndarray, optional :param covariate_map: A nested list of length n_population_models with indices that reference the relevant covariates for each population model. By default, it is assumed that all covariates are relevant for all population models. :type covariate_map: List[List[int]], optional<|endoftext|>
76b0cbdeeda512316e0d91738415a428f646ece564486d249d9fc6c7ce711381
def _set_error_model_parameter_names(self): '\n Resets the error model parameter names and prepends the output name\n if more than one output exists.\n ' for error_model in self._error_models: error_model.set_parameter_names(None) n_outputs = self._mechanistic_model.n_outputs() if (n_outputs > 1): outputs = self._mechanistic_model.outputs() for (output_id, error_model) in enumerate(self._error_models): names = error_model.get_parameter_names() output = outputs[output_id] names = [((output + ' ') + name) for name in names] error_model.set_parameter_names(names)
Resets the error model parameter names and prepends the output name if more than one output exists.
chi/_predictive_models.py
_set_error_model_parameter_names
DavAug/chi
2
python
def _set_error_model_parameter_names(self): '\n Resets the error model parameter names and prepends the output name\n if more than one output exists.\n ' for error_model in self._error_models: error_model.set_parameter_names(None) n_outputs = self._mechanistic_model.n_outputs() if (n_outputs > 1): outputs = self._mechanistic_model.outputs() for (output_id, error_model) in enumerate(self._error_models): names = error_model.get_parameter_names() output = outputs[output_id] names = [((output + ' ') + name) for name in names] error_model.set_parameter_names(names)
def _set_error_model_parameter_names(self): '\n Resets the error model parameter names and prepends the output name\n if more than one output exists.\n ' for error_model in self._error_models: error_model.set_parameter_names(None) n_outputs = self._mechanistic_model.n_outputs() if (n_outputs > 1): outputs = self._mechanistic_model.outputs() for (output_id, error_model) in enumerate(self._error_models): names = error_model.get_parameter_names() output = outputs[output_id] names = [((output + ' ') + name) for name in names] error_model.set_parameter_names(names)<|docstring|>Resets the error model parameter names and prepends the output name if more than one output exists.<|endoftext|>
347dc7f1521ed92e881a356ad885d49f3a43f3578c5771a195d38a62e49caa59
def _set_number_and_parameter_names(self): '\n Sets the number and names of the free model parameters.\n ' parameter_names = self._mechanistic_model.parameters() for error_model in self._error_models: parameter_names += error_model.get_parameter_names() self._parameter_names = parameter_names self._n_parameters = len(self._parameter_names)
Sets the number and names of the free model parameters.
chi/_predictive_models.py
_set_number_and_parameter_names
DavAug/chi
2
python
def _set_number_and_parameter_names(self): '\n \n ' parameter_names = self._mechanistic_model.parameters() for error_model in self._error_models: parameter_names += error_model.get_parameter_names() self._parameter_names = parameter_names self._n_parameters = len(self._parameter_names)
def _set_number_and_parameter_names(self): '\n \n ' parameter_names = self._mechanistic_model.parameters() for error_model in self._error_models: parameter_names += error_model.get_parameter_names() self._parameter_names = parameter_names self._n_parameters = len(self._parameter_names)<|docstring|>Sets the number and names of the free model parameters.<|endoftext|>
6a57bce2889315d7f79ac813f12e4f0ab652e2d705cd08c542c8f4c92431055c
def fix_parameters(self, name_value_dict): '\n Fixes the value of model parameters, and effectively removes them as a\n parameter from the model. Fixing the value of a parameter at ``None``,\n sets the parameter free again.\n\n Parameters\n ----------\n name_value_dict\n A dictionary with model parameter names as keys, and parameter\n value as values.\n ' try: name_value_dict = dict(name_value_dict) except (TypeError, ValueError): raise ValueError('The name-value dictionary has to be convertable to a python dictionary.') mechanistic_model = self._mechanistic_model error_models = self._error_models if (not isinstance(mechanistic_model, chi.ReducedMechanisticModel)): mechanistic_model = chi.ReducedMechanisticModel(mechanistic_model) for (model_id, error_model) in enumerate(error_models): if (not isinstance(error_model, chi.ReducedErrorModel)): error_models[model_id] = chi.ReducedErrorModel(error_model) mechanistic_model.fix_parameters(name_value_dict) for error_model in error_models: error_model.fix_parameters(name_value_dict) if (mechanistic_model.n_fixed_parameters() == 0): mechanistic_model = mechanistic_model.mechanistic_model() for (model_id, error_model) in enumerate(error_models): if (error_model.n_fixed_parameters() == 0): error_model = error_model.get_error_model() error_models[model_id] = error_model self._mechanistic_model = mechanistic_model self._error_models = error_models self._set_number_and_parameter_names()
Fixes the value of model parameters, and effectively removes them as a parameter from the model. Fixing the value of a parameter at ``None``, sets the parameter free again. Parameters ---------- name_value_dict A dictionary with model parameter names as keys, and parameter value as values.
chi/_predictive_models.py
fix_parameters
DavAug/chi
2
python
def fix_parameters(self, name_value_dict): '\n Fixes the value of model parameters, and effectively removes them as a\n parameter from the model. Fixing the value of a parameter at ``None``,\n sets the parameter free again.\n\n Parameters\n ----------\n name_value_dict\n A dictionary with model parameter names as keys, and parameter\n value as values.\n ' try: name_value_dict = dict(name_value_dict) except (TypeError, ValueError): raise ValueError('The name-value dictionary has to be convertable to a python dictionary.') mechanistic_model = self._mechanistic_model error_models = self._error_models if (not isinstance(mechanistic_model, chi.ReducedMechanisticModel)): mechanistic_model = chi.ReducedMechanisticModel(mechanistic_model) for (model_id, error_model) in enumerate(error_models): if (not isinstance(error_model, chi.ReducedErrorModel)): error_models[model_id] = chi.ReducedErrorModel(error_model) mechanistic_model.fix_parameters(name_value_dict) for error_model in error_models: error_model.fix_parameters(name_value_dict) if (mechanistic_model.n_fixed_parameters() == 0): mechanistic_model = mechanistic_model.mechanistic_model() for (model_id, error_model) in enumerate(error_models): if (error_model.n_fixed_parameters() == 0): error_model = error_model.get_error_model() error_models[model_id] = error_model self._mechanistic_model = mechanistic_model self._error_models = error_models self._set_number_and_parameter_names()
def fix_parameters(self, name_value_dict): '\n Fixes the value of model parameters, and effectively removes them as a\n parameter from the model. Fixing the value of a parameter at ``None``,\n sets the parameter free again.\n\n Parameters\n ----------\n name_value_dict\n A dictionary with model parameter names as keys, and parameter\n value as values.\n ' try: name_value_dict = dict(name_value_dict) except (TypeError, ValueError): raise ValueError('The name-value dictionary has to be convertable to a python dictionary.') mechanistic_model = self._mechanistic_model error_models = self._error_models if (not isinstance(mechanistic_model, chi.ReducedMechanisticModel)): mechanistic_model = chi.ReducedMechanisticModel(mechanistic_model) for (model_id, error_model) in enumerate(error_models): if (not isinstance(error_model, chi.ReducedErrorModel)): error_models[model_id] = chi.ReducedErrorModel(error_model) mechanistic_model.fix_parameters(name_value_dict) for error_model in error_models: error_model.fix_parameters(name_value_dict) if (mechanistic_model.n_fixed_parameters() == 0): mechanistic_model = mechanistic_model.mechanistic_model() for (model_id, error_model) in enumerate(error_models): if (error_model.n_fixed_parameters() == 0): error_model = error_model.get_error_model() error_models[model_id] = error_model self._mechanistic_model = mechanistic_model self._error_models = error_models self._set_number_and_parameter_names()<|docstring|>Fixes the value of model parameters, and effectively removes them as a parameter from the model. Fixing the value of a parameter at ``None``, sets the parameter free again. Parameters ---------- name_value_dict A dictionary with model parameter names as keys, and parameter value as values.<|endoftext|>
ff28f4693f16e8102dbd5e3d9c762b9c2246d52503131e4db62759844a89aea7
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. 
Here, only the first dose event is registered.\n ' try: regimen = self._mechanistic_model.dosing_regimen() except AttributeError: return None if (regimen is None): return regimen if (final_time is None): final_time = np.inf regimen_df = pd.DataFrame(columns=['Time', 'Duration', 'Dose']) for dose_event in regimen.events(): dose_rate = dose_event.level() dose_duration = dose_event.duration() dose_amount = (dose_rate * dose_duration) start_time = dose_event.start() period = dose_event.period() n_doses = dose_event.multiplier() if (start_time > final_time): continue if (period == 0): regimen_df = regimen_df.append(pd.DataFrame({'Time': [start_time], 'Duration': [dose_duration], 'Dose': [dose_amount]})) continue if (n_doses == 0): n_doses = 1 if np.isfinite(final_time): n_doses = int((abs(final_time) // period)) dose_times = [(start_time + (n * period)) for n in range(n_doses)] dose_times = np.array(dose_times) mask = (dose_times <= final_time) dose_times = dose_times[mask] regimen_df = regimen_df.append(pd.DataFrame({'Time': dose_times, 'Duration': dose_duration, 'Dose': dose_amount})) if regimen_df.empty: return None return regimen_df
Returns the dosing regimen of the compound in form of a :class:`pandas.DataFrame`. The dataframe has a time, a duration, and a dose column, which indicate the time point and duration of the dose administration in the time units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The dose column specifies the amount of the compound that is being administered in units of the drug amount variable of the mechanistic model. If an indefinitely administered dosing regimen is set, i.e. a finite duration and undefined number of doses, see :meth:`set_dosing_regimen`, only the first administration of the dose will appear in the dataframe. Alternatively, a final dose time ``final_time`` can be provided, up to which the dose events are registered. If no dosing regimen has been set, ``None`` is returned. Parameters ---------- final_time Time up to which dose events are registered in the dataframe. If ``None``, all dose events are registered, except for indefinite dosing regimens. Here, only the first dose event is registered.
chi/_predictive_models.py
get_dosing_regimen
DavAug/chi
2
python
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. 
Here, only the first dose event is registered.\n ' try: regimen = self._mechanistic_model.dosing_regimen() except AttributeError: return None if (regimen is None): return regimen if (final_time is None): final_time = np.inf regimen_df = pd.DataFrame(columns=['Time', 'Duration', 'Dose']) for dose_event in regimen.events(): dose_rate = dose_event.level() dose_duration = dose_event.duration() dose_amount = (dose_rate * dose_duration) start_time = dose_event.start() period = dose_event.period() n_doses = dose_event.multiplier() if (start_time > final_time): continue if (period == 0): regimen_df = regimen_df.append(pd.DataFrame({'Time': [start_time], 'Duration': [dose_duration], 'Dose': [dose_amount]})) continue if (n_doses == 0): n_doses = 1 if np.isfinite(final_time): n_doses = int((abs(final_time) // period)) dose_times = [(start_time + (n * period)) for n in range(n_doses)] dose_times = np.array(dose_times) mask = (dose_times <= final_time) dose_times = dose_times[mask] regimen_df = regimen_df.append(pd.DataFrame({'Time': dose_times, 'Duration': dose_duration, 'Dose': dose_amount})) if regimen_df.empty: return None return regimen_df
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. 
Here, only the first dose event is registered.\n ' try: regimen = self._mechanistic_model.dosing_regimen() except AttributeError: return None if (regimen is None): return regimen if (final_time is None): final_time = np.inf regimen_df = pd.DataFrame(columns=['Time', 'Duration', 'Dose']) for dose_event in regimen.events(): dose_rate = dose_event.level() dose_duration = dose_event.duration() dose_amount = (dose_rate * dose_duration) start_time = dose_event.start() period = dose_event.period() n_doses = dose_event.multiplier() if (start_time > final_time): continue if (period == 0): regimen_df = regimen_df.append(pd.DataFrame({'Time': [start_time], 'Duration': [dose_duration], 'Dose': [dose_amount]})) continue if (n_doses == 0): n_doses = 1 if np.isfinite(final_time): n_doses = int((abs(final_time) // period)) dose_times = [(start_time + (n * period)) for n in range(n_doses)] dose_times = np.array(dose_times) mask = (dose_times <= final_time) dose_times = dose_times[mask] regimen_df = regimen_df.append(pd.DataFrame({'Time': dose_times, 'Duration': dose_duration, 'Dose': dose_amount})) if regimen_df.empty: return None return regimen_df<|docstring|>Returns the dosing regimen of the compound in form of a :class:`pandas.DataFrame`. The dataframe has a time, a duration, and a dose column, which indicate the time point and duration of the dose administration in the time units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The dose column specifies the amount of the compound that is being administered in units of the drug amount variable of the mechanistic model. If an indefinitely administered dosing regimen is set, i.e. a finite duration and undefined number of doses, see :meth:`set_dosing_regimen`, only the first administration of the dose will appear in the dataframe. Alternatively, a final dose time ``final_time`` can be provided, up to which the dose events are registered. If no dosing regimen has been set, ``None`` is returned. 
Parameters ---------- final_time Time up to which dose events are registered in the dataframe. If ``None``, all dose events are registered, except for indefinite dosing regimens. Here, only the first dose event is registered.<|endoftext|>
86db6d5dc06fa27f799e0f0ac9d7b715b5dc522063a44d3ae9c5039502471ef1
def get_n_outputs(self): '\n Returns the number of outputs.\n ' return self._mechanistic_model.n_outputs()
Returns the number of outputs.
chi/_predictive_models.py
get_n_outputs
DavAug/chi
2
python
def get_n_outputs(self): '\n \n ' return self._mechanistic_model.n_outputs()
def get_n_outputs(self): '\n \n ' return self._mechanistic_model.n_outputs()<|docstring|>Returns the number of outputs.<|endoftext|>
c008830b536d1c1153d59626490a46dad6ff1dd00d38f530f2e12efb8dee03f0
def get_output_names(self): '\n Returns the output names.\n ' return self._mechanistic_model.outputs()
Returns the output names.
chi/_predictive_models.py
get_output_names
DavAug/chi
2
python
def get_output_names(self): '\n \n ' return self._mechanistic_model.outputs()
def get_output_names(self): '\n \n ' return self._mechanistic_model.outputs()<|docstring|>Returns the output names.<|endoftext|>
df5549d1fb5707e9f96b2fee76838fa0f962babac4e31793c55018e393604b37
def get_parameter_names(self): '\n Returns the parameter names of the predictive model.\n ' return copy.copy(self._parameter_names)
Returns the parameter names of the predictive model.
chi/_predictive_models.py
get_parameter_names
DavAug/chi
2
python
def get_parameter_names(self): '\n \n ' return copy.copy(self._parameter_names)
def get_parameter_names(self): '\n \n ' return copy.copy(self._parameter_names)<|docstring|>Returns the parameter names of the predictive model.<|endoftext|>
bc6e5d9077c88de30d36cb7b33300add11ee112fb5c46d3c3eebf50885901f0d
def get_submodels(self): '\n Returns the submodels of the predictive model in form of a dictionary.\n ' mechanistic_model = self._mechanistic_model if isinstance(mechanistic_model, chi.ReducedMechanisticModel): mechanistic_model = mechanistic_model.mechanistic_model() error_models = [] for error_model in self._error_models: if isinstance(error_model, chi.ReducedErrorModel): error_model = error_model.get_error_model() error_models.append(error_model) submodels = dict({'Mechanistic model': mechanistic_model, 'Error models': error_models}) return submodels
Returns the submodels of the predictive model in form of a dictionary.
chi/_predictive_models.py
get_submodels
DavAug/chi
2
python
def get_submodels(self): '\n \n ' mechanistic_model = self._mechanistic_model if isinstance(mechanistic_model, chi.ReducedMechanisticModel): mechanistic_model = mechanistic_model.mechanistic_model() error_models = [] for error_model in self._error_models: if isinstance(error_model, chi.ReducedErrorModel): error_model = error_model.get_error_model() error_models.append(error_model) submodels = dict({'Mechanistic model': mechanistic_model, 'Error models': error_models}) return submodels
def get_submodels(self): '\n \n ' mechanistic_model = self._mechanistic_model if isinstance(mechanistic_model, chi.ReducedMechanisticModel): mechanistic_model = mechanistic_model.mechanistic_model() error_models = [] for error_model in self._error_models: if isinstance(error_model, chi.ReducedErrorModel): error_model = error_model.get_error_model() error_models.append(error_model) submodels = dict({'Mechanistic model': mechanistic_model, 'Error models': error_models}) return submodels<|docstring|>Returns the submodels of the predictive model in form of a dictionary.<|endoftext|>
b57ff66b1bce0fc7397bac585ac4d5034fab1596f98b8a39f0f02f8826952152
def n_parameters(self): '\n Returns the number of parameters of the predictive model.\n ' return self._n_parameters
Returns the number of parameters of the predictive model.
chi/_predictive_models.py
n_parameters
DavAug/chi
2
python
def n_parameters(self): '\n \n ' return self._n_parameters
def n_parameters(self): '\n \n ' return self._n_parameters<|docstring|>Returns the number of parameters of the predictive model.<|endoftext|>
5f3bf77ac1519a770c7f8c206a7419c0681222b3ee6baa1f4bd7509c14bfaaf5
def sample(self, parameters, times, n_samples=None, seed=None, return_df=True, include_regimen=False): '\n Samples "measurements" of the biomarkers from the predictive model and\n returns them in form of a :class:`pandas.DataFrame` or a\n :class:`numpy.ndarray`.\n\n The mechanistic model is solved for the provided parameters and times,\n and samples around this solution are drawn from the error models for\n each time point.\n\n The number of samples for each time point can be specified with\n ``n_samples``.\n\n Parameters\n ----------\n parameters\n An array-like object with the parameter values of the predictive\n model.\n times\n An array-like object with times at which the virtual "measurements"\n are performed.\n n_samples\n The number of virtual "measurements" that are performed at each\n time point. If ``None`` the biomarkers are measured only once\n at each time point.\n seed\n A seed for the pseudo-random number generator or a\n :class:`numpy.random.Generator`.\n return_df\n A boolean flag which determines whether the output is returned as a\n :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False``\n the samples are returned as a numpy array of shape\n ``(n_outputs, n_times, n_samples)``.\n include_regimen\n A boolean flag which determines whether the dosing regimen\n information is included in the output. 
If the samples are returned\n as a :class:`numpy.ndarray`, the dosing information is not\n included.\n ' parameters = np.asarray(parameters) if (len(parameters) != self._n_parameters): raise ValueError('The length of parameters does not match n_parameters.') n_parameters = self._mechanistic_model.n_parameters() mechanistic_params = parameters[:n_parameters] error_params = parameters[n_parameters:] times = np.sort(times) outputs = self._mechanistic_model.simulate(mechanistic_params, times) n_outputs = len(outputs) n_times = len(times) n_samples = (n_samples if (n_samples is not None) else 1) container = np.empty(shape=(n_outputs, n_times, n_samples)) start_index = 0 for (output_id, error_model) in enumerate(self._error_models): end_index = (start_index + error_model.n_parameters()) container[(output_id, ...)] = error_model.sample(parameters=error_params[start_index:end_index], model_output=outputs[output_id], n_samples=n_samples, seed=seed) start_index = end_index if (return_df is False): return container output_names = self._mechanistic_model.outputs() sample_ids = np.arange(start=1, stop=(n_samples + 1)) samples = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) for (output_id, name) in enumerate(output_names): for (time_id, time) in enumerate(times): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': time, 'Observable': name, 'Value': container[(output_id, time_id, :)]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): for _id in sample_ids: regimen['ID'] = _id samples = samples.append(regimen) return samples
Samples "measurements" of the biomarkers from the predictive model and returns them in form of a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. The mechanistic model is solved for the provided parameters and times, and samples around this solution are drawn from the error models for each time point. The number of samples for each time point can be specified with ``n_samples``. Parameters ---------- parameters An array-like object with the parameter values of the predictive model. times An array-like object with times at which the virtual "measurements" are performed. n_samples The number of virtual "measurements" that are performed at each time point. If ``None`` the biomarkers are measured only once at each time point. seed A seed for the pseudo-random number generator or a :class:`numpy.random.Generator`. return_df A boolean flag which determines whether the output is returned as a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False`` the samples are returned as a numpy array of shape ``(n_outputs, n_times, n_samples)``. include_regimen A boolean flag which determines whether the dosing regimen information is included in the output. If the samples are returned as a :class:`numpy.ndarray`, the dosing information is not included.
chi/_predictive_models.py
sample
DavAug/chi
2
python
def sample(self, parameters, times, n_samples=None, seed=None, return_df=True, include_regimen=False): '\n Samples "measurements" of the biomarkers from the predictive model and\n returns them in form of a :class:`pandas.DataFrame` or a\n :class:`numpy.ndarray`.\n\n The mechanistic model is solved for the provided parameters and times,\n and samples around this solution are drawn from the error models for\n each time point.\n\n The number of samples for each time point can be specified with\n ``n_samples``.\n\n Parameters\n ----------\n parameters\n An array-like object with the parameter values of the predictive\n model.\n times\n An array-like object with times at which the virtual "measurements"\n are performed.\n n_samples\n The number of virtual "measurements" that are performed at each\n time point. If ``None`` the biomarkers are measured only once\n at each time point.\n seed\n A seed for the pseudo-random number generator or a\n :class:`numpy.random.Generator`.\n return_df\n A boolean flag which determines whether the output is returned as a\n :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False``\n the samples are returned as a numpy array of shape\n ``(n_outputs, n_times, n_samples)``.\n include_regimen\n A boolean flag which determines whether the dosing regimen\n information is included in the output. 
If the samples are returned\n as a :class:`numpy.ndarray`, the dosing information is not\n included.\n ' parameters = np.asarray(parameters) if (len(parameters) != self._n_parameters): raise ValueError('The length of parameters does not match n_parameters.') n_parameters = self._mechanistic_model.n_parameters() mechanistic_params = parameters[:n_parameters] error_params = parameters[n_parameters:] times = np.sort(times) outputs = self._mechanistic_model.simulate(mechanistic_params, times) n_outputs = len(outputs) n_times = len(times) n_samples = (n_samples if (n_samples is not None) else 1) container = np.empty(shape=(n_outputs, n_times, n_samples)) start_index = 0 for (output_id, error_model) in enumerate(self._error_models): end_index = (start_index + error_model.n_parameters()) container[(output_id, ...)] = error_model.sample(parameters=error_params[start_index:end_index], model_output=outputs[output_id], n_samples=n_samples, seed=seed) start_index = end_index if (return_df is False): return container output_names = self._mechanistic_model.outputs() sample_ids = np.arange(start=1, stop=(n_samples + 1)) samples = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) for (output_id, name) in enumerate(output_names): for (time_id, time) in enumerate(times): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': time, 'Observable': name, 'Value': container[(output_id, time_id, :)]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): for _id in sample_ids: regimen['ID'] = _id samples = samples.append(regimen) return samples
def sample(self, parameters, times, n_samples=None, seed=None, return_df=True, include_regimen=False): '\n Samples "measurements" of the biomarkers from the predictive model and\n returns them in form of a :class:`pandas.DataFrame` or a\n :class:`numpy.ndarray`.\n\n The mechanistic model is solved for the provided parameters and times,\n and samples around this solution are drawn from the error models for\n each time point.\n\n The number of samples for each time point can be specified with\n ``n_samples``.\n\n Parameters\n ----------\n parameters\n An array-like object with the parameter values of the predictive\n model.\n times\n An array-like object with times at which the virtual "measurements"\n are performed.\n n_samples\n The number of virtual "measurements" that are performed at each\n time point. If ``None`` the biomarkers are measured only once\n at each time point.\n seed\n A seed for the pseudo-random number generator or a\n :class:`numpy.random.Generator`.\n return_df\n A boolean flag which determines whether the output is returned as a\n :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False``\n the samples are returned as a numpy array of shape\n ``(n_outputs, n_times, n_samples)``.\n include_regimen\n A boolean flag which determines whether the dosing regimen\n information is included in the output. 
If the samples are returned\n as a :class:`numpy.ndarray`, the dosing information is not\n included.\n ' parameters = np.asarray(parameters) if (len(parameters) != self._n_parameters): raise ValueError('The length of parameters does not match n_parameters.') n_parameters = self._mechanistic_model.n_parameters() mechanistic_params = parameters[:n_parameters] error_params = parameters[n_parameters:] times = np.sort(times) outputs = self._mechanistic_model.simulate(mechanistic_params, times) n_outputs = len(outputs) n_times = len(times) n_samples = (n_samples if (n_samples is not None) else 1) container = np.empty(shape=(n_outputs, n_times, n_samples)) start_index = 0 for (output_id, error_model) in enumerate(self._error_models): end_index = (start_index + error_model.n_parameters()) container[(output_id, ...)] = error_model.sample(parameters=error_params[start_index:end_index], model_output=outputs[output_id], n_samples=n_samples, seed=seed) start_index = end_index if (return_df is False): return container output_names = self._mechanistic_model.outputs() sample_ids = np.arange(start=1, stop=(n_samples + 1)) samples = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) for (output_id, name) in enumerate(output_names): for (time_id, time) in enumerate(times): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': time, 'Observable': name, 'Value': container[(output_id, time_id, :)]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): for _id in sample_ids: regimen['ID'] = _id samples = samples.append(regimen) return samples<|docstring|>Samples "measurements" of the biomarkers from the predictive model and returns them in form of a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. The mechanistic model is solved for the provided parameters and times, and samples around this solution are drawn from the error models for each time point. 
The number of samples for each time point can be specified with ``n_samples``. Parameters ---------- parameters An array-like object with the parameter values of the predictive model. times An array-like object with times at which the virtual "measurements" are performed. n_samples The number of virtual "measurements" that are performed at each time point. If ``None`` the biomarkers are measured only once at each time point. seed A seed for the pseudo-random number generator or a :class:`numpy.random.Generator`. return_df A boolean flag which determines whether the output is returned as a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False`` the samples are returned as a numpy array of shape ``(n_outputs, n_times, n_samples)``. include_regimen A boolean flag which determines whether the dosing regimen information is included in the output. If the samples are returned as a :class:`numpy.ndarray`, the dosing information is not included.<|endoftext|>
f98778f98284d0955289e071b7c2ce2df0d73b102e62bc0ed07bc9ff1717eea1
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None):
    """
    Sets the dosing regimen with which the compound is administered.

    By default the dose is administered as a bolus injection (duration on
    a time scale that is 100 fold smaller than the basic time unit). To
    model an infusion of the dose over a longer time period, the
    ``duration`` can be adjusted to the appropriate time scale.

    By default the dose is administered once. To apply multiple doses
    provide a dose administration period.

    .. note::
        This method requires a :class:`MechanisticModel` that supports
        compound administration.

    Parameters
    ----------
    dose
        The amount of the compound that is injected at each administration.
    start
        Start time of the treatment.
    duration
        Duration of dose administration. For a bolus injection, a dose
        duration of 1% of the time unit should suffice. By default the
        duration is set to 0.01 (bolus).
    period
        Periodicity at which doses are administered. If ``None`` the dose
        is administered only once.
    num
        Number of administered doses. If ``None`` and the periodicity of
        the administration is not ``None``, doses are administered
        indefinitely.
    """
    # Dosing is handled by the mechanistic model; models without an
    # administration (e.g. PD models) raise an AttributeError here.
    mechanistic_model = self._mechanistic_model
    try:
        mechanistic_model.set_dosing_regimen(
            dose, start, duration, period, num)
    except AttributeError:
        raise AttributeError(
            'The mechanistic model does not support to set dosing '
            'regimens. This may be because the underlying '
            'chi.MechanisticModel is a chi.PharmacodynamicModel.')
Sets the dosing regimen with which the compound is administered. By default the dose is administered as a bolus injection (duration on a time scale that is 100 fold smaller than the basic time unit). To model an infusion of the dose over a longer time period, the ``duration`` can be adjusted to the appropriate time scale. By default the dose is administered once. To apply multiple doses provide a dose administration period. .. note:: This method requires a :class:`MechanisticModel` that supports compound administration. Parameters ---------- dose The amount of the compound that is injected at each administration. start Start time of the treatment. duration Duration of dose administration. For a bolus injection, a dose duration of 1% of the time unit should suffice. By default the duration is set to 0.01 (bolus). period Periodicity at which doses are administered. If ``None`` the dose is administered only once. num Number of administered doses. If ``None`` and the periodicity of the administration is not ``None``, doses are administered indefinitely.
chi/_predictive_models.py
set_dosing_regimen
DavAug/chi
2
python
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n compound administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' try: self._mechanistic_model.set_dosing_regimen(dose, start, duration, period, num) except AttributeError: raise AttributeError('The mechanistic model does not support to set dosing regimens. This may be because the underlying chi.MechanisticModel is a chi.PharmacodynamicModel.')
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n compound administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' try: self._mechanistic_model.set_dosing_regimen(dose, start, duration, period, num) except AttributeError: raise AttributeError('The mechanistic model does not support to set dosing regimens. This may be because the underlying chi.MechanisticModel is a chi.PharmacodynamicModel.')<|docstring|>Sets the dosing regimen with which the compound is administered. By default the dose is administered as a bolus injection (duration on a time scale that is 100 fold smaller than the basic time unit). To model an infusion of the dose over a longer time period, the ``duration`` can be adjusted to the appropriate time scale. By default the dose is administered once. To apply multiple doses provide a dose administration period. .. 
note:: This method requires a :class:`MechanisticModel` that supports compound administration. Parameters ---------- dose The amount of the compound that is injected at each administration. start Start time of the treatment. duration Duration of dose administration. For a bolus injection, a dose duration of 1% of the time unit should suffice. By default the duration is set to 0.01 (bolus). period Periodicity at which doses are administered. If ``None`` the dose is administered only once. num Number of administered doses. If ``None`` and the periodicity of the administration is not ``None``, doses are administered indefinitely.<|endoftext|>
51065e24e78c11a6f1912f65688f81da67643af452f8a2044e0eec9728068b0d
def _check_covariate_map(self, covariates, covariate_map): '\n Checks that the covariate map can be used mask the covariates.\n ' n_covariates = len(covariates) n_population_models = len(self._population_models) if (covariate_map is None): covariate_map = ([np.arange(n_covariates)] * n_population_models) return covariate_map if (len(covariate_map) != n_population_models): raise ValueError('The covariate map has to be of length n_population_models.') max_index = np.max(np.hstack(covariate_map)) if (max_index >= n_covariates): raise IndexError('The covariate map exceeds the length of the covariates.') return covariate_map
Checks that the covariate map can be used to mask the covariates.
chi/_predictive_models.py
_check_covariate_map
DavAug/chi
2
python
def _check_covariate_map(self, covariates, covariate_map): '\n \n ' n_covariates = len(covariates) n_population_models = len(self._population_models) if (covariate_map is None): covariate_map = ([np.arange(n_covariates)] * n_population_models) return covariate_map if (len(covariate_map) != n_population_models): raise ValueError('The covariate map has to be of length n_population_models.') max_index = np.max(np.hstack(covariate_map)) if (max_index >= n_covariates): raise IndexError('The covariate map exceeds the length of the covariates.') return covariate_map
def _check_covariate_map(self, covariates, covariate_map): '\n \n ' n_covariates = len(covariates) n_population_models = len(self._population_models) if (covariate_map is None): covariate_map = ([np.arange(n_covariates)] * n_population_models) return covariate_map if (len(covariate_map) != n_population_models): raise ValueError('The covariate map has to be of length n_population_models.') max_index = np.max(np.hstack(covariate_map)) if (max_index >= n_covariates): raise IndexError('The covariate map exceeds the length of the covariates.') return covariate_map<|docstring|>Checks that the covariate map can be used mask the covariates.<|endoftext|>
973d3d03f057eec4a4636bd23de3873566472d2c43516c0edcae3cfa6385e97d
def _set_population_parameter_names(self): '\n Sets the names of the population model parameters.\n\n For chi.HeterogeneousModel the bottom-level parameter is used\n as model parameter name.\n ' bottom_parameter_names = self._predictive_model.get_parameter_names() for (param_id, pop_model) in enumerate(self._population_models): pop_model.set_parameter_names(None) pop_params = pop_model.get_parameter_names() bottom_name = bottom_parameter_names[param_id] if (pop_params is not None): names = [((name + ' ') + bottom_name) for name in pop_params] else: names = [bottom_name] pop_model.set_parameter_names(names)
Sets the names of the population model parameters. For chi.HeterogeneousModel the bottom-level parameter is used as model parameter name.
chi/_predictive_models.py
_set_population_parameter_names
DavAug/chi
2
python
def _set_population_parameter_names(self): '\n Sets the names of the population model parameters.\n\n For chi.HeterogeneousModel the bottom-level parameter is used\n as model parameter name.\n ' bottom_parameter_names = self._predictive_model.get_parameter_names() for (param_id, pop_model) in enumerate(self._population_models): pop_model.set_parameter_names(None) pop_params = pop_model.get_parameter_names() bottom_name = bottom_parameter_names[param_id] if (pop_params is not None): names = [((name + ' ') + bottom_name) for name in pop_params] else: names = [bottom_name] pop_model.set_parameter_names(names)
def _set_population_parameter_names(self): '\n Sets the names of the population model parameters.\n\n For chi.HeterogeneousModel the bottom-level parameter is used\n as model parameter name.\n ' bottom_parameter_names = self._predictive_model.get_parameter_names() for (param_id, pop_model) in enumerate(self._population_models): pop_model.set_parameter_names(None) pop_params = pop_model.get_parameter_names() bottom_name = bottom_parameter_names[param_id] if (pop_params is not None): names = [((name + ' ') + bottom_name) for name in pop_params] else: names = [bottom_name] pop_model.set_parameter_names(names)<|docstring|>Sets the names of the population model parameters. For chi.HeterogeneousModel the bottom-level parameter is used as model parameter name.<|endoftext|>
dfa21e21b46dff44188dc2e910fdb0e79ea2254537c57b7c00bc2d5fe47e8c59
def _set_number_and_parameter_names(self): '\n Updates the number and names of the free model parameters.\n ' parameter_names = [] for pop_model in self._population_models: pop_params = pop_model.get_parameter_names() parameter_names += pop_params self._parameter_names = parameter_names self._n_parameters = len(self._parameter_names)
Updates the number and names of the free model parameters.
chi/_predictive_models.py
_set_number_and_parameter_names
DavAug/chi
2
python
def _set_number_and_parameter_names(self): '\n \n ' parameter_names = [] for pop_model in self._population_models: pop_params = pop_model.get_parameter_names() parameter_names += pop_params self._parameter_names = parameter_names self._n_parameters = len(self._parameter_names)
def _set_number_and_parameter_names(self): '\n \n ' parameter_names = [] for pop_model in self._population_models: pop_params = pop_model.get_parameter_names() parameter_names += pop_params self._parameter_names = parameter_names self._n_parameters = len(self._parameter_names)<|docstring|>Updates the number and names of the free model parameters.<|endoftext|>
13d9c438a270b19832a04afe5737ac6cd43c879b67f8190c733c77ab5350d18a
def fix_parameters(self, name_value_dict):
    """
    Fixes the value of model parameters, and effectively removes them as a
    parameter from the model. Fixing the value of a parameter at ``None``,
    sets the parameter free again.

    .. note::
        Parameters modelled by a :class:`HeterogeneousModel` cannot be
        fixed on the population level. If you would like to fix the
        associated parameter, fix it in the corresponding
        :class:`PredictiveModel`.

    Parameters
    ----------
    name_value_dict
        A dictionary with model parameter names as keys, and parameter
        values as values.
    """
    try:
        name_value_dict = dict(name_value_dict)
    except (TypeError, ValueError):
        raise ValueError(
            'The name-value dictionary has to be convertable to a python '
            'dictionary.')

    # Wrap each population model, so parameters can be fixed on it.
    population_models = self._population_models
    for index, model in enumerate(population_models):
        if not isinstance(model, chi.ReducedPopulationModel):
            population_models[index] = chi.ReducedPopulationModel(model)

    # Fix (or free) the requested parameters.
    for model in population_models:
        model.fix_parameters(name_value_dict)

    # Unwrap any model that no longer fixes a parameter.
    for index, model in enumerate(population_models):
        if model.n_fixed_parameters() == 0:
            population_models[index] = model.get_population_model()

    self._population_models = population_models

    # The set of free parameters may have changed; refresh the bookkeeping.
    self._set_number_and_parameter_names()
Fixes the value of model parameters, and effectively removes them as a parameter from the model. Fixing the value of a parameter at ``None``, sets the parameter free again. .. note:: Parameters modelled by a :class:`HeterogeneousModel` cannot be fixed on the population level. If you would like to fix the associated parameter, fix it in the corresponding :class:`PredictiveModel`. Parameters ---------- name_value_dict A dictionary with model parameter names as keys, and parameter values as values.
chi/_predictive_models.py
fix_parameters
DavAug/chi
2
python
def fix_parameters(self, name_value_dict): '\n Fixes the value of model parameters, and effectively removes them as a\n parameter from the model. Fixing the value of a parameter at ``None``,\n sets the parameter free again.\n\n .. note:\n Parameters modelled by a :class:`HeterogeneousModel` cannot be\n fixed on the population level. If you would like to fix the\n associated parameter, fix it in the corresponding\n :class:`PredictiveModel`.\n\n Parameters\n ----------\n name_value_dict\n A dictionary with model parameter names as keys, and parameter\n values as values.\n ' try: name_value_dict = dict(name_value_dict) except (TypeError, ValueError): raise ValueError('The name-value dictionary has to be convertable to a python dictionary.') pop_models = self._population_models for (model_id, pop_model) in enumerate(pop_models): if (not isinstance(pop_model, chi.ReducedPopulationModel)): pop_models[model_id] = chi.ReducedPopulationModel(pop_model) for pop_model in pop_models: pop_model.fix_parameters(name_value_dict) for (model_id, pop_model) in enumerate(pop_models): if (pop_model.n_fixed_parameters() == 0): pop_model = pop_model.get_population_model() pop_models[model_id] = pop_model self._population_models = pop_models self._set_number_and_parameter_names()
def fix_parameters(self, name_value_dict): '\n Fixes the value of model parameters, and effectively removes them as a\n parameter from the model. Fixing the value of a parameter at ``None``,\n sets the parameter free again.\n\n .. note:\n Parameters modelled by a :class:`HeterogeneousModel` cannot be\n fixed on the population level. If you would like to fix the\n associated parameter, fix it in the corresponding\n :class:`PredictiveModel`.\n\n Parameters\n ----------\n name_value_dict\n A dictionary with model parameter names as keys, and parameter\n values as values.\n ' try: name_value_dict = dict(name_value_dict) except (TypeError, ValueError): raise ValueError('The name-value dictionary has to be convertable to a python dictionary.') pop_models = self._population_models for (model_id, pop_model) in enumerate(pop_models): if (not isinstance(pop_model, chi.ReducedPopulationModel)): pop_models[model_id] = chi.ReducedPopulationModel(pop_model) for pop_model in pop_models: pop_model.fix_parameters(name_value_dict) for (model_id, pop_model) in enumerate(pop_models): if (pop_model.n_fixed_parameters() == 0): pop_model = pop_model.get_population_model() pop_models[model_id] = pop_model self._population_models = pop_models self._set_number_and_parameter_names()<|docstring|>Fixes the value of model parameters, and effectively removes them as a parameter from the model. Fixing the value of a parameter at ``None``, sets the parameter free again. .. note: Parameters modelled by a :class:`HeterogeneousModel` cannot be fixed on the population level. If you would like to fix the associated parameter, fix it in the corresponding :class:`PredictiveModel`. Parameters ---------- name_value_dict A dictionary with model parameter names as keys, and parameter values as values.<|endoftext|>
a6d74919c3227fcd314f2d27ce197afe596c9f22a2affd66303e84523fea8af4
def get_dosing_regimen(self, final_time=None):
    """
    Returns the dosing regimen of the compound in form of a
    :class:`pandas.DataFrame`.

    The dataframe has a time, a duration, and a dose column, which indicate
    the time point and duration of the dose administration in the time
    units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The
    dose column specifies the amount of the compound that is being
    administered in units of the drug amount variable of the mechanistic
    model.

    If an indefinitely administered dosing regimen is set, i.e. a
    finite duration and undefined number of doses, see
    :meth:`set_dosing_regimen`, only the first administration of the
    dose will appear in the dataframe. Alternatively, a final dose time
    ``final_time`` can be provided, up to which the dose events are
    registered.

    If no dosing regimen has been set, ``None`` is returned.

    Parameters
    ----------
    final_time
        Time up to which dose events are registered in the dataframe. If
        ``None``, all dose events are registered, except for indefinite
        dosing regimens. Here, only the first dose event is registered.
    """
    # The wrapped predictive model keeps track of the dosing regimen.
    predictive_model = self._predictive_model
    return predictive_model.get_dosing_regimen(final_time)
Returns the dosing regimen of the compound in form of a :class:`pandas.DataFrame`. The dataframe has a time, a duration, and a dose column, which indicate the time point and duration of the dose administration in the time units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The dose column specifies the amount of the compound that is being administered in units of the drug amount variable of the mechanistic model. If an indefinitely administered dosing regimen is set, i.e. a finite duration and undefined number of doses, see :meth:`set_dosing_regimen`, only the first administration of the dose will appear in the dataframe. Alternatively, a final dose time ``final_time`` can be provided, up to which the dose events are registered. If no dosing regimen has been set, ``None`` is returned. Parameters ---------- final_time Time up to which dose events are registered in the dataframe. If ``None``, all dose events are registered, except for indefinite dosing regimens. Here, only the first dose event is registered.
chi/_predictive_models.py
get_dosing_regimen
DavAug/chi
2
python
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. Here, only the first dose event is registered.\n ' return self._predictive_model.get_dosing_regimen(final_time)
def get_dosing_regimen(self, final_time=None): '\n Returns the dosing regimen of the compound in form of a\n :class:`pandas.DataFrame`.\n\n The dataframe has a time, a duration, and a dose column, which indicate\n the time point and duration of the dose administration in the time\n units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The\n dose column specifies the amount of the compound that is being\n administered in units of the drug amount variable of the mechanistic\n model.\n\n If an indefinitely administered dosing regimen is set, i.e. a\n finite duration and undefined number of doses, see\n :meth:`set_dosing_regimen`, only the first administration of the\n dose will appear in the dataframe. Alternatively, a final dose time\n ``final_time`` can be provided, up to which the dose events are\n registered.\n\n If no dosing regimen has been set, ``None`` is returned.\n\n Parameters\n ----------\n final_time\n Time up to which dose events are registered in the dataframe. If\n ``None``, all dose events are registered, except for indefinite\n dosing regimens. Here, only the first dose event is registered.\n ' return self._predictive_model.get_dosing_regimen(final_time)<|docstring|>Returns the dosing regimen of the compound in form of a :class:`pandas.DataFrame`. The dataframe has a time, a duration, and a dose column, which indicate the time point and duration of the dose administration in the time units of the mechanistic model, :meth:`MechanisticModel.time_unit`. The dose column specifies the amount of the compound that is being administered in units of the drug amount variable of the mechanistic model. If an indefinitely administered dosing regimen is set, i.e. a finite duration and undefined number of doses, see :meth:`set_dosing_regimen`, only the first administration of the dose will appear in the dataframe. Alternatively, a final dose time ``final_time`` can be provided, up to which the dose events are registered. 
If no dosing regimen has been set, ``None`` is returned. Parameters ---------- final_time Time up to which dose events are registered in the dataframe. If ``None``, all dose events are registered, except for indefinite dosing regimens. Here, only the first dose event is registered.<|endoftext|>
2e0b3e8f5a1959950ebf0808ee7f559f9b007c26393a16425c5a2d10b327ecc8
def get_n_outputs(self):
    """
    Returns the number of outputs.
    """
    # Delegate to the wrapped predictive model.
    predictive_model = self._predictive_model
    return predictive_model.get_n_outputs()
Returns the number of outputs.
chi/_predictive_models.py
get_n_outputs
DavAug/chi
2
python
def get_n_outputs(self): '\n \n ' return self._predictive_model.get_n_outputs()
def get_n_outputs(self): '\n \n ' return self._predictive_model.get_n_outputs()<|docstring|>Returns the number of outputs.<|endoftext|>
321ebe265017664b599be6ba28cb5e43a74a5d27dd4b4594844dbefd5e106686
def get_output_names(self):
    """
    Returns the output names.
    """
    # Delegate to the wrapped predictive model.
    predictive_model = self._predictive_model
    return predictive_model.get_output_names()
Returns the output names.
chi/_predictive_models.py
get_output_names
DavAug/chi
2
python
def get_output_names(self): '\n \n ' return self._predictive_model.get_output_names()
def get_output_names(self): '\n \n ' return self._predictive_model.get_output_names()<|docstring|>Returns the output names.<|endoftext|>
df5549d1fb5707e9f96b2fee76838fa0f962babac4e31793c55018e393604b37
def get_parameter_names(self):
    """
    Returns the parameter names of the predictive model.
    """
    # Return a shallow copy so callers cannot mutate the internal list.
    names = self._parameter_names
    return copy.copy(names)
Returns the parameter names of the predictive model.
chi/_predictive_models.py
get_parameter_names
DavAug/chi
2
python
def get_parameter_names(self): '\n \n ' return copy.copy(self._parameter_names)
def get_parameter_names(self): '\n \n ' return copy.copy(self._parameter_names)<|docstring|>Returns the parameter names of the predictive model.<|endoftext|>
0f7032f739b23e219d5ded1a78bab8f5def886d3a2845104cfc5608b6102d053
def get_submodels(self):
    """
    Returns the submodels of the predictive model in form of a dictionary.
    """
    submodels = self._predictive_model.get_submodels()

    # Unwrap any ReducedPopulationModel so callers see the raw models.
    population_models = []
    for model in self._population_models:
        if isinstance(model, chi.ReducedPopulationModel):
            model = model.get_population_model()
        population_models.append(model)

    submodels['Population models'] = population_models
    return submodels
Returns the submodels of the predictive model in form of a dictionary.
chi/_predictive_models.py
get_submodels
DavAug/chi
2
python
def get_submodels(self): '\n \n ' submodels = self._predictive_model.get_submodels() pop_models = [] for pop_model in self._population_models: if isinstance(pop_model, chi.ReducedPopulationModel): pop_model = pop_model.get_population_model() pop_models.append(pop_model) submodels['Population models'] = pop_models return submodels
def get_submodels(self): '\n \n ' submodels = self._predictive_model.get_submodels() pop_models = [] for pop_model in self._population_models: if isinstance(pop_model, chi.ReducedPopulationModel): pop_model = pop_model.get_population_model() pop_models.append(pop_model) submodels['Population models'] = pop_models return submodels<|docstring|>Returns the submodels of the predictive model in form of a dictionary.<|endoftext|>
b57ff66b1bce0fc7397bac585ac4d5034fab1596f98b8a39f0f02f8826952152
def n_parameters(self):
    """
    Returns the number of parameters of the predictive model.
    """
    # Kept up to date by _set_number_and_parameter_names().
    count = self._n_parameters
    return count
Returns the number of parameters of the predictive model.
chi/_predictive_models.py
n_parameters
DavAug/chi
2
python
def n_parameters(self): '\n \n ' return self._n_parameters
def n_parameters(self): '\n \n ' return self._n_parameters<|docstring|>Returns the number of parameters of the predictive model.<|endoftext|>
3fb287093ce14a398ad9ae2d7840cd588126bb6ed22d96b5030b91690d89a768
def sample(self, parameters, times, n_samples=None, seed=None, return_df=True, include_regimen=False, covariates=None, covariate_map=None): '\n Samples "measurements" of the biomarkers from virtual "patients" and\n returns them in form of a :class:`pandas.DataFrame` or a\n :class:`numpy.ndarray`.\n\n Virtual patients are sampled from the population models in form of\n predictive model parameters. Those parameters are then used to sample\n virtual measurements from the predictive model. For each virtual\n patient one measurement is performed at each of the provided time\n points.\n\n The number of virtual patients that is being measured can be specified\n with ``n_samples``.\n\n Parameters\n ----------\n parameters\n An array-like object with the parameter values of the predictive\n model.\n times\n An array-like object with times at which the virtual "measurements"\n are performed.\n n_samples\n The number of virtual "patients" that are measured at each\n time point. If ``None`` the biomarkers are measured only for one\n patient.\n seed\n A seed for the pseudo-random number generator or a\n :class:`numpy.random.Generator`.\n return_df\n A boolean flag which determines whether the output is returned as a\n :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False``\n the samples are returned as a numpy array of shape\n ``(n_outputs, n_times, n_samples)``.\n include_regimen\n A boolean flag which determines whether the dosing regimen\n information is included in the output. If the samples are returned\n as a :class:`numpy.ndarray`, the dosing information is not\n included.\n covariates\n A list with covariates of length c. 
Covariates are only relevant\n when CovariatePopulationModels are used.\n covariate_map\n A nested list of length n_population_models with indices that\n reference the relevant covariates for each population model.\n By default, it is assumed that all covariates are relevant for all\n population models.\n ' parameters = np.asarray(parameters) if (len(parameters) != self._n_parameters): raise ValueError('The length of parameters does not match n_parameters.') if (n_samples is None): n_samples = 1 n_samples = int(n_samples) if (covariates is not None): covariates = np.array(covariates) covariate_map = self._check_covariate_map(covariates, covariate_map) n_parameters = self._predictive_model.n_parameters() patients = np.empty(shape=(n_samples, n_parameters)) start = 0 for (pid, pop_model) in enumerate(self._population_models): if isinstance(pop_model, chi.HeterogeneousModel): patients[(:, pid)] = parameters[start] start += 1 continue end = (start + pop_model.n_parameters()) cov = (covariates[covariate_map[pid]] if covariate_map else None) if pop_model.transforms_individual_parameters(): patients[(:, pid)] = pop_model.sample(parameters=parameters[start:end], n_samples=n_samples, seed=seed, covariates=cov, return_psi=True) else: patients[(:, pid)] = pop_model.sample(parameters=parameters[start:end], n_samples=n_samples, seed=seed) start = end n_outputs = self._predictive_model.get_n_outputs() n_times = len(times) container = np.empty(shape=(n_outputs, n_times, n_samples)) times = np.sort(times) for (patient_id, patient) in enumerate(patients): sample = self._predictive_model.sample(parameters=patient, times=times, seed=seed, return_df=False) container[(..., patient_id)] = sample[(..., 0)] if (return_df is False): return container output_names = self._predictive_model.get_output_names() sample_ids = np.arange(start=1, stop=(n_samples + 1)) samples = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) for (output_id, name) in enumerate(output_names): for (time_id, 
time) in enumerate(times): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': time, 'Observable': name, 'Value': container[(output_id, time_id, :)]})) if (covariates is not None): cov_names = [] for pop_model in self._population_models: try: cov_names.append(pop_model.get_covariate_names()) except AttributeError: continue covariate_map = np.hstack(covariate_map) cov_names = np.hstack(cov_names) n_covariates = len(covariates) covariate_names = [] for idc in range(n_covariates): index = np.where((covariate_map == idc))[0][0] name = cov_names[index] covariate_names.append(name) for (idc, covariate) in enumerate(covariate_names): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': np.nan, 'Observable': covariate, 'Value': covariates[idc]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): for _id in sample_ids: regimen['ID'] = _id samples = samples.append(regimen) return samples
Samples "measurements" of the biomarkers from virtual "patients" and returns them in form of a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. Virtual patients are sampled from the population models in form of predictive model parameters. Those parameters are then used to sample virtual measurements from the predictive model. For each virtual patient one measurement is performed at each of the provided time points. The number of virtual patients that is being measured can be specified with ``n_samples``. Parameters ---------- parameters An array-like object with the parameter values of the predictive model. times An array-like object with times at which the virtual "measurements" are performed. n_samples The number of virtual "patients" that are measured at each time point. If ``None`` the biomarkers are measured only for one patient. seed A seed for the pseudo-random number generator or a :class:`numpy.random.Generator`. return_df A boolean flag which determines whether the output is returned as a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False`` the samples are returned as a numpy array of shape ``(n_outputs, n_times, n_samples)``. include_regimen A boolean flag which determines whether the dosing regimen information is included in the output. If the samples are returned as a :class:`numpy.ndarray`, the dosing information is not included. covariates A list with covariates of length c. Covariates are only relevant when CovariatePopulationModels are used. covariate_map A nested list of length n_population_models with indices that reference the relevant covariates for each population model. By default, it is assumed that all covariates are relevant for all population models.
chi/_predictive_models.py
sample
DavAug/chi
2
python
def sample(self, parameters, times, n_samples=None, seed=None, return_df=True, include_regimen=False, covariates=None, covariate_map=None): '\n Samples "measurements" of the biomarkers from virtual "patients" and\n returns them in form of a :class:`pandas.DataFrame` or a\n :class:`numpy.ndarray`.\n\n Virtual patients are sampled from the population models in form of\n predictive model parameters. Those parameters are then used to sample\n virtual measurements from the predictive model. For each virtual\n patient one measurement is performed at each of the provided time\n points.\n\n The number of virtual patients that is being measured can be specified\n with ``n_samples``.\n\n Parameters\n ----------\n parameters\n An array-like object with the parameter values of the predictive\n model.\n times\n An array-like object with times at which the virtual "measurements"\n are performed.\n n_samples\n The number of virtual "patients" that are measured at each\n time point. If ``None`` the biomarkers are measured only for one\n patient.\n seed\n A seed for the pseudo-random number generator or a\n :class:`numpy.random.Generator`.\n return_df\n A boolean flag which determines whether the output is returned as a\n :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False``\n the samples are returned as a numpy array of shape\n ``(n_outputs, n_times, n_samples)``.\n include_regimen\n A boolean flag which determines whether the dosing regimen\n information is included in the output. If the samples are returned\n as a :class:`numpy.ndarray`, the dosing information is not\n included.\n covariates\n A list with covariates of length c. 
Covariates are only relevant\n when CovariatePopulationModels are used.\n covariate_map\n A nested list of length n_population_models with indices that\n reference the relevant covariates for each population model.\n By default, it is assumed that all covariates are relevant for all\n population models.\n ' parameters = np.asarray(parameters) if (len(parameters) != self._n_parameters): raise ValueError('The length of parameters does not match n_parameters.') if (n_samples is None): n_samples = 1 n_samples = int(n_samples) if (covariates is not None): covariates = np.array(covariates) covariate_map = self._check_covariate_map(covariates, covariate_map) n_parameters = self._predictive_model.n_parameters() patients = np.empty(shape=(n_samples, n_parameters)) start = 0 for (pid, pop_model) in enumerate(self._population_models): if isinstance(pop_model, chi.HeterogeneousModel): patients[(:, pid)] = parameters[start] start += 1 continue end = (start + pop_model.n_parameters()) cov = (covariates[covariate_map[pid]] if covariate_map else None) if pop_model.transforms_individual_parameters(): patients[(:, pid)] = pop_model.sample(parameters=parameters[start:end], n_samples=n_samples, seed=seed, covariates=cov, return_psi=True) else: patients[(:, pid)] = pop_model.sample(parameters=parameters[start:end], n_samples=n_samples, seed=seed) start = end n_outputs = self._predictive_model.get_n_outputs() n_times = len(times) container = np.empty(shape=(n_outputs, n_times, n_samples)) times = np.sort(times) for (patient_id, patient) in enumerate(patients): sample = self._predictive_model.sample(parameters=patient, times=times, seed=seed, return_df=False) container[(..., patient_id)] = sample[(..., 0)] if (return_df is False): return container output_names = self._predictive_model.get_output_names() sample_ids = np.arange(start=1, stop=(n_samples + 1)) samples = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) for (output_id, name) in enumerate(output_names): for (time_id, 
time) in enumerate(times): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': time, 'Observable': name, 'Value': container[(output_id, time_id, :)]})) if (covariates is not None): cov_names = [] for pop_model in self._population_models: try: cov_names.append(pop_model.get_covariate_names()) except AttributeError: continue covariate_map = np.hstack(covariate_map) cov_names = np.hstack(cov_names) n_covariates = len(covariates) covariate_names = [] for idc in range(n_covariates): index = np.where((covariate_map == idc))[0][0] name = cov_names[index] covariate_names.append(name) for (idc, covariate) in enumerate(covariate_names): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': np.nan, 'Observable': covariate, 'Value': covariates[idc]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): for _id in sample_ids: regimen['ID'] = _id samples = samples.append(regimen) return samples
def sample(self, parameters, times, n_samples=None, seed=None, return_df=True, include_regimen=False, covariates=None, covariate_map=None): '\n Samples "measurements" of the biomarkers from virtual "patients" and\n returns them in form of a :class:`pandas.DataFrame` or a\n :class:`numpy.ndarray`.\n\n Virtual patients are sampled from the population models in form of\n predictive model parameters. Those parameters are then used to sample\n virtual measurements from the predictive model. For each virtual\n patient one measurement is performed at each of the provided time\n points.\n\n The number of virtual patients that is being measured can be specified\n with ``n_samples``.\n\n Parameters\n ----------\n parameters\n An array-like object with the parameter values of the predictive\n model.\n times\n An array-like object with times at which the virtual "measurements"\n are performed.\n n_samples\n The number of virtual "patients" that are measured at each\n time point. If ``None`` the biomarkers are measured only for one\n patient.\n seed\n A seed for the pseudo-random number generator or a\n :class:`numpy.random.Generator`.\n return_df\n A boolean flag which determines whether the output is returned as a\n :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False``\n the samples are returned as a numpy array of shape\n ``(n_outputs, n_times, n_samples)``.\n include_regimen\n A boolean flag which determines whether the dosing regimen\n information is included in the output. If the samples are returned\n as a :class:`numpy.ndarray`, the dosing information is not\n included.\n covariates\n A list with covariates of length c. 
Covariates are only relevant\n when CovariatePopulationModels are used.\n covariate_map\n A nested list of length n_population_models with indices that\n reference the relevant covariates for each population model.\n By default, it is assumed that all covariates are relevant for all\n population models.\n ' parameters = np.asarray(parameters) if (len(parameters) != self._n_parameters): raise ValueError('The length of parameters does not match n_parameters.') if (n_samples is None): n_samples = 1 n_samples = int(n_samples) if (covariates is not None): covariates = np.array(covariates) covariate_map = self._check_covariate_map(covariates, covariate_map) n_parameters = self._predictive_model.n_parameters() patients = np.empty(shape=(n_samples, n_parameters)) start = 0 for (pid, pop_model) in enumerate(self._population_models): if isinstance(pop_model, chi.HeterogeneousModel): patients[(:, pid)] = parameters[start] start += 1 continue end = (start + pop_model.n_parameters()) cov = (covariates[covariate_map[pid]] if covariate_map else None) if pop_model.transforms_individual_parameters(): patients[(:, pid)] = pop_model.sample(parameters=parameters[start:end], n_samples=n_samples, seed=seed, covariates=cov, return_psi=True) else: patients[(:, pid)] = pop_model.sample(parameters=parameters[start:end], n_samples=n_samples, seed=seed) start = end n_outputs = self._predictive_model.get_n_outputs() n_times = len(times) container = np.empty(shape=(n_outputs, n_times, n_samples)) times = np.sort(times) for (patient_id, patient) in enumerate(patients): sample = self._predictive_model.sample(parameters=patient, times=times, seed=seed, return_df=False) container[(..., patient_id)] = sample[(..., 0)] if (return_df is False): return container output_names = self._predictive_model.get_output_names() sample_ids = np.arange(start=1, stop=(n_samples + 1)) samples = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) for (output_id, name) in enumerate(output_names): for (time_id, 
time) in enumerate(times): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': time, 'Observable': name, 'Value': container[(output_id, time_id, :)]})) if (covariates is not None): cov_names = [] for pop_model in self._population_models: try: cov_names.append(pop_model.get_covariate_names()) except AttributeError: continue covariate_map = np.hstack(covariate_map) cov_names = np.hstack(cov_names) n_covariates = len(covariates) covariate_names = [] for idc in range(n_covariates): index = np.where((covariate_map == idc))[0][0] name = cov_names[index] covariate_names.append(name) for (idc, covariate) in enumerate(covariate_names): samples = samples.append(pd.DataFrame({'ID': sample_ids, 'Time': np.nan, 'Observable': covariate, 'Value': covariates[idc]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): for _id in sample_ids: regimen['ID'] = _id samples = samples.append(regimen) return samples<|docstring|>Samples "measurements" of the biomarkers from virtual "patients" and returns them in form of a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. Virtual patients are sampled from the population models in form of predictive model parameters. Those parameters are then used to sample virtual measurements from the predictive model. For each virtual patient one measurement is performed at each of the provided time points. The number of virtual patients that is being measured can be specified with ``n_samples``. Parameters ---------- parameters An array-like object with the parameter values of the predictive model. times An array-like object with times at which the virtual "measurements" are performed. n_samples The number of virtual "patients" that are measured at each time point. If ``None`` the biomarkers are measured only for one patient. seed A seed for the pseudo-random number generator or a :class:`numpy.random.Generator`. 
return_df A boolean flag which determines whether the output is returned as a :class:`pandas.DataFrame` or a :class:`numpy.ndarray`. If ``False`` the samples are returned as a numpy array of shape ``(n_outputs, n_times, n_samples)``. include_regimen A boolean flag which determines whether the dosing regimen information is included in the output. If the samples are returned as a :class:`numpy.ndarray`, the dosing information is not included. covariates A list with covariates of length c. Covariates are only relevant when CovariatePopulationModels are used. covariate_map A nested list of length n_population_models with indices that reference the relevant covariates for each population model. By default, it is assumed that all covariates are relevant for all population models.<|endoftext|>
e650fccd26289136f16da3285ab40f74de1bfcbcce786f91c05863c5eb266664
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n compound administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' self._predictive_model.set_dosing_regimen(dose, start, duration, period, num)
Sets the dosing regimen with which the compound is administered. By default the dose is administered as a bolus injection (duration on a time scale that is 100 fold smaller than the basic time unit). To model an infusion of the dose over a longer time period, the ``duration`` can be adjusted to the appropriate time scale. By default the dose is administered once. To apply multiple doses provide a dose administration period. .. note:: This method requires a :class:`MechanisticModel` that supports compound administration. Parameters ---------- dose The amount of the compound that is injected at each administration. start Start time of the treatment. duration Duration of dose administration. For a bolus injection, a dose duration of 1% of the time unit should suffice. By default the duration is set to 0.01 (bolus). period Periodicity at which doses are administered. If ``None`` the dose is administered only once. num Number of administered doses. If ``None`` and the periodicity of the administration is not ``None``, doses are administered indefinitely.
chi/_predictive_models.py
set_dosing_regimen
DavAug/chi
2
python
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n compound administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' self._predictive_model.set_dosing_regimen(dose, start, duration, period, num)
def set_dosing_regimen(self, dose, start, duration=0.01, period=None, num=None): '\n Sets the dosing regimen with which the compound is administered.\n\n By default the dose is administered as a bolus injection (duration on\n a time scale that is 100 fold smaller than the basic time unit). To\n model an infusion of the dose over a longer time period, the\n ``duration`` can be adjusted to the appropriate time scale.\n\n By default the dose is administered once. To apply multiple doses\n provide a dose administration period.\n\n .. note::\n This method requires a :class:`MechanisticModel` that supports\n compound administration.\n\n Parameters\n ----------\n dose\n The amount of the compound that is injected at each administration.\n start\n Start time of the treatment.\n duration\n Duration of dose administration. For a bolus injection, a dose\n duration of 1% of the time unit should suffice. By default the\n duration is set to 0.01 (bolus).\n period\n Periodicity at which doses are administered. If ``None`` the dose\n is administered only once.\n num\n Number of administered doses. If ``None`` and the periodicity of\n the administration is not ``None``, doses are administered\n indefinitely.\n ' self._predictive_model.set_dosing_regimen(dose, start, duration, period, num)<|docstring|>Sets the dosing regimen with which the compound is administered. By default the dose is administered as a bolus injection (duration on a time scale that is 100 fold smaller than the basic time unit). To model an infusion of the dose over a longer time period, the ``duration`` can be adjusted to the appropriate time scale. By default the dose is administered once. To apply multiple doses provide a dose administration period. .. note:: This method requires a :class:`MechanisticModel` that supports compound administration. Parameters ---------- dose The amount of the compound that is injected at each administration. start Start time of the treatment. 
duration Duration of dose administration. For a bolus injection, a dose duration of 1% of the time unit should suffice. By default the duration is set to 0.01 (bolus). period Periodicity at which doses are administered. If ``None`` the dose is administered only once. num Number of administered doses. If ``None`` and the periodicity of the administration is not ``None``, doses are administered indefinitely.<|endoftext|>
eaff5bd152f3332ed3335dba648cb39d2d2431d7fca3f070ab83c9c7cb0f900c
def sample(self, times, n_samples=None, seed=None, include_regimen=False, covariates=None, covariate_map=None): '\n Samples "measurements" of the biomarkers from the prior predictive\n model and returns them in form of a :class:`pandas.DataFrame`.\n\n For each of the ``n_samples`` a parameter set is drawn from the\n log-prior. These paramaters are then used to sample from the predictive\n model.\n\n :param times: An array-like object with times at which the virtual\n "measurements" are performed.\n :type times: List or np.ndarray.\n :param n_samples: The number of virtual "measurements" that are\n performed at each time point. If ``None`` the biomarkers are\n measured only once at each time point.\n :type n_samples: int, optional\n :param seed: A seed for the pseudo-random number generator.\n :type seed: int, optional\n :param include_regimen: A boolean flag which determines whether the\n information about the dosing regimen is included.\n :type include_regimen: bool, optional\n :param covariates: An array-like object with covariates of length c.\n Covariates are only relevant when CovariatePopulationModels are\n used.\n :type covariates: List or np.ndarray, optional\n :param covariate_map: A nested list of length n_population_models with\n indices that reference the relevant covariates for each population\n model. 
By default, it is assumed that all covariates are relevant\n for all population models.\n :type covariate_map: List[List[int]], optional\n ' if (n_samples is None): n_samples = 1 n_samples = int(n_samples) if (seed is not None): np.random.seed(seed) base_seed = seed times = np.sort(times) container = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value']) outputs = self._predictive_model.get_output_names() sample_ids = np.arange(start=1, stop=(n_samples + 1)) for sample_id in sample_ids: parameters = self._log_prior.sample().flatten() if (seed is not None): seed = (base_seed + sample_id) is_pop_pred_model = isinstance(self._predictive_model, chi.PopulationPredictiveModel) if is_pop_pred_model: sample = self._predictive_model.sample(parameters, times, n_samples, seed, return_df=False, covariates=covariates, covariate_map=covariate_map) else: sample = self._predictive_model.sample(parameters, times, n_samples, seed, return_df=False) for (output_id, name) in enumerate(outputs): container = container.append(pd.DataFrame({'ID': sample_id, 'Time': times, 'Observable': name, 'Value': sample[(output_id, :, 0)]})) final_time = np.max(times) regimen = self.get_dosing_regimen(final_time) if ((regimen is not None) and (include_regimen is True)): container = container.append(regimen) return container
Samples "measurements" of the biomarkers from the prior predictive model and returns them in form of a :class:`pandas.DataFrame`. For each of the ``n_samples`` a parameter set is drawn from the log-prior. These paramaters are then used to sample from the predictive model. :param times: An array-like object with times at which the virtual "measurements" are performed. :type times: List or np.ndarray. :param n_samples: The number of virtual "measurements" that are performed at each time point. If ``None`` the biomarkers are measured only once at each time point. :type n_samples: int, optional :param seed: A seed for the pseudo-random number generator. :type seed: int, optional :param include_regimen: A boolean flag which determines whether the information about the dosing regimen is included. :type include_regimen: bool, optional :param covariates: An array-like object with covariates of length c. Covariates are only relevant when CovariatePopulationModels are used. :type covariates: List or np.ndarray, optional :param covariate_map: A nested list of length n_population_models with indices that reference the relevant covariates for each population model. By default, it is assumed that all covariates are relevant for all population models. :type covariate_map: List[List[int]], optional
chi/_predictive_models.py
sample
DavAug/chi
2
python
def sample(
        self, times, n_samples=None, seed=None, include_regimen=False,
        covariates=None, covariate_map=None):
    """
    Returns virtual "measurements" of the biomarkers sampled from the
    prior predictive model as a :class:`pandas.DataFrame`.

    For each of the ``n_samples`` one parameter set is drawn from the
    log-prior and used to simulate the predictive model.

    :param times: Times at which the virtual "measurements" are taken.
    :type times: List or np.ndarray.
    :param n_samples: Number of virtual "measurements" performed at each
        time point. If ``None`` the biomarkers are measured only once at
        each time point.
    :type n_samples: int, optional
    :param seed: Seed for the pseudo-random number generator.
    :type seed: int, optional
    :param include_regimen: Whether the dosing regimen information is
        appended to the output.
    :type include_regimen: bool, optional
    :param covariates: Covariates of length c. Covariates are only
        relevant when CovariatePopulationModels are used.
    :type covariates: List or np.ndarray, optional
    :param covariate_map: Nested list of length n_population_models with
        indices referencing the covariates relevant for each population
        model. By default all covariates are assumed to be relevant for
        all population models.
    :type covariate_map: List[List[int]], optional
    """
    if n_samples is None:
        n_samples = 1
    n_samples = int(n_samples)

    # Seed the global RNG once; per-sample seeds are derived below from
    # the base seed so each virtual patient is reproducible.
    if seed is not None:
        np.random.seed(seed)
    base_seed = seed

    times = np.sort(times)

    measurements = pd.DataFrame(columns=['ID', 'Time', 'Observable', 'Value'])
    output_names = self._predictive_model.get_output_names()
    is_population_model = isinstance(
        self._predictive_model, chi.PopulationPredictiveModel)

    for sample_id in np.arange(start=1, stop=n_samples + 1):
        # Draw one parameter set from the prior for this virtual patient.
        parameters = self._log_prior.sample().flatten()

        if seed is not None:
            seed = base_seed + sample_id

        # Population models additionally receive the covariate inputs.
        if is_population_model:
            values = self._predictive_model.sample(
                parameters, times, n_samples, seed, return_df=False,
                covariates=covariates, covariate_map=covariate_map)
        else:
            values = self._predictive_model.sample(
                parameters, times, n_samples, seed, return_df=False)

        for output_id, name in enumerate(output_names):
            measurements = measurements.append(pd.DataFrame({
                'ID': sample_id,
                'Time': times,
                'Observable': name,
                'Value': values[output_id, :, 0]}))

    # Optionally attach the dosing regimen up to the final measured time.
    regimen = self.get_dosing_regimen(np.max(times))
    if (regimen is not None) and (include_regimen is True):
        measurements = measurements.append(regimen)

    return measurements
4e437c46c22cfb4abdff2e667e41d09b79d2086a8bab461b6ab38a815cd16d80
def get_predictive_model(self):
    """
    Returns the list of :class:`chi.PosteriorPredictiveModel` instances
    held by this ensemble.
    """
    return self._predictive_models
70baf8b28349dab8ef9b7e6201c9a7125678f08eaa995a4022dec7e0a512082b
def get_weights(self):
    """
    Returns a shallow copy of the weights assigned to the individual
    predictive models.
    """
    # Copy so callers cannot mutate the internal weights in place.
    return copy.copy(self._weights)
7b7332303b09e1916fcbfe07c8f4639ef68118306a113b8e0d85b5dea26ed740
def sample(
        self, times, n_samples=None, individual=None, seed=None,
        include_regimen=False):
    """
    Samples "measurements" of the biomarkers from the posterior predictive
    model and returns them in form of a :class:`pandas.DataFrame`.

    For each of the ``n_samples`` a parameter set is drawn from the
    approximate posterior distribution. These paramaters are then used to
    sample from the predictive model.

    :param times: Times for the virtual "measurements".
    :type times: list, numpy.ndarray of shape (n,)
    :param n_samples: The number of virtual "measurements" that are
        performed at each time point. If ``None`` the biomarkers are
        measured only once at each time point.
    :type n_samples: int, optional
    :param individual: The ID of the modelled individual. If
        ``None``, either the first ID or the population is simulated.
    :type individual: str, optional
    :param seed: A seed for the pseudo-random number generator.
    :type seed: int
    :param include_regimen: A boolean flag which determines whether the
        information about the dosing regimen is included.
    :type include_regimen: bool, optional
    """
    if n_samples is None:
        n_samples = 1
    n_samples = int(n_samples)

    # One seeded generator drives both the model selection and the
    # individual predictive models, so results are reproducible.
    rng = np.random.default_rng(seed=seed)

    # Draw which predictive model produces each of the n_samples
    # measurements, proportionally to the ensemble weights.
    # BUG FIX: previously the unseeded global np.random.choice was used
    # here, so ``seed`` did not control the model selection.
    n_models = len(self._predictive_models)
    model_indices = np.arange(n_models)
    model_draws = rng.choice(model_indices, p=self._weights, size=n_samples)

    samples_per_model = np.zeros(n_models, dtype=int)
    for model_id in model_indices:
        samples_per_model[model_id] = np.sum(model_draws == model_id)

    samples = []
    # NOTE: loop variable renamed from ``n_samples`` to avoid shadowing
    # the function parameter.
    for model_id, n_model_samples in enumerate(samples_per_model):
        if n_model_samples == 0:
            continue

        model = self._predictive_models[model_id]
        s = model.sample(times, n_model_samples, individual, seed=rng)

        # Shift the sample IDs so IDs are unique across models.
        s['ID'] += int(np.sum(samples_per_model[:model_id]))
        samples.append(s)
    samples = pd.concat(samples)

    # Optionally append the dosing regimen up to the final time.
    final_time = np.max(times)
    regimen = self.get_dosing_regimen(final_time)
    if (regimen is not None) and (include_regimen is True):
        samples = samples.append(regimen)

    return samples
4129c5a29bc123a333f49a1b81dfca2cfbd37c50963ab69037115ffcc4d09c55
def set_dosing_regimen(
        self, dose, start, duration=0.01, period=None, num=None):
    """
    Sets the dosing regimen with which the compound is administered,
    forwarding the regimen to every predictive model in the ensemble.

    By default the dose is given as a bolus injection (the duration is
    100-fold smaller than the basic time unit). Increase ``duration`` to
    model an infusion over a longer time period. By default the dose is
    administered once; provide ``period`` to apply repeated doses.

    .. note::
        This method requires a :class:`MechanisticModel` that supports
        dose administration.

    Parameters
    ----------
    dose
        Amount of compound injected at each administration.
    start
        Start time of the treatment.
    duration
        Duration of the dose administration; defaults to 0.01 (bolus).
    period
        Periodicity of the administrations. If ``None`` the dose is
        administered only once.
    num
        Number of administered doses. If ``None`` while ``period`` is
        set, doses are administered indefinitely.
    """
    for model in self._predictive_models:
        model.set_dosing_regimen(dose, start, duration, period, num)
8da19b0f13c2df9e671f17e0948cec8fad41162a36eddc33593bd6da79e918f2
def _gaussian(x, height, center, fwhm, offset=0.0):
    """Gaussian profile with an optional constant offset.

    Parameters
    ----------

    x : float array
        Points at which the Gaussian is evaluated.

    height : float
        Peak value of the Gaussian above the offset.

    center : float
        Position of the maximum.

    fwhm : float
        Full width at half maximum of the Gaussian.

    offset : float
        Constant added to the Gaussian; the value at infinity.

    """
    exponent = (-(((x - center) ** 2) * 4.0 * numpy.log(2.0))) / (fwhm ** 2)
    return height * numpy.exp(exponent) + offset
c60880e107ba543d369a36ae061e7873118a4c614403f8cc00403a6989c90a6f
def _n_gaussians(x, N, *params):
    """Sum of N Gaussian functions plus an offset from zero.

    Parameters
    ----------

    x : float
        Values to calculate the Gaussians at.

    N : int
        Number of Gaussians.

    params : floats
        3*N + 1 parameters: (height, center, fwhm) for each Gaussian,
        followed by a single offset value.

    Raises
    ------
    Exception
        If the number of parameters is not 3*N + 1.

    """
    n = len(params)
    # BUG FIX: the guard previously accepted only n == 3*N parameters
    # (k*3 == n with k = n//3), which contradicts the documented
    # 3*N + 1 layout and made the function raise for valid input.
    if n != 3 * N + 1:
        raise Exception('Inconsistent number of parameters')

    res = 0.0
    for i in range(N):
        height, center, fwhm = params[3 * i:3 * i + 3]
        res += _gaussian(x, height, center, fwhm)
    # The trailing parameter is the shared offset.
    res += params[n - 1]
    return res
59526fe45f8712eba31d76cd8f8c3a6ce02061dfdc3d83a30612671e7e2261f6
def set_axis(self, axis):
    """Stores the frequency axis of the spectrum.

    Parameters
    ----------

    axis : FrequencyAxis object
        Frequency axis object; this object has managed energy units.

    """
    self.axis = axis
672b1033725a62ef8a573e78e32adc9bf5afe0ab53918aa0a8533f96c39e1428
def set_data(self, data):
    """Stores the data of the absorption spectrum.

    Parameters
    ----------

    data : array like object (numpy array)
        The absorption spectrum values to store.

    """
    self.data = data
4a5d88e6e862026cba9cca61efbd955866bae6b7ef10e2bca9f04c0455515694
def clear_data(self):
    """Resets the spectrum data to zeros of the same shape."""
    self.data = numpy.zeros(self.data.shape, dtype=numpy.float64)
f92100f4f682544241b6b75d8f516851bfb7f8212e7ee024e673a0839f720531
def normalize2(self, norm=1.0):
    """Rescales the spectrum so that its maximum equals ``norm``."""
    peak = numpy.max(self.data)
    self.data = (norm * self.data) / peak
21c2933b7c8eb148ba9f6a7bba67b0870052d43eefb14684f26ef735f2c6cd02
def normalize(self):
    """Rescales the spectrum so that its maximum equals one."""
    self.normalize2(norm=1.0)
5a9b4747edd2594a1d703155d1fcf64c4a49ac271c7191d51046082f439883ef
def add_to_data(self, spect):
    """Adds the data of another spectrum to this one.

    The argument must carry the same frequency axis as this spectrum;
    if this spectrum has no axis or data yet, they are initialized from
    the argument.

    Parameters
    ----------

    spect : spectrum containing object
        Object with a compatible axis and some data.

    """
    other_axis = spect.axis
    if self.axis is None:
        self.axis = other_axis.copy()

    axes_match = numpy.allclose(other_axis.data, self.axis.data)
    if not axes_match:
        # Dump both axes to disk to ease debugging of the mismatch.
        numpy.savetxt('spect_data_wrong.dat', other_axis.data)
        numpy.savetxt('self_data_wrong.dat', self.axis.data)
        raise Exception('Incompatible axis')

    if self.data is None:
        # NOTE: the zero buffer inherits the *axis* dtype, as in the
        # original implementation.
        self.data = numpy.zeros(len(spect.data), dtype=other_axis.data.dtype)
    self.data += spect.data
68e8bdda7fe4725231385e4f095b265111dd07b24fbb585a3b46a9031aebcc54
def load(self, filename, ext=None, replace=False):
    """Loads the absorption spectrum from a file.

    Delegates to the inherited :meth:`DFunction.load`, forcing the axis
    type to 'frequency'; no other changes to the inherited behavior.

    Parameters
    ----------

    """
    super().load(filename, ext=ext, axis='frequency', replace=replace)
914362b7520fb7c1fb51fcb48c62b8941265fa9383d1e0886bbd9bf82814d644
def plot(self, **kwargs): ' Plotting absorption spectrum using the DFunction plot method\n \n ' if ('ylabel' not in kwargs): ylabel = '$\\alpha(\\omega)$ [a.u.]' kwargs['ylabel'] = ylabel super().plot(**kwargs)
Plotting absorption spectrum using the DFunction plot method
quantarhei/spectroscopy/abs.py
plot
detrin/quantarhei
14
python
def plot(self, **kwargs): ' \n \n ' if ('ylabel' not in kwargs): ylabel = '$\\alpha(\\omega)$ [a.u.]' kwargs['ylabel'] = ylabel super().plot(**kwargs)
def plot(self, **kwargs): ' \n \n ' if ('ylabel' not in kwargs): ylabel = '$\\alpha(\\omega)$ [a.u.]' kwargs['ylabel'] = ylabel super().plot(**kwargs)<|docstring|>Plotting absorption spectrum using the DFunction plot method<|endoftext|>
1ca47406ae318fba028007f0c5627232690e81199204b82861eddca7cc969555
def difference(self, par=None): 'Calculates difference between spectra\n \n Calculates difference between the target spectrum and the spectrum\n calculated from submitted parameters\n \n Parameters\n ----------\n \n par : list or array (optional)\n parameters of the function \n \n ' target = self.target.data[self.nl:self.nu] if self._can_minimize: if (par is None): raise Exception(('Function parameters must be specified ' + 'to calculate difference')) secabs = self.optfce(par) sdat = numpy.zeros(len(self.x), dtype=numpy.float64) i = 0 for xi in self.x: sdat[i] = secabs.at(xi) i += 1 secabs = sdat else: secabs = self.secabs sdat = numpy.zeros(len(self.x), dtype=numpy.float64) i = 0 for xi in self.x: sdat[i] = secabs.at(xi) i += 1 secabs = sdat if (self.difftype == 'square'): diff = (1000.0 * numpy.sum(numpy.abs((((target - secabs) ** 2) / (self.x[(len(self.x) - 1)] - self.x[0]))))) elif (self.difftype == 'measure'): diff = 0.0 else: raise Exception('Unknown differene type') print('DIFF: ', diff) return diff
Calculates difference between spectra Calculates difference between the target spectrum and the spectrum calculated from submitted parameters Parameters ---------- par : list or array (optional) parameters of the function
quantarhei/spectroscopy/abs.py
difference
detrin/quantarhei
14
python
def difference(self, par=None): 'Calculates difference between spectra\n \n Calculates difference between the target spectrum and the spectrum\n calculated from submitted parameters\n \n Parameters\n ----------\n \n par : list or array (optional)\n parameters of the function \n \n ' target = self.target.data[self.nl:self.nu] if self._can_minimize: if (par is None): raise Exception(('Function parameters must be specified ' + 'to calculate difference')) secabs = self.optfce(par) sdat = numpy.zeros(len(self.x), dtype=numpy.float64) i = 0 for xi in self.x: sdat[i] = secabs.at(xi) i += 1 secabs = sdat else: secabs = self.secabs sdat = numpy.zeros(len(self.x), dtype=numpy.float64) i = 0 for xi in self.x: sdat[i] = secabs.at(xi) i += 1 secabs = sdat if (self.difftype == 'square'): diff = (1000.0 * numpy.sum(numpy.abs((((target - secabs) ** 2) / (self.x[(len(self.x) - 1)] - self.x[0]))))) elif (self.difftype == 'measure'): diff = 0.0 else: raise Exception('Unknown differene type') print('DIFF: ', diff) return diff
def difference(self, par=None): 'Calculates difference between spectra\n \n Calculates difference between the target spectrum and the spectrum\n calculated from submitted parameters\n \n Parameters\n ----------\n \n par : list or array (optional)\n parameters of the function \n \n ' target = self.target.data[self.nl:self.nu] if self._can_minimize: if (par is None): raise Exception(('Function parameters must be specified ' + 'to calculate difference')) secabs = self.optfce(par) sdat = numpy.zeros(len(self.x), dtype=numpy.float64) i = 0 for xi in self.x: sdat[i] = secabs.at(xi) i += 1 secabs = sdat else: secabs = self.secabs sdat = numpy.zeros(len(self.x), dtype=numpy.float64) i = 0 for xi in self.x: sdat[i] = secabs.at(xi) i += 1 secabs = sdat if (self.difftype == 'square'): diff = (1000.0 * numpy.sum(numpy.abs((((target - secabs) ** 2) / (self.x[(len(self.x) - 1)] - self.x[0]))))) elif (self.difftype == 'measure'): diff = 0.0 else: raise Exception('Unknown differene type') print('DIFF: ', diff) return diff<|docstring|>Calculates difference between spectra Calculates difference between the target spectrum and the spectrum calculated from submitted parameters Parameters ---------- par : list or array (optional) parameters of the function<|endoftext|>
358275cf0d5bd444503d2091703ef84c36e76c9f72a419585e2ae15d59831768
def minimize(self, init_params, method): 'Minimizes the submitted function and returns optimal parameters\n \n ' if self._can_minimize: from scipy.optimize import minimize self.opt_result = minimize(self.difference, init_params, method=method, tol=self.tol, options=dict(disp=True)) return self.opt_result.x else: raise Exception(('Cannot perform minimization, ' + 'no function suplied'))
Minimizes the submitted function and returns optimal parameters
quantarhei/spectroscopy/abs.py
minimize
detrin/quantarhei
14
python
def minimize(self, init_params, method): '\n \n ' if self._can_minimize: from scipy.optimize import minimize self.opt_result = minimize(self.difference, init_params, method=method, tol=self.tol, options=dict(disp=True)) return self.opt_result.x else: raise Exception(('Cannot perform minimization, ' + 'no function suplied'))
def minimize(self, init_params, method): '\n \n ' if self._can_minimize: from scipy.optimize import minimize self.opt_result = minimize(self.difference, init_params, method=method, tol=self.tol, options=dict(disp=True)) return self.opt_result.x else: raise Exception(('Cannot perform minimization, ' + 'no function suplied'))<|docstring|>Minimizes the submitted function and returns optimal parameters<|endoftext|>
c8aa61ec278af851e5e40b41130eef28be8632427b019c0465f9eb02fd7fa6b3
def _frequency(self, dt): ' Calculates the frequency axis corresponding to TimeAxis\n \n \n ' Nt = self.TimeAxis.length return (numpy.pi * numpy.fft.fftshift(numpy.fft.fftfreq(Nt, d=dt)))
Calculates the frequency axis corresponding to TimeAxis
quantarhei/spectroscopy/abs.py
_frequency
detrin/quantarhei
14
python
def _frequency(self, dt): ' \n \n \n ' Nt = self.TimeAxis.length return (numpy.pi * numpy.fft.fftshift(numpy.fft.fftfreq(Nt, d=dt)))
def _frequency(self, dt): ' \n \n \n ' Nt = self.TimeAxis.length return (numpy.pi * numpy.fft.fftshift(numpy.fft.fftfreq(Nt, d=dt)))<|docstring|>Calculates the frequency axis corresponding to TimeAxis<|endoftext|>
8615e2c3b8bc1a81dbbc79449252f95d461fd3fb1d5b3975c09d6e43b6e55899
def plot(self, **kwargs): ' Plotting absorption spectrum using the DFunction plot method\n \n ' if ('ylabel' not in kwargs): ylabel = '$\\alpha(\\omega)$ [a.u.]' kwargs['ylabel'] = ylabel super(AbsSpectContainer, self).plot(**kwargs)
Plotting absorption spectrum using the DFunction plot method
quantarhei/spectroscopy/abs.py
plot
detrin/quantarhei
14
python
def plot(self, **kwargs): ' \n \n ' if ('ylabel' not in kwargs): ylabel = '$\\alpha(\\omega)$ [a.u.]' kwargs['ylabel'] = ylabel super(AbsSpectContainer, self).plot(**kwargs)
def plot(self, **kwargs): ' \n \n ' if ('ylabel' not in kwargs): ylabel = '$\\alpha(\\omega)$ [a.u.]' kwargs['ylabel'] = ylabel super(AbsSpectContainer, self).plot(**kwargs)<|docstring|>Plotting absorption spectrum using the DFunction plot method<|endoftext|>
e36dc40c5775510b4b884f528dc55efd2c61f99950be16709d0dd96d996c2d19
def calculate(self, rwa=0.0): ' Calculates the absorption spectrum \n \n \n ' rwa = self.convert_2_internal_u(rwa) with energy_units('int'): if (self.system is not None): if isinstance(self.system, Molecule): self._calculate_monomer(rwa) elif isinstance(self.system, Aggregate): self._calculate_aggregate(rwa, relaxation_tensor=self._relaxation_tensor, rate_matrix=self._rate_matrix, relaxation_hamiltonian=self._relaxation_hamiltonian) else: raise Exception('System to calculate spectrum for not defined')
Calculates the absorption spectrum
quantarhei/spectroscopy/abs.py
calculate
detrin/quantarhei
14
python
def calculate(self, rwa=0.0): ' \n \n \n ' rwa = self.convert_2_internal_u(rwa) with energy_units('int'): if (self.system is not None): if isinstance(self.system, Molecule): self._calculate_monomer(rwa) elif isinstance(self.system, Aggregate): self._calculate_aggregate(rwa, relaxation_tensor=self._relaxation_tensor, rate_matrix=self._rate_matrix, relaxation_hamiltonian=self._relaxation_hamiltonian) else: raise Exception('System to calculate spectrum for not defined')
def calculate(self, rwa=0.0): ' \n \n \n ' rwa = self.convert_2_internal_u(rwa) with energy_units('int'): if (self.system is not None): if isinstance(self.system, Molecule): self._calculate_monomer(rwa) elif isinstance(self.system, Aggregate): self._calculate_aggregate(rwa, relaxation_tensor=self._relaxation_tensor, rate_matrix=self._rate_matrix, relaxation_hamiltonian=self._relaxation_hamiltonian) else: raise Exception('System to calculate spectrum for not defined')<|docstring|>Calculates the absorption spectrum<|endoftext|>
daf00747e0f78f17a920f8b5177c2607c36135491c86e20e6eb3e6ecb63853ea
def _c2g(self, timeaxis, coft): ' Converts correlation function to lineshape function\n \n Explicit numerical double integration of the correlation\n function to form a lineshape function.\n\n Parameters\n ----------\n\n timeaxis : cu.oqs.time.TimeAxis\n TimeAxis of the correlation function\n \n coft : complex numpy array\n Values of correlation function given at points specified\n in the TimeAxis object\n \n \n ' ta = timeaxis rr = numpy.real(coft) ri = numpy.imag(coft) sr = scipy.interpolate.UnivariateSpline(ta.data, rr, s=0).antiderivative()(ta.data) sr = scipy.interpolate.UnivariateSpline(ta.data, sr, s=0).antiderivative()(ta.data) si = scipy.interpolate.UnivariateSpline(ta.data, ri, s=0).antiderivative()(ta.data) si = scipy.interpolate.UnivariateSpline(ta.data, si, s=0).antiderivative()(ta.data) gt = (sr + (1j * si)) return gt
Converts correlation function to lineshape function Explicit numerical double integration of the correlation function to form a lineshape function. Parameters ---------- timeaxis : cu.oqs.time.TimeAxis TimeAxis of the correlation function coft : complex numpy array Values of correlation function given at points specified in the TimeAxis object
quantarhei/spectroscopy/abs.py
_c2g
detrin/quantarhei
14
python
def _c2g(self, timeaxis, coft): ' Converts correlation function to lineshape function\n \n Explicit numerical double integration of the correlation\n function to form a lineshape function.\n\n Parameters\n ----------\n\n timeaxis : cu.oqs.time.TimeAxis\n TimeAxis of the correlation function\n \n coft : complex numpy array\n Values of correlation function given at points specified\n in the TimeAxis object\n \n \n ' ta = timeaxis rr = numpy.real(coft) ri = numpy.imag(coft) sr = scipy.interpolate.UnivariateSpline(ta.data, rr, s=0).antiderivative()(ta.data) sr = scipy.interpolate.UnivariateSpline(ta.data, sr, s=0).antiderivative()(ta.data) si = scipy.interpolate.UnivariateSpline(ta.data, ri, s=0).antiderivative()(ta.data) si = scipy.interpolate.UnivariateSpline(ta.data, si, s=0).antiderivative()(ta.data) gt = (sr + (1j * si)) return gt
def _c2g(self, timeaxis, coft): ' Converts correlation function to lineshape function\n \n Explicit numerical double integration of the correlation\n function to form a lineshape function.\n\n Parameters\n ----------\n\n timeaxis : cu.oqs.time.TimeAxis\n TimeAxis of the correlation function\n \n coft : complex numpy array\n Values of correlation function given at points specified\n in the TimeAxis object\n \n \n ' ta = timeaxis rr = numpy.real(coft) ri = numpy.imag(coft) sr = scipy.interpolate.UnivariateSpline(ta.data, rr, s=0).antiderivative()(ta.data) sr = scipy.interpolate.UnivariateSpline(ta.data, sr, s=0).antiderivative()(ta.data) si = scipy.interpolate.UnivariateSpline(ta.data, ri, s=0).antiderivative()(ta.data) si = scipy.interpolate.UnivariateSpline(ta.data, si, s=0).antiderivative()(ta.data) gt = (sr + (1j * si)) return gt<|docstring|>Converts correlation function to lineshape function Explicit numerical double integration of the correlation function to form a lineshape function. Parameters ---------- timeaxis : cu.oqs.time.TimeAxis TimeAxis of the correlation function coft : complex numpy array Values of correlation function given at points specified in the TimeAxis object<|endoftext|>
f9bd2af076d242b2c9a2421eef1fade0d85673dd2c6eeca1b4d35884fb53f091
def one_transition_spectrum(self, tr): ' Calculates spectrum of one transition\n \n \n ' ta = tr['ta'] dd = tr['dd'] om = tr['om'] gg = tr['gg'] if self.system._has_system_bath_coupling: ct = tr['ct'] gt = self._c2g(ta, ct.data) at = numpy.exp(((- gt) - ((1j * om) * ta.data))) else: at = numpy.exp((((- 1j) * om) * ta.data)) if (len(gg) == 1): gam = gg[0] rt = numpy.exp((gam * ta.data)) at *= rt else: rt = numpy.exp((gg * ta.data)) at *= rt ft = ((dd * numpy.fft.hfft(at)) * ta.step) ft = numpy.fft.fftshift(ft) ft = numpy.flipud(ft) Nt = ta.length return ft[(Nt // 2):(Nt + (Nt // 2))]
Calculates spectrum of one transition
quantarhei/spectroscopy/abs.py
one_transition_spectrum
detrin/quantarhei
14
python
def one_transition_spectrum(self, tr): ' \n \n \n ' ta = tr['ta'] dd = tr['dd'] om = tr['om'] gg = tr['gg'] if self.system._has_system_bath_coupling: ct = tr['ct'] gt = self._c2g(ta, ct.data) at = numpy.exp(((- gt) - ((1j * om) * ta.data))) else: at = numpy.exp((((- 1j) * om) * ta.data)) if (len(gg) == 1): gam = gg[0] rt = numpy.exp((gam * ta.data)) at *= rt else: rt = numpy.exp((gg * ta.data)) at *= rt ft = ((dd * numpy.fft.hfft(at)) * ta.step) ft = numpy.fft.fftshift(ft) ft = numpy.flipud(ft) Nt = ta.length return ft[(Nt // 2):(Nt + (Nt // 2))]
def one_transition_spectrum(self, tr): ' \n \n \n ' ta = tr['ta'] dd = tr['dd'] om = tr['om'] gg = tr['gg'] if self.system._has_system_bath_coupling: ct = tr['ct'] gt = self._c2g(ta, ct.data) at = numpy.exp(((- gt) - ((1j * om) * ta.data))) else: at = numpy.exp((((- 1j) * om) * ta.data)) if (len(gg) == 1): gam = gg[0] rt = numpy.exp((gam * ta.data)) at *= rt else: rt = numpy.exp((gg * ta.data)) at *= rt ft = ((dd * numpy.fft.hfft(at)) * ta.step) ft = numpy.fft.fftshift(ft) ft = numpy.flipud(ft) Nt = ta.length return ft[(Nt // 2):(Nt + (Nt // 2))]<|docstring|>Calculates spectrum of one transition<|endoftext|>
8d9e8ea8f060f0fdd9c7c77c40c5f1abd654b10981087ab663c6a802258cf2d2
def _excitonic_coft(self, SS, AG, n): ' Returns energy gap correlation function data of an exciton state \n \n ' c0 = AG.monomers[0].get_egcf((0, 1)) Nt = len(c0) sbi = AG.get_SystemBathInteraction() cfm = sbi.CC ct = numpy.zeros(Nt, dtype=numpy.complex128) Na = AG.nmono for kk in range(Na): for ll in range(Na): ct += (((SS[((kk + 1), (n + 1))] ** 2) * (SS[((ll + 1), (n + 1))] ** 2)) * cfm.get_coft(kk, ll)) return ct
Returns energy gap correlation function data of an exciton state
quantarhei/spectroscopy/abs.py
_excitonic_coft
detrin/quantarhei
14
python
def _excitonic_coft(self, SS, AG, n): ' \n \n ' c0 = AG.monomers[0].get_egcf((0, 1)) Nt = len(c0) sbi = AG.get_SystemBathInteraction() cfm = sbi.CC ct = numpy.zeros(Nt, dtype=numpy.complex128) Na = AG.nmono for kk in range(Na): for ll in range(Na): ct += (((SS[((kk + 1), (n + 1))] ** 2) * (SS[((ll + 1), (n + 1))] ** 2)) * cfm.get_coft(kk, ll)) return ct
def _excitonic_coft(self, SS, AG, n): ' \n \n ' c0 = AG.monomers[0].get_egcf((0, 1)) Nt = len(c0) sbi = AG.get_SystemBathInteraction() cfm = sbi.CC ct = numpy.zeros(Nt, dtype=numpy.complex128) Na = AG.nmono for kk in range(Na): for ll in range(Na): ct += (((SS[((kk + 1), (n + 1))] ** 2) * (SS[((ll + 1), (n + 1))] ** 2)) * cfm.get_coft(kk, ll)) return ct<|docstring|>Returns energy gap correlation function data of an exciton state<|endoftext|>
94022c91ad9adf857d85a8d564d33619fe0986def83974578fed4b0765c2c901
def _calculate_monomer(self, rwa): ' Calculates the absorption spectrum of a monomer \n \n \n ' ta = self.TimeAxis om = (self.system.elenergies[1] - self.system.elenergies[0]) dm = self.system.dmoments[(0, 1, :)] dd = numpy.dot(dm, dm) gama = [((- 1.0) / self.system.get_electronic_natural_lifetime(1))] if self.system._has_system_bath_coupling: ct = self.system.get_egcf((0, 1)) tr = {'ta': ta, 'dd': dd, 'om': (om - rwa), 'ct': ct, 'gg': gama} else: tr = {'ta': ta, 'dd': dd, 'om': (om - rwa), 'gg': gama} self.data = numpy.real(self.one_transition_spectrum(tr)) self.frequencyAxis = self.TimeAxis.get_FrequencyAxis() self.frequencyAxis.data += rwa Nt = (len(self.frequencyAxis.data) // 2) do = (self.frequencyAxis.data[1] - self.frequencyAxis.data[0]) st = self.frequencyAxis.data[(Nt // 2)] self.axis = FrequencyAxis(st, Nt, do) self.frequency = (self._frequency(ta.step) + rwa)
Calculates the absorption spectrum of a monomer
quantarhei/spectroscopy/abs.py
_calculate_monomer
detrin/quantarhei
14
python
def _calculate_monomer(self, rwa): ' \n \n \n ' ta = self.TimeAxis om = (self.system.elenergies[1] - self.system.elenergies[0]) dm = self.system.dmoments[(0, 1, :)] dd = numpy.dot(dm, dm) gama = [((- 1.0) / self.system.get_electronic_natural_lifetime(1))] if self.system._has_system_bath_coupling: ct = self.system.get_egcf((0, 1)) tr = {'ta': ta, 'dd': dd, 'om': (om - rwa), 'ct': ct, 'gg': gama} else: tr = {'ta': ta, 'dd': dd, 'om': (om - rwa), 'gg': gama} self.data = numpy.real(self.one_transition_spectrum(tr)) self.frequencyAxis = self.TimeAxis.get_FrequencyAxis() self.frequencyAxis.data += rwa Nt = (len(self.frequencyAxis.data) // 2) do = (self.frequencyAxis.data[1] - self.frequencyAxis.data[0]) st = self.frequencyAxis.data[(Nt // 2)] self.axis = FrequencyAxis(st, Nt, do) self.frequency = (self._frequency(ta.step) + rwa)
def _calculate_monomer(self, rwa): ' \n \n \n ' ta = self.TimeAxis om = (self.system.elenergies[1] - self.system.elenergies[0]) dm = self.system.dmoments[(0, 1, :)] dd = numpy.dot(dm, dm) gama = [((- 1.0) / self.system.get_electronic_natural_lifetime(1))] if self.system._has_system_bath_coupling: ct = self.system.get_egcf((0, 1)) tr = {'ta': ta, 'dd': dd, 'om': (om - rwa), 'ct': ct, 'gg': gama} else: tr = {'ta': ta, 'dd': dd, 'om': (om - rwa), 'gg': gama} self.data = numpy.real(self.one_transition_spectrum(tr)) self.frequencyAxis = self.TimeAxis.get_FrequencyAxis() self.frequencyAxis.data += rwa Nt = (len(self.frequencyAxis.data) // 2) do = (self.frequencyAxis.data[1] - self.frequencyAxis.data[0]) st = self.frequencyAxis.data[(Nt // 2)] self.axis = FrequencyAxis(st, Nt, do) self.frequency = (self._frequency(ta.step) + rwa)<|docstring|>Calculates the absorption spectrum of a monomer<|endoftext|>
054da6133033387bbb3743cfd948b59ee89c9cfe27619bd7a76f9bd75c7fa903
def _calculate_aggregate(self, rwa, relaxation_tensor=None, relaxation_hamiltonian=None, rate_matrix=None): ' Calculates the absorption spectrum of a molecular aggregate\n \n \n \n ' ta = self.TimeAxis if (relaxation_hamiltonian is None): HH = self.system.get_Hamiltonian() else: HH = relaxation_hamiltonian SS = HH.diagonalize() DD = self.system.get_TransitionDipoleMoment() DD.transform(SS) tr = {'ta': ta} if (relaxation_tensor is not None): RR = relaxation_tensor RR.transform(SS) gg = [] if isinstance(RR, TimeDependent): for ii in range(HH.dim): gg.append(RR.data[(:, ii, ii, ii, ii)]) else: for ii in range(HH.dim): gg.append([RR.data[(ii, ii, ii, ii)]]) tr['gg'] = gg[1] elif (rate_matrix is not None): RR = rate_matrix gg = [] if isinstance(RR, TimeDependent): for ii in range(HH.dim): gg.append(RR.data[(:, ii, ii)]) else: for ii in range(HH.dim): gg.append([RR.data[(ii, ii)]]) tr['gg'] = gg[1] else: tr['gg'] = [0.0] tr['dd'] = DD.dipole_strength(0, 1) tr['om'] = ((HH.data[(1, 1)] - HH.data[(0, 0)]) - rwa) ct = self._excitonic_coft(SS, self.system, 0) tr['ct'] = ct self.system._has_system_bath_coupling = True self.data = numpy.real(self.one_transition_spectrum(tr)) for ii in range(2, HH.dim): if (relaxation_tensor is not None): tr['gg'] = gg[ii] else: tr['gg'] = [0.0] tr['dd'] = DD.dipole_strength(0, ii) tr['om'] = ((HH.data[(ii, ii)] - HH.data[(0, 0)]) - rwa) tr['ct'] = self._excitonic_coft(SS, self.system, (ii - 1)) self.data += numpy.real(self.one_transition_spectrum(tr)) self.frequencyAxis = self.TimeAxis.get_FrequencyAxis() self.frequencyAxis.data += rwa Nt = (len(self.frequencyAxis.data) // 2) do = (self.frequencyAxis.data[1] - self.frequencyAxis.data[0]) st = self.frequencyAxis.data[(Nt // 2)] self.axis = FrequencyAxis(st, Nt, do) self.frequency = (self._frequency(ta.step) + rwa) S1 = numpy.linalg.inv(SS) HH.transform(S1) DD.transform(S1) if (relaxation_tensor is not None): RR.transform(S1)
Calculates the absorption spectrum of a molecular aggregate
quantarhei/spectroscopy/abs.py
_calculate_aggregate
detrin/quantarhei
14
python
def _calculate_aggregate(self, rwa, relaxation_tensor=None, relaxation_hamiltonian=None, rate_matrix=None): ' \n \n \n \n ' ta = self.TimeAxis if (relaxation_hamiltonian is None): HH = self.system.get_Hamiltonian() else: HH = relaxation_hamiltonian SS = HH.diagonalize() DD = self.system.get_TransitionDipoleMoment() DD.transform(SS) tr = {'ta': ta} if (relaxation_tensor is not None): RR = relaxation_tensor RR.transform(SS) gg = [] if isinstance(RR, TimeDependent): for ii in range(HH.dim): gg.append(RR.data[(:, ii, ii, ii, ii)]) else: for ii in range(HH.dim): gg.append([RR.data[(ii, ii, ii, ii)]]) tr['gg'] = gg[1] elif (rate_matrix is not None): RR = rate_matrix gg = [] if isinstance(RR, TimeDependent): for ii in range(HH.dim): gg.append(RR.data[(:, ii, ii)]) else: for ii in range(HH.dim): gg.append([RR.data[(ii, ii)]]) tr['gg'] = gg[1] else: tr['gg'] = [0.0] tr['dd'] = DD.dipole_strength(0, 1) tr['om'] = ((HH.data[(1, 1)] - HH.data[(0, 0)]) - rwa) ct = self._excitonic_coft(SS, self.system, 0) tr['ct'] = ct self.system._has_system_bath_coupling = True self.data = numpy.real(self.one_transition_spectrum(tr)) for ii in range(2, HH.dim): if (relaxation_tensor is not None): tr['gg'] = gg[ii] else: tr['gg'] = [0.0] tr['dd'] = DD.dipole_strength(0, ii) tr['om'] = ((HH.data[(ii, ii)] - HH.data[(0, 0)]) - rwa) tr['ct'] = self._excitonic_coft(SS, self.system, (ii - 1)) self.data += numpy.real(self.one_transition_spectrum(tr)) self.frequencyAxis = self.TimeAxis.get_FrequencyAxis() self.frequencyAxis.data += rwa Nt = (len(self.frequencyAxis.data) // 2) do = (self.frequencyAxis.data[1] - self.frequencyAxis.data[0]) st = self.frequencyAxis.data[(Nt // 2)] self.axis = FrequencyAxis(st, Nt, do) self.frequency = (self._frequency(ta.step) + rwa) S1 = numpy.linalg.inv(SS) HH.transform(S1) DD.transform(S1) if (relaxation_tensor is not None): RR.transform(S1)
def _calculate_aggregate(self, rwa, relaxation_tensor=None, relaxation_hamiltonian=None, rate_matrix=None): ' \n \n \n \n ' ta = self.TimeAxis if (relaxation_hamiltonian is None): HH = self.system.get_Hamiltonian() else: HH = relaxation_hamiltonian SS = HH.diagonalize() DD = self.system.get_TransitionDipoleMoment() DD.transform(SS) tr = {'ta': ta} if (relaxation_tensor is not None): RR = relaxation_tensor RR.transform(SS) gg = [] if isinstance(RR, TimeDependent): for ii in range(HH.dim): gg.append(RR.data[(:, ii, ii, ii, ii)]) else: for ii in range(HH.dim): gg.append([RR.data[(ii, ii, ii, ii)]]) tr['gg'] = gg[1] elif (rate_matrix is not None): RR = rate_matrix gg = [] if isinstance(RR, TimeDependent): for ii in range(HH.dim): gg.append(RR.data[(:, ii, ii)]) else: for ii in range(HH.dim): gg.append([RR.data[(ii, ii)]]) tr['gg'] = gg[1] else: tr['gg'] = [0.0] tr['dd'] = DD.dipole_strength(0, 1) tr['om'] = ((HH.data[(1, 1)] - HH.data[(0, 0)]) - rwa) ct = self._excitonic_coft(SS, self.system, 0) tr['ct'] = ct self.system._has_system_bath_coupling = True self.data = numpy.real(self.one_transition_spectrum(tr)) for ii in range(2, HH.dim): if (relaxation_tensor is not None): tr['gg'] = gg[ii] else: tr['gg'] = [0.0] tr['dd'] = DD.dipole_strength(0, ii) tr['om'] = ((HH.data[(ii, ii)] - HH.data[(0, 0)]) - rwa) tr['ct'] = self._excitonic_coft(SS, self.system, (ii - 1)) self.data += numpy.real(self.one_transition_spectrum(tr)) self.frequencyAxis = self.TimeAxis.get_FrequencyAxis() self.frequencyAxis.data += rwa Nt = (len(self.frequencyAxis.data) // 2) do = (self.frequencyAxis.data[1] - self.frequencyAxis.data[0]) st = self.frequencyAxis.data[(Nt // 2)] self.axis = FrequencyAxis(st, Nt, do) self.frequency = (self._frequency(ta.step) + rwa) S1 = numpy.linalg.inv(SS) HH.transform(S1) DD.transform(S1) if (relaxation_tensor is not None): RR.transform(S1)<|docstring|>Calculates the absorption spectrum of a molecular aggregate<|endoftext|>
95673a167cb6ecbcd6d438a7e6a2ba65eeceb06ebd5f2ccb4bd0b3e7019b6584
def dfs(graph, s, visited=[]): '\n DFS\n ' if (s not in visited): visited += s for v in [v for (u, v) in graph if ((u == s) and (v not in visited))]: dfs(graph, v, visited) return visited
DFS
02-graph-search-shortest-path-data-structures/week-01/dfs.py
dfs
tiefenauer/stanford-algorithms
5
python
def dfs(graph, s, visited=[]): '\n \n ' if (s not in visited): visited += s for v in [v for (u, v) in graph if ((u == s) and (v not in visited))]: dfs(graph, v, visited) return visited
def dfs(graph, s, visited=[]): '\n \n ' if (s not in visited): visited += s for v in [v for (u, v) in graph if ((u == s) and (v not in visited))]: dfs(graph, v, visited) return visited<|docstring|>DFS<|endoftext|>
5c0ea370161a6da73888bd5075e05c61996213ae7fd779e4797d52c7c172dc56
def image_names(path_to_folder, with_extension=False): '\n Reads raster files from multiple folders and returns their names\n\n :param path_to_folder: directory path\n :param with_extension: file extension\n :return: names of the raster files\n ' name_list = [] extension = ['jpg', 'png', 'tif', 'jpeg', 'tiff'] if os.path.isdir(path_to_folder): files = os.listdir(path_to_folder) for f in files: if (f.split('.')[(- 1)] in extension): if (with_extension is True): name_list.append(f) else: (title, ext) = f.split('.') name_list.append(title) else: file = path_to_folder if (file.split('.')[(- 1)] in extension): if (with_extension is True): name_list.append(file) else: (title, ext) = file.split('.') name_list.append(title) return name_list
Reads raster files from multiple folders and returns their names :param path_to_folder: directory path :param with_extension: file extension :return: names of the raster files
imageprep/utils.py
image_names
agcopenhaver/imageprep
0
python
def image_names(path_to_folder, with_extension=False): '\n Reads raster files from multiple folders and returns their names\n\n :param path_to_folder: directory path\n :param with_extension: file extension\n :return: names of the raster files\n ' name_list = [] extension = ['jpg', 'png', 'tif', 'jpeg', 'tiff'] if os.path.isdir(path_to_folder): files = os.listdir(path_to_folder) for f in files: if (f.split('.')[(- 1)] in extension): if (with_extension is True): name_list.append(f) else: (title, ext) = f.split('.') name_list.append(title) else: file = path_to_folder if (file.split('.')[(- 1)] in extension): if (with_extension is True): name_list.append(file) else: (title, ext) = file.split('.') name_list.append(title) return name_list
def image_names(path_to_folder, with_extension=False): '\n Reads raster files from multiple folders and returns their names\n\n :param path_to_folder: directory path\n :param with_extension: file extension\n :return: names of the raster files\n ' name_list = [] extension = ['jpg', 'png', 'tif', 'jpeg', 'tiff'] if os.path.isdir(path_to_folder): files = os.listdir(path_to_folder) for f in files: if (f.split('.')[(- 1)] in extension): if (with_extension is True): name_list.append(f) else: (title, ext) = f.split('.') name_list.append(title) else: file = path_to_folder if (file.split('.')[(- 1)] in extension): if (with_extension is True): name_list.append(file) else: (title, ext) = file.split('.') name_list.append(title) return name_list<|docstring|>Reads raster files from multiple folders and returns their names :param path_to_folder: directory path :param with_extension: file extension :return: names of the raster files<|endoftext|>
4bdd5e6fe0959f12b5c3ebb7ce9aac7c5651f7c8df5bf42679255345c3a6b50c
def pad_image(image_file_name, new_size=(600, 600), save=False): '\n Pad Image with a given number of rows and columns\n\n :param image_file_name: image file\n :param new_size: now image size\n :param save: option to save output\n :return:\n ' image = Image.open(image_file_name) (rows, cols) = image.size if ((rows % 2) == 0): add_left = add_right = ((new_size[0] - rows) // 2) else: add_left = ((new_size[0] - rows) // 2) add_right = (((new_size[0] - rows) // 2) + 1) if ((cols % 2) == 0): add_top = add_bottom = ((new_size[1] - cols) // 2) else: add_top = ((new_size[1] - cols[1]) // 2) add_bottom = (((new_size[1] - cols[1]) // 2) + 1) left = (0 - add_left) top = (0 - add_top) right = (rows + add_right) bottom = (cols + add_bottom) image = image.crop((left, top, right, bottom)) if (save is True): image.save('padded_output.png') return image
Pad Image with a given number of rows and columns :param image_file_name: image file :param new_size: now image size :param save: option to save output :return:
imageprep/utils.py
pad_image
agcopenhaver/imageprep
0
python
def pad_image(image_file_name, new_size=(600, 600), save=False): '\n Pad Image with a given number of rows and columns\n\n :param image_file_name: image file\n :param new_size: now image size\n :param save: option to save output\n :return:\n ' image = Image.open(image_file_name) (rows, cols) = image.size if ((rows % 2) == 0): add_left = add_right = ((new_size[0] - rows) // 2) else: add_left = ((new_size[0] - rows) // 2) add_right = (((new_size[0] - rows) // 2) + 1) if ((cols % 2) == 0): add_top = add_bottom = ((new_size[1] - cols) // 2) else: add_top = ((new_size[1] - cols[1]) // 2) add_bottom = (((new_size[1] - cols[1]) // 2) + 1) left = (0 - add_left) top = (0 - add_top) right = (rows + add_right) bottom = (cols + add_bottom) image = image.crop((left, top, right, bottom)) if (save is True): image.save('padded_output.png') return image
def pad_image(image_file_name, new_size=(600, 600), save=False): '\n Pad Image with a given number of rows and columns\n\n :param image_file_name: image file\n :param new_size: now image size\n :param save: option to save output\n :return:\n ' image = Image.open(image_file_name) (rows, cols) = image.size if ((rows % 2) == 0): add_left = add_right = ((new_size[0] - rows) // 2) else: add_left = ((new_size[0] - rows) // 2) add_right = (((new_size[0] - rows) // 2) + 1) if ((cols % 2) == 0): add_top = add_bottom = ((new_size[1] - cols) // 2) else: add_top = ((new_size[1] - cols[1]) // 2) add_bottom = (((new_size[1] - cols[1]) // 2) + 1) left = (0 - add_left) top = (0 - add_top) right = (rows + add_right) bottom = (cols + add_bottom) image = image.crop((left, top, right, bottom)) if (save is True): image.save('padded_output.png') return image<|docstring|>Pad Image with a given number of rows and columns :param image_file_name: image file :param new_size: now image size :param save: option to save output :return:<|endoftext|>
53bce710f4ce0a2263d7f32693ba3dc30a7056817908cca2872b979b699558ac
def resize_images_in_one_folder(path, output_size=256): '\n Re-sizes images in one folder\n\n :param path: path to the folder\n :param output_size: size of the image output\n :return: re-sized images saved in the same folder\n ' dirs = os.listdir(path) for item in dirs: if os.path.isfile((path + item)): if item.endswith('.jpg'): im = Image.open((path + item)) (f, e) = os.path.splitext((path + item)) im_resize = im.resize((output_size, output_size), Image.ANTIALIAS) im_resize.save((f + '.jpg'), 'JPEG', quality=90)
Re-sizes images in one folder :param path: path to the folder :param output_size: size of the image output :return: re-sized images saved in the same folder
imageprep/utils.py
resize_images_in_one_folder
agcopenhaver/imageprep
0
python
def resize_images_in_one_folder(path, output_size=256): '\n Re-sizes images in one folder\n\n :param path: path to the folder\n :param output_size: size of the image output\n :return: re-sized images saved in the same folder\n ' dirs = os.listdir(path) for item in dirs: if os.path.isfile((path + item)): if item.endswith('.jpg'): im = Image.open((path + item)) (f, e) = os.path.splitext((path + item)) im_resize = im.resize((output_size, output_size), Image.ANTIALIAS) im_resize.save((f + '.jpg'), 'JPEG', quality=90)
def resize_images_in_one_folder(path, output_size=256): '\n Re-sizes images in one folder\n\n :param path: path to the folder\n :param output_size: size of the image output\n :return: re-sized images saved in the same folder\n ' dirs = os.listdir(path) for item in dirs: if os.path.isfile((path + item)): if item.endswith('.jpg'): im = Image.open((path + item)) (f, e) = os.path.splitext((path + item)) im_resize = im.resize((output_size, output_size), Image.ANTIALIAS) im_resize.save((f + '.jpg'), 'JPEG', quality=90)<|docstring|>Re-sizes images in one folder :param path: path to the folder :param output_size: size of the image output :return: re-sized images saved in the same folder<|endoftext|>
60b6f48098d1cc1c14560f2b637e1f45149feb58f17e202c232cea569110a795
def resize_images_from_multiple_folders(path, output_size=256): '\n Re-sizes images in multiple folders and saves images in each respective folder\n\n :param path: path to the folder containing all folders with images\n :param output_size:\n :return: re-sized images saved in their respective folder\n ' for folders in os.listdir(path): folder_list = os.path.join(path, folders) for item in os.listdir(folder_list): if item.endswith('.png'): file = os.path.join(folder_list, item) im = Image.open(file) imResize = im.resize((output_size, output_size), Image.ANTIALIAS) (f, e) = os.path.splitext(file) imResize.save((f + '.png'), 'JPEG', quality=90)
Re-sizes images in multiple folders and saves images in each respective folder :param path: path to the folder containing all folders with images :param output_size: :return: re-sized images saved in their respective folder
imageprep/utils.py
resize_images_from_multiple_folders
agcopenhaver/imageprep
0
python
def resize_images_from_multiple_folders(path, output_size=256): '\n Re-sizes images in multiple folders and saves images in each respective folder\n\n :param path: path to the folder containing all folders with images\n :param output_size:\n :return: re-sized images saved in their respective folder\n ' for folders in os.listdir(path): folder_list = os.path.join(path, folders) for item in os.listdir(folder_list): if item.endswith('.png'): file = os.path.join(folder_list, item) im = Image.open(file) imResize = im.resize((output_size, output_size), Image.ANTIALIAS) (f, e) = os.path.splitext(file) imResize.save((f + '.png'), 'JPEG', quality=90)
def resize_images_from_multiple_folders(path, output_size=256): '\n Re-sizes images in multiple folders and saves images in each respective folder\n\n :param path: path to the folder containing all folders with images\n :param output_size:\n :return: re-sized images saved in their respective folder\n ' for folders in os.listdir(path): folder_list = os.path.join(path, folders) for item in os.listdir(folder_list): if item.endswith('.png'): file = os.path.join(folder_list, item) im = Image.open(file) imResize = im.resize((output_size, output_size), Image.ANTIALIAS) (f, e) = os.path.splitext(file) imResize.save((f + '.png'), 'JPEG', quality=90)<|docstring|>Re-sizes images in multiple folders and saves images in each respective folder :param path: path to the folder containing all folders with images :param output_size: :return: re-sized images saved in their respective folder<|endoftext|>
22c0126cb5213e1dc95b2bbeb511c69c115ec063db3f0c098c1676568caaa5e0
def list_path_to_files(path_to_folders, save=False): '\n Saves the path to files (images or labels) in one text file\n\n :param path_to_folders: path to the folder containing images or labels\n :param output_file_name: name of output text file\n :param save: option to save list to a text file\n :return: a text file with a list of path to files\n ' extension = ['jpg', 'png', 'tif', 'jpeg', 'tiff'] files = os.listdir(path_to_folders) counter = 0 cwd = os.getcwd() output_file_name = 'path.txt' txt = open(os.path.join(cwd, output_file_name), 'w') all_files = [] for f in files: if (f.split('.')[(- 1)] in extension): if (save is True): txt.write(((path_to_folders + f) + '\n')) counter = (counter + 1) else: list_path = (path_to_folders + f) all_files.append(list_path) return all_files
Saves the path to files (images or labels) in one text file :param path_to_folders: path to the folder containing images or labels :param output_file_name: name of output text file :param save: option to save list to a text file :return: a text file with a list of path to files
imageprep/utils.py
list_path_to_files
agcopenhaver/imageprep
0
python
def list_path_to_files(path_to_folders, save=False): '\n Saves the path to files (images or labels) in one text file\n\n :param path_to_folders: path to the folder containing images or labels\n :param output_file_name: name of output text file\n :param save: option to save list to a text file\n :return: a text file with a list of path to files\n ' extension = ['jpg', 'png', 'tif', 'jpeg', 'tiff'] files = os.listdir(path_to_folders) counter = 0 cwd = os.getcwd() output_file_name = 'path.txt' txt = open(os.path.join(cwd, output_file_name), 'w') all_files = [] for f in files: if (f.split('.')[(- 1)] in extension): if (save is True): txt.write(((path_to_folders + f) + '\n')) counter = (counter + 1) else: list_path = (path_to_folders + f) all_files.append(list_path) return all_files
def list_path_to_files(path_to_folders, save=False): '\n Saves the path to files (images or labels) in one text file\n\n :param path_to_folders: path to the folder containing images or labels\n :param output_file_name: name of output text file\n :param save: option to save list to a text file\n :return: a text file with a list of path to files\n ' extension = ['jpg', 'png', 'tif', 'jpeg', 'tiff'] files = os.listdir(path_to_folders) counter = 0 cwd = os.getcwd() output_file_name = 'path.txt' txt = open(os.path.join(cwd, output_file_name), 'w') all_files = [] for f in files: if (f.split('.')[(- 1)] in extension): if (save is True): txt.write(((path_to_folders + f) + '\n')) counter = (counter + 1) else: list_path = (path_to_folders + f) all_files.append(list_path) return all_files<|docstring|>Saves the path to files (images or labels) in one text file :param path_to_folders: path to the folder containing images or labels :param output_file_name: name of output text file :param save: option to save list to a text file :return: a text file with a list of path to files<|endoftext|>
3aa623ba508cc2e666d7573857e7cedca2fb2e2fad28b1bd58d810c14e0fe3cb
def read_image(file, as_array=True): '\n Reads image and returns a numpy array\n\n :param file: image file namec\n :param as_array: option to read image to array.\n :return: numpy array\n ' img = Image.open(file) if (as_array is True): img = np.asarray(img) return img
Reads image and returns a numpy array :param file: image file namec :param as_array: option to read image to array. :return: numpy array
imageprep/utils.py
read_image
agcopenhaver/imageprep
0
python
def read_image(file, as_array=True): '\n Reads image and returns a numpy array\n\n :param file: image file namec\n :param as_array: option to read image to array.\n :return: numpy array\n ' img = Image.open(file) if (as_array is True): img = np.asarray(img) return img
def read_image(file, as_array=True): '\n Reads image and returns a numpy array\n\n :param file: image file namec\n :param as_array: option to read image to array.\n :return: numpy array\n ' img = Image.open(file) if (as_array is True): img = np.asarray(img) return img<|docstring|>Reads image and returns a numpy array :param file: image file namec :param as_array: option to read image to array. :return: numpy array<|endoftext|>
2ecdbd397e78e1902f5ce57fc0bf247a95610292f57743a37e931fbd3af1182c
def images_as_array(path, ext='.jpg'): '\n Reads multiple images in a folder and returns a stacked numpy array\n\n :param path: path to the folder containing the images\n :param ext: file extension. defaulted to jpg\n :return: stacked numpy array of images\n ' dir = os.listdir(path) img_arr_list = [] for item in dir: if os.path.isfile((path + item)): if item.endswith(ext): img_arr = read_image((path + item)) img_arr = np.expand_dims(img_arr, axis=0) img_arr_list.append(img_arr) img_stack = np.vstack(img_arr_list) return img_stack
Reads multiple images in a folder and returns a stacked numpy array :param path: path to the folder containing the images :param ext: file extension. defaulted to jpg :return: stacked numpy array of images
imageprep/utils.py
images_as_array
agcopenhaver/imageprep
0
python
def images_as_array(path, ext='.jpg'): '\n Reads multiple images in a folder and returns a stacked numpy array\n\n :param path: path to the folder containing the images\n :param ext: file extension. defaulted to jpg\n :return: stacked numpy array of images\n ' dir = os.listdir(path) img_arr_list = [] for item in dir: if os.path.isfile((path + item)): if item.endswith(ext): img_arr = read_image((path + item)) img_arr = np.expand_dims(img_arr, axis=0) img_arr_list.append(img_arr) img_stack = np.vstack(img_arr_list) return img_stack
def images_as_array(path, ext='.jpg'): '\n Reads multiple images in a folder and returns a stacked numpy array\n\n :param path: path to the folder containing the images\n :param ext: file extension. defaulted to jpg\n :return: stacked numpy array of images\n ' dir = os.listdir(path) img_arr_list = [] for item in dir: if os.path.isfile((path + item)): if item.endswith(ext): img_arr = read_image((path + item)) img_arr = np.expand_dims(img_arr, axis=0) img_arr_list.append(img_arr) img_stack = np.vstack(img_arr_list) return img_stack<|docstring|>Reads multiple images in a folder and returns a stacked numpy array :param path: path to the folder containing the images :param ext: file extension. defaulted to jpg :return: stacked numpy array of images<|endoftext|>
57c799fca1a1eed2af7564a9f797c3b26da2233d694581b27f86912c4b87611d
def read_labels(input_path, ext='.txt'): '\n Read multiple label text files\n\n :param input_path: path to the folder containing the labels text files\n :param ext: name of file extension. defaulted to jpg\n :return:\n ' folder = os.listdir(input_path) label_content = [] for item in folder: if os.path.isfile((input_path + item)): if item.endswith(ext): content = [] input_file = open(os.path.join((input_path + item))) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content.append([item, content]) else: label_content.append([item, content[0]]) return label_content
Read multiple label text files :param input_path: path to the folder containing the labels text files :param ext: name of file extension. defaulted to jpg :return:
imageprep/utils.py
read_labels
agcopenhaver/imageprep
0
python
def read_labels(input_path, ext='.txt'): '\n Read multiple label text files\n\n :param input_path: path to the folder containing the labels text files\n :param ext: name of file extension. defaulted to jpg\n :return:\n ' folder = os.listdir(input_path) label_content = [] for item in folder: if os.path.isfile((input_path + item)): if item.endswith(ext): content = [] input_file = open(os.path.join((input_path + item))) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content.append([item, content]) else: label_content.append([item, content[0]]) return label_content
def read_labels(input_path, ext='.txt'): '\n Read multiple label text files\n\n :param input_path: path to the folder containing the labels text files\n :param ext: name of file extension. defaulted to jpg\n :return:\n ' folder = os.listdir(input_path) label_content = [] for item in folder: if os.path.isfile((input_path + item)): if item.endswith(ext): content = [] input_file = open(os.path.join((input_path + item))) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content.append([item, content]) else: label_content.append([item, content[0]]) return label_content<|docstring|>Read multiple label text files :param input_path: path to the folder containing the labels text files :param ext: name of file extension. defaulted to jpg :return:<|endoftext|>
6dddfaa5d385318984fe8830690956ba817b73fbec63c377447a9a171537b1f0
def read_label_as_dict(file, ext='.txt'): '\n Reads a label file in text format as a dictionary\n\n :param file: Name of the label file\n :param ext: Name of the file extension. Defaulted to text\n :return: A dictionary of the label\n ' label_content = {} if os.path.isfile(file): if file.endswith(ext): content = [] input_file = open(file) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content['name'] = file label_content['bbox'] = content else: label_content['name'] = file label_content['bbox'] = content[0] return label_content
Reads a label file in text format as a dictionary :param file: Name of the label file :param ext: Name of the file extension. Defaulted to text :return: A dictionary of the label
imageprep/utils.py
read_label_as_dict
agcopenhaver/imageprep
0
python
def read_label_as_dict(file, ext='.txt'): '\n Reads a label file in text format as a dictionary\n\n :param file: Name of the label file\n :param ext: Name of the file extension. Defaulted to text\n :return: A dictionary of the label\n ' label_content = {} if os.path.isfile(file): if file.endswith(ext): content = [] input_file = open(file) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content['name'] = file label_content['bbox'] = content else: label_content['name'] = file label_content['bbox'] = content[0] return label_content
def read_label_as_dict(file, ext='.txt'): '\n Reads a label file in text format as a dictionary\n\n :param file: Name of the label file\n :param ext: Name of the file extension. Defaulted to text\n :return: A dictionary of the label\n ' label_content = {} if os.path.isfile(file): if file.endswith(ext): content = [] input_file = open(file) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content['name'] = file label_content['bbox'] = content else: label_content['name'] = file label_content['bbox'] = content[0] return label_content<|docstring|>Reads a label file in text format as a dictionary :param file: Name of the label file :param ext: Name of the file extension. Defaulted to text :return: A dictionary of the label<|endoftext|>
135e993b413ca125c026f660e5c56dd16b3f81077dca0d0478c12f84aba2eb38
def read_label_as_list(file, ext='.txt'): '\n Reads a label file in text format as a list\n\n :param file: Name of the label file\n :param ext: Name of the file extension. Defaulted to text\n :return: Label as a list\n ' label_content = [] if os.path.isfile(file): if file.endswith(ext): content = [] input_file = open(file) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content.append([file, content]) else: label_content.append([file, content[0]]) return label_content
Reads a label file in text format as a list :param file: Name of the label file :param ext: Name of the file extension. Defaulted to text :return: Label as a list
imageprep/utils.py
read_label_as_list
agcopenhaver/imageprep
0
python
def read_label_as_list(file, ext='.txt'): '\n Reads a label file in text format as a list\n\n :param file: Name of the label file\n :param ext: Name of the file extension. Defaulted to text\n :return: Label as a list\n ' label_content = [] if os.path.isfile(file): if file.endswith(ext): content = [] input_file = open(file) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content.append([file, content]) else: label_content.append([file, content[0]]) return label_content
def read_label_as_list(file, ext='.txt'): '\n Reads a label file in text format as a list\n\n :param file: Name of the label file\n :param ext: Name of the file extension. Defaulted to text\n :return: Label as a list\n ' label_content = [] if os.path.isfile(file): if file.endswith(ext): content = [] input_file = open(file) for line in input_file.read().splitlines(): content.append([line]) if (len(content) != 1): label_content.append([file, content]) else: label_content.append([file, content[0]]) return label_content<|docstring|>Reads a label file in text format as a list :param file: Name of the label file :param ext: Name of the file extension. Defaulted to text :return: Label as a list<|endoftext|>
3f17c2f12615769b52c2b1727ed18b4f52957e88ebd41045df330f07c328d8fa
@pytest.mark.parametrize('file_path', ['tests/data/assets/labelbox_v1/tiled_image_export.json']) def test_image(file_path): 'Tests against both Simple and non-Simple tiled image export data. \n index-0 is non-Simple, index-1 is Simple\n ' with open(file_path, 'r') as f: payload = json.load(f) collection = LBV1Converter.deserialize(payload) collection_as_list = collection.as_list() assert (len(collection_as_list) == 2) non_simple_annotations = collection_as_list[0].annotations assert (len(non_simple_annotations) == 6) expected_shapes = [Polygon, Point, Point, Point, Line, Rectangle] for idx in range(len(non_simple_annotations)): assert isinstance(non_simple_annotations[idx].value, expected_shapes[idx]) assert (non_simple_annotations[(- 1)].value.start.x == (- 99.36567524971268)) assert (non_simple_annotations[(- 1)].value.start.y == 19.34717117508651) assert (non_simple_annotations[(- 1)].value.end.x == (- 99.3649886680726)) assert (non_simple_annotations[(- 1)].value.end.y == 19.41999425190506) simple_annotations = collection_as_list[1].annotations assert (len(simple_annotations) == 8) expected_shapes = [Polygon, Point, Point, Point, Point, Point, Line, Rectangle] for idx in range(len(simple_annotations)): assert isinstance(simple_annotations[idx].value, expected_shapes[idx])
Tests against both Simple and non-Simple tiled image export data. index-0 is non-Simple, index-1 is Simple
tests/data/serialization/labelbox_v1/test_tiled_image.py
test_image
Cyniikal/labelbox-python
0
python
@pytest.mark.parametrize('file_path', ['tests/data/assets/labelbox_v1/tiled_image_export.json']) def test_image(file_path): 'Tests against both Simple and non-Simple tiled image export data. \n index-0 is non-Simple, index-1 is Simple\n ' with open(file_path, 'r') as f: payload = json.load(f) collection = LBV1Converter.deserialize(payload) collection_as_list = collection.as_list() assert (len(collection_as_list) == 2) non_simple_annotations = collection_as_list[0].annotations assert (len(non_simple_annotations) == 6) expected_shapes = [Polygon, Point, Point, Point, Line, Rectangle] for idx in range(len(non_simple_annotations)): assert isinstance(non_simple_annotations[idx].value, expected_shapes[idx]) assert (non_simple_annotations[(- 1)].value.start.x == (- 99.36567524971268)) assert (non_simple_annotations[(- 1)].value.start.y == 19.34717117508651) assert (non_simple_annotations[(- 1)].value.end.x == (- 99.3649886680726)) assert (non_simple_annotations[(- 1)].value.end.y == 19.41999425190506) simple_annotations = collection_as_list[1].annotations assert (len(simple_annotations) == 8) expected_shapes = [Polygon, Point, Point, Point, Point, Point, Line, Rectangle] for idx in range(len(simple_annotations)): assert isinstance(simple_annotations[idx].value, expected_shapes[idx])
@pytest.mark.parametrize('file_path', ['tests/data/assets/labelbox_v1/tiled_image_export.json']) def test_image(file_path): 'Tests against both Simple and non-Simple tiled image export data. \n index-0 is non-Simple, index-1 is Simple\n ' with open(file_path, 'r') as f: payload = json.load(f) collection = LBV1Converter.deserialize(payload) collection_as_list = collection.as_list() assert (len(collection_as_list) == 2) non_simple_annotations = collection_as_list[0].annotations assert (len(non_simple_annotations) == 6) expected_shapes = [Polygon, Point, Point, Point, Line, Rectangle] for idx in range(len(non_simple_annotations)): assert isinstance(non_simple_annotations[idx].value, expected_shapes[idx]) assert (non_simple_annotations[(- 1)].value.start.x == (- 99.36567524971268)) assert (non_simple_annotations[(- 1)].value.start.y == 19.34717117508651) assert (non_simple_annotations[(- 1)].value.end.x == (- 99.3649886680726)) assert (non_simple_annotations[(- 1)].value.end.y == 19.41999425190506) simple_annotations = collection_as_list[1].annotations assert (len(simple_annotations) == 8) expected_shapes = [Polygon, Point, Point, Point, Point, Point, Line, Rectangle] for idx in range(len(simple_annotations)): assert isinstance(simple_annotations[idx].value, expected_shapes[idx])<|docstring|>Tests against both Simple and non-Simple tiled image export data. index-0 is non-Simple, index-1 is Simple<|endoftext|>
c9433bcd51e2ace1cf5e9d68f6f2f273f09861552ccd79ce77977d22bc5b50a3
def read_tolerance(folder): '\n read the 7nth thing from a line, replace D with E (e.g. in "0.1D-1"), transform to a float\n ' with open(os.path.join(folder, 'input.cnd'), 'r') as file: line = file.readline() line_list = line.split() tol_str = line_list[6] tol_str_refined = tol_str.replace('D', 'E') tol = float(tol_str_refined) return tol
read the 7nth thing from a line, replace D with E (e.g. in "0.1D-1"), transform to a float
FBEM/postproc.py
read_tolerance
icemtel/stokes
0
python
def read_tolerance(folder): '\n \n ' with open(os.path.join(folder, 'input.cnd'), 'r') as file: line = file.readline() line_list = line.split() tol_str = line_list[6] tol_str_refined = tol_str.replace('D', 'E') tol = float(tol_str_refined) return tol
def read_tolerance(folder): '\n \n ' with open(os.path.join(folder, 'input.cnd'), 'r') as file: line = file.readline() line_list = line.split() tol_str = line_list[6] tol_str_refined = tol_str.replace('D', 'E') tol = float(tol_str_refined) return tol<|docstring|>read the 7nth thing from a line, replace D with E (e.g. in "0.1D-1"), transform to a float<|endoftext|>
f0db7082198680076377669896145679c105edac42a469ff71da5f662abd03e1
def read_viscosity(folder, infile='input.dat'): '\n Skip two lines, read the third thing from the line, transform to a float.\n ' with open(os.path.join(folder, infile), 'r') as file: file.readline() file.readline() line = file.readline() line_list = line.split() visc_str = line_list[2] return float(visc_str)
Skip two lines, read the third thing from the line, transform to a float.
FBEM/postproc.py
read_viscosity
icemtel/stokes
0
python
def read_viscosity(folder, infile='input.dat'): '\n \n ' with open(os.path.join(folder, infile), 'r') as file: file.readline() file.readline() line = file.readline() line_list = line.split() visc_str = line_list[2] return float(visc_str)
def read_viscosity(folder, infile='input.dat'): '\n \n ' with open(os.path.join(folder, infile), 'r') as file: file.readline() file.readline() line = file.readline() line_list = line.split() visc_str = line_list[2] return float(visc_str)<|docstring|>Skip two lines, read the third thing from the line, transform to a float.<|endoftext|>
d75249316894ca3d4acea69cf686597bd42252f9ac9aefc80c5d34253df49139
def read_all_triangulation_input(filename): '\n read points and triangulation and return it.\n Points numbers in triangulation start from 0.\n ' f = open(filename, 'r') points = [] trias = [] pointFlag = False triaFlag = False for line in f: if pointFlag: try: pos_line = line.split()[1:] points.append([float(h) for h in pos_line]) except: pointFlag = False triaFlag = True elif triaFlag: tria_line = line.split()[1:4] trias.append([(int(val) - 1) for val in tria_line]) elif ('$ Nodes' in line): pointFlag = True f.close() return (np.array(points), np.array(trias, dtype=np.int))
read points and triangulation and return it. Points numbers in triangulation start from 0.
FBEM/postproc.py
read_all_triangulation_input
icemtel/stokes
0
python
def read_all_triangulation_input(filename): '\n read points and triangulation and return it.\n Points numbers in triangulation start from 0.\n ' f = open(filename, 'r') points = [] trias = [] pointFlag = False triaFlag = False for line in f: if pointFlag: try: pos_line = line.split()[1:] points.append([float(h) for h in pos_line]) except: pointFlag = False triaFlag = True elif triaFlag: tria_line = line.split()[1:4] trias.append([(int(val) - 1) for val in tria_line]) elif ('$ Nodes' in line): pointFlag = True f.close() return (np.array(points), np.array(trias, dtype=np.int))
def read_all_triangulation_input(filename): '\n read points and triangulation and return it.\n Points numbers in triangulation start from 0.\n ' f = open(filename, 'r') points = [] trias = [] pointFlag = False triaFlag = False for line in f: if pointFlag: try: pos_line = line.split()[1:] points.append([float(h) for h in pos_line]) except: pointFlag = False triaFlag = True elif triaFlag: tria_line = line.split()[1:4] trias.append([(int(val) - 1) for val in tria_line]) elif ('$ Nodes' in line): pointFlag = True f.close() return (np.array(points), np.array(trias, dtype=np.int))<|docstring|>read points and triangulation and return it. Points numbers in triangulation start from 0.<|endoftext|>
84ec1a2d0ce7b933183c669d15b30020b6c946a0e5e5db4cc6053170f3d258c2
def read_triangulation_by_names_input(object_names, folder, input_name='input.dat'): "\n :return: list of coords, list of trias, corresponding to each of the input objects\n TODO: should work faster if don't iterate over the names of objects which have their data already loaded\n TODO: And load all points of object after encountering the first one?\n " ranges = load_ranges(folder) inputfile = os.path.join(folder, input_name) result = {} for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) posi = np.zeros((((posiRange[1] - posiRange[0]) + 1), 3)) tria = np.zeros((((triaRange[1] - triaRange[0]) + 1), 3), dtype=np.int) result[name] = (posi, tria) triaFlag = False posiFlag = False with open(inputfile, 'r') as f: for line in f: if ('$ Nodes (Nod' in line): posiFlag = True if ('$ Elements and Boundary Co' in line): triaFlag = True posiFlag = False if posiFlag: try: (a, b, c, d) = line.split() except: continue for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) if ((int(a) > posiRange[0]) and (int(a) <= (posiRange[1] + 1))): result[name][0][((int(a) - 1) - posiRange[0])] = (float(b), float(c), float(d)) break if triaFlag: try: (a, b, c, d, e, f, g, h, i, j) = line.split() except: continue for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) if ((int(a) > triaRange[0]) and (int(a) <= (triaRange[1] + 1))): result[name][1][((int(a) - 1) - triaRange[0])] = (((int(b) - 1) - posiRange[0]), ((int(c) - 1) - posiRange[0]), ((int(d) - 1) - posiRange[0])) break coords_list = [result[name][0] for name in object_names] trias_list = [result[name][1] for name in object_names] return (coords_list, trias_list)
:return: list of coords, list of trias, corresponding to each of the input objects TODO: should work faster if don't iterate over the names of objects which have their data already loaded TODO: And load all points of object after encountering the first one?
FBEM/postproc.py
read_triangulation_by_names_input
icemtel/stokes
0
python
def read_triangulation_by_names_input(object_names, folder, input_name='input.dat'): "\n :return: list of coords, list of trias, corresponding to each of the input objects\n TODO: should work faster if don't iterate over the names of objects which have their data already loaded\n TODO: And load all points of object after encountering the first one?\n " ranges = load_ranges(folder) inputfile = os.path.join(folder, input_name) result = {} for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) posi = np.zeros((((posiRange[1] - posiRange[0]) + 1), 3)) tria = np.zeros((((triaRange[1] - triaRange[0]) + 1), 3), dtype=np.int) result[name] = (posi, tria) triaFlag = False posiFlag = False with open(inputfile, 'r') as f: for line in f: if ('$ Nodes (Nod' in line): posiFlag = True if ('$ Elements and Boundary Co' in line): triaFlag = True posiFlag = False if posiFlag: try: (a, b, c, d) = line.split() except: continue for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) if ((int(a) > posiRange[0]) and (int(a) <= (posiRange[1] + 1))): result[name][0][((int(a) - 1) - posiRange[0])] = (float(b), float(c), float(d)) break if triaFlag: try: (a, b, c, d, e, f, g, h, i, j) = line.split() except: continue for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) if ((int(a) > triaRange[0]) and (int(a) <= (triaRange[1] + 1))): result[name][1][((int(a) - 1) - triaRange[0])] = (((int(b) - 1) - posiRange[0]), ((int(c) - 1) - posiRange[0]), ((int(d) - 1) - posiRange[0])) break coords_list = [result[name][0] for name in object_names] trias_list = [result[name][1] for name in object_names] return (coords_list, trias_list)
def read_triangulation_by_names_input(object_names, folder, input_name='input.dat'): "\n :return: list of coords, list of trias, corresponding to each of the input objects\n TODO: should work faster if don't iterate over the names of objects which have their data already loaded\n TODO: And load all points of object after encountering the first one?\n " ranges = load_ranges(folder) inputfile = os.path.join(folder, input_name) result = {} for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) posi = np.zeros((((posiRange[1] - posiRange[0]) + 1), 3)) tria = np.zeros((((triaRange[1] - triaRange[0]) + 1), 3), dtype=np.int) result[name] = (posi, tria) triaFlag = False posiFlag = False with open(inputfile, 'r') as f: for line in f: if ('$ Nodes (Nod' in line): posiFlag = True if ('$ Elements and Boundary Co' in line): triaFlag = True posiFlag = False if posiFlag: try: (a, b, c, d) = line.split() except: continue for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) if ((int(a) > posiRange[0]) and (int(a) <= (posiRange[1] + 1))): result[name][0][((int(a) - 1) - posiRange[0])] = (float(b), float(c), float(d)) break if triaFlag: try: (a, b, c, d, e, f, g, h, i, j) = line.split() except: continue for name in object_names: (posiRange, triaRange) = (ranges.coords[name], ranges.trias[name]) if ((int(a) > triaRange[0]) and (int(a) <= (triaRange[1] + 1))): result[name][1][((int(a) - 1) - triaRange[0])] = (((int(b) - 1) - posiRange[0]), ((int(c) - 1) - posiRange[0]), ((int(d) - 1) - posiRange[0])) break coords_list = [result[name][0] for name in object_names] trias_list = [result[name][1] for name in object_names] return (coords_list, trias_list)<|docstring|>:return: list of coords, list of trias, corresponding to each of the input objects TODO: should work faster if don't iterate over the names of objects which have their data already loaded TODO: And load all points of object after encountering the first 
one?<|endoftext|>
0220385264109b571b8bc6d490cccdfac04a34aba2bd283496226c60451749c3
def read_triangulation_input(filename, posiRange, triaRange): "\n filename is the name of the input file. (usually 'input.dat')\n posiRange: indices of coordinates.\n triaRange: indices of triangulation.\n return two numpy arrays, containing the coordinates and the triangulation\n with respect to those coordinates.\n " tria = np.zeros((((triaRange[1] - triaRange[0]) + 1), 3), dtype=np.int) posi = np.zeros((((posiRange[1] - posiRange[0]) + 1), 3)) triaFlag = False posiFlag = False with open(filename, 'r') as f: for line in f: if ('$ Nodes (Nod' in line): posiFlag = True if ('$ Elements and Boundary Co' in line): triaFlag = True posiFlag = False if posiFlag: try: (a, b, c, d) = line.split() except: continue if ((int(a) > posiRange[0]) and (int(a) <= (posiRange[1] + 1))): posi[((int(a) - 1) - posiRange[0])] = (float(b), float(c), float(d)) if triaFlag: try: (a, b, c, d, e, f, g, h, i, j) = line.split() except: continue if ((int(a) > triaRange[0]) and (int(a) <= (triaRange[1] + 1))): tria[((int(a) - 1) - triaRange[0])] = (((int(b) - 1) - posiRange[0]), ((int(c) - 1) - posiRange[0]), ((int(d) - 1) - posiRange[0])) return (posi, tria)
filename is the name of the input file. (usually 'input.dat') posiRange: indices of coordinates. triaRange: indices of triangulation. return two numpy arrays, containing the coordinates and the triangulation with respect to those coordinates.
FBEM/postproc.py
read_triangulation_input
icemtel/stokes
0
python
def read_triangulation_input(filename, posiRange, triaRange):
    """
    Read one object's nodes and triangles from an FBEM input file.

    :param filename: path to the input file (usually 'input.dat')
    :param posiRange: (first, last) 0-based node index range of the object
    :param triaRange: (first, last) 0-based element index range of the object
    :return: (posi, tria) — node coordinates (N, 3) and triangles (M, 3) as
             0-based indices into *posi*
    """
    # plain int: np.int was deprecated in NumPy 1.20 and removed in 1.24
    tria = np.zeros((triaRange[1] - triaRange[0] + 1, 3), dtype=int)
    posi = np.zeros((posiRange[1] - posiRange[0] + 1, 3))
    posiFlag = False
    triaFlag = False
    with open(filename, 'r') as fh:
        for line in fh:
            # section headers of the FBEM input format
            if '$ Nodes (Nod' in line:
                posiFlag = True
            if '$ Elements and Boundary Co' in line:
                triaFlag = True
                posiFlag = False
            if posiFlag:
                fields = line.split()
                # node lines have exactly 4 columns: index x y z
                if len(fields) != 4:
                    continue
                try:
                    idx = int(fields[0])
                    if posiRange[0] < idx <= posiRange[1] + 1:
                        posi[idx - 1 - posiRange[0]] = \
                            (float(fields[1]), float(fields[2]), float(fields[3]))
                except ValueError:
                    # malformed numeric fields: skip instead of crashing
                    continue
            if triaFlag:
                fields = line.split()
                # element lines have exactly 10 columns; index + 3 node ids first
                if len(fields) != 10:
                    continue
                try:
                    idx = int(fields[0])
                    if triaRange[0] < idx <= triaRange[1] + 1:
                        # shift 1-based global node ids to 0-based local indices
                        tria[idx - 1 - triaRange[0]] = (
                            int(fields[1]) - 1 - posiRange[0],
                            int(fields[2]) - 1 - posiRange[0],
                            int(fields[3]) - 1 - posiRange[0])
                except ValueError:
                    continue
    return (posi, tria)
def read_triangulation_input(filename, posiRange, triaRange): "\n filename is the name of the input file. (usually 'input.dat')\n posiRange: indices of coordinates.\n triaRange: indices of triangulation.\n return two numpy arrays, containing the coordinates and the triangulation\n with respect to those coordinates.\n " tria = np.zeros((((triaRange[1] - triaRange[0]) + 1), 3), dtype=np.int) posi = np.zeros((((posiRange[1] - posiRange[0]) + 1), 3)) triaFlag = False posiFlag = False with open(filename, 'r') as f: for line in f: if ('$ Nodes (Nod' in line): posiFlag = True if ('$ Elements and Boundary Co' in line): triaFlag = True posiFlag = False if posiFlag: try: (a, b, c, d) = line.split() except: continue if ((int(a) > posiRange[0]) and (int(a) <= (posiRange[1] + 1))): posi[((int(a) - 1) - posiRange[0])] = (float(b), float(c), float(d)) if triaFlag: try: (a, b, c, d, e, f, g, h, i, j) = line.split() except: continue if ((int(a) > triaRange[0]) and (int(a) <= (triaRange[1] + 1))): tria[((int(a) - 1) - triaRange[0])] = (((int(b) - 1) - posiRange[0]), ((int(c) - 1) - posiRange[0]), ((int(d) - 1) - posiRange[0])) return (posi, tria)<|docstring|>filename is the name of the input file. (usually 'input.dat') posiRange: indices of coordinates. triaRange: indices of triangulation. return two numpy arrays, containing the coordinates and the triangulation with respect to those coordinates.<|endoftext|>
4135276e10d4e4619d08c2cf59ecfc602df4ef56555835021b03f0de8262c404
def triangleArea(v1, v2, v3): "\n given three position vectors, calculate the area of a triangle, using\n Heron's formula.\n " [v1, v2, v3] = [np.array(v) for v in [v1, v2, v3]] [a, b, c] = [lin.norm(d) for d in [(v1 - v2), (v2 - v3), (v3 - v1)]] s = (((a + b) + c) / 2.0) A = np.sqrt((((s * (s - a)) * (s - b)) * (s - c))) return A
given three position vectors, calculate the area of a triangle, using Heron's formula.
FBEM/postproc.py
triangleArea
icemtel/stokes
0
python
def triangleArea(v1, v2, v3):
    """
    Area of the triangle spanned by three position vectors, via Heron's formula.
    """
    p1 = np.array(v1)
    p2 = np.array(v2)
    p3 = np.array(v3)
    a = lin.norm(p1 - p2)
    b = lin.norm(p2 - p3)
    c = lin.norm(p3 - p1)
    # semi-perimeter
    s = (a + b + c) / 2.0
    return np.sqrt(s * (s - a) * (s - b) * (s - c))
def triangleArea(v1, v2, v3): "\n given three position vectors, calculate the area of a triangle, using\n Heron's formula.\n " [v1, v2, v3] = [np.array(v) for v in [v1, v2, v3]] [a, b, c] = [lin.norm(d) for d in [(v1 - v2), (v2 - v3), (v3 - v1)]] s = (((a + b) + c) / 2.0) A = np.sqrt((((s * (s - a)) * (s - b)) * (s - c))) return A<|docstring|>given three position vectors, calculate the area of a triangle, using Heron's formula.<|endoftext|>
72467692dc3f7f72a7a862d449c33c19199355abe8aff7b8605bbb162c1b32bd
def read_triangle_areas_input(filename, posiRange, triaRange): "\n filename is the path to 'input.dat'\n " (posi, tria) = read_triangulation_input(filename, posiRange, triaRange) areas = np.zeros(((triaRange[1] - triaRange[0]) + 1)) for (i, t) in enumerate(tria): areas[i] = triangleArea(posi[t[0]], posi[t[1]], posi[t[2]]) return areas
filename is the path to 'input.dat'
FBEM/postproc.py
read_triangle_areas_input
icemtel/stokes
0
python
def read_triangle_areas_input(filename, posiRange, triaRange):
    """
    Return the area of every triangle of one object read from an FBEM input
    file ('input.dat'), as a 1D numpy array.
    """
    posi, tria = read_triangulation_input(filename, posiRange, triaRange)
    return np.array([triangleArea(posi[t[0]], posi[t[1]], posi[t[2]])
                     for t in tria])
def read_triangle_areas_input(filename, posiRange, triaRange): "\n \n " (posi, tria) = read_triangulation_input(filename, posiRange, triaRange) areas = np.zeros(((triaRange[1] - triaRange[0]) + 1)) for (i, t) in enumerate(tria): areas[i] = triangleArea(posi[t[0]], posi[t[1]], posi[t[2]]) return areas<|docstring|>filename is the path to 'input.dat'<|endoftext|>
78bfe7de3df0ccbe60a3f64997282aaa9fdcd3140eaa9554464314abd32661b7
def _exctract_data(posiRange, triaRange, infile='input.dat', outfile='output.dat'): '\n given a number range, that correspond to an object,\n read velocities, forces and positions and areas as np.arrays.\n ' areas = read_triangle_areas_input(infile, posiRange, triaRange) num = ((triaRange[1] - triaRange[0]) + 1) velocities = np.zeros((num, 3)) forces = np.zeros((num, 3)) positions = np.zeros((num, 3)) with open(outfile, 'r') as file: for line in file: try: (a, b, c, d, e, f, g, h, i, j) = line.split() index = int(a) if ((index > triaRange[0]) and (index <= (triaRange[1] + 1))): velocities[((index - triaRange[0]) - 1)] = (float(b), float(c), float(d)) forces[((index - triaRange[0]) - 1)] = (float(e), float(f), float(g)) positions[((index - triaRange[0]) - 1)] = (float(h), float(i), float(j)) if (index > triaRange[1]): break except: continue for k in range(num): forces[k] *= areas[k] visc = read_viscosity('.', infile) return ResultsData(forces, velocities, positions, visc, areas)
given a number range, that correspond to an object, read velocities, forces and positions and areas as np.arrays.
FBEM/postproc.py
_exctract_data
icemtel/stokes
0
python
def _exctract_data(posiRange, triaRange, infile='input.dat', outfile='output.dat'):
    """
    Read velocities, forces and positions of one object from the FBEM output
    file, plus the matching element areas from the input file.

    Forces reported by FBEM are tractions; they are multiplied by the element
    areas so that the returned values are actual forces.

    :param posiRange: (first, last) node index range of the object
    :param triaRange: (first, last) element index range of the object
    :param infile: FBEM input file (mesh -> areas, viscosity)
    :param outfile: FBEM output file (velocities, tractions, positions)
    :return: ResultsData(forces, velocities, positions, viscosity, areas)
    """
    areas = read_triangle_areas_input(infile, posiRange, triaRange)
    num = triaRange[1] - triaRange[0] + 1
    velocities = np.zeros((num, 3))
    forces = np.zeros((num, 3))
    positions = np.zeros((num, 3))
    with open(outfile, 'r') as out:
        for line in out:
            try:
                idx_s, vx, vy, vz, fx, fy, fz, px, py, pz = line.split()
                index = int(idx_s)
                if triaRange[0] < index <= triaRange[1] + 1:
                    k = index - triaRange[0] - 1
                    velocities[k] = (float(vx), float(vy), float(vz))
                    forces[k] = (float(fx), float(fy), float(fz))
                    positions[k] = (float(px), float(py), float(pz))
                if index > triaRange[1]:
                    # past the object's range: nothing more to read
                    break
            except ValueError:
                # header / non-data lines do not have 10 numeric columns
                continue
    # convert tractions to forces (vectorized instead of a per-row loop)
    forces *= areas[:, np.newaxis]
    # NOTE(review): viscosity is read relative to '.' even though *infile*
    # may already contain a folder — preserved from the original; verify.
    visc = read_viscosity('.', infile)
    return ResultsData(forces, velocities, positions, visc, areas)
def _exctract_data(posiRange, triaRange, infile='input.dat', outfile='output.dat'): '\n given a number range, that correspond to an object,\n read velocities, forces and positions and areas as np.arrays.\n ' areas = read_triangle_areas_input(infile, posiRange, triaRange) num = ((triaRange[1] - triaRange[0]) + 1) velocities = np.zeros((num, 3)) forces = np.zeros((num, 3)) positions = np.zeros((num, 3)) with open(outfile, 'r') as file: for line in file: try: (a, b, c, d, e, f, g, h, i, j) = line.split() index = int(a) if ((index > triaRange[0]) and (index <= (triaRange[1] + 1))): velocities[((index - triaRange[0]) - 1)] = (float(b), float(c), float(d)) forces[((index - triaRange[0]) - 1)] = (float(e), float(f), float(g)) positions[((index - triaRange[0]) - 1)] = (float(h), float(i), float(j)) if (index > triaRange[1]): break except: continue for k in range(num): forces[k] *= areas[k] visc = read_viscosity('.', infile) return ResultsData(forces, velocities, positions, visc, areas)<|docstring|>given a number range, that correspond to an object, read velocities, forces and positions and areas as np.arrays.<|endoftext|>
1a1449fe696392eb18f335c58ad5805cb93b3a8b7707d6d2c867926a9f948701
def load_ranges(folder): '\n Create Ranges class from a ranges.csv file.\n ' ranges = Ranges() objects_df = pd.read_csv(os.path.join(folder, 'ranges.csv')) for (idx, row) in objects_df.iterrows(): (name, coords_start, coords_end, trias_start, trias_end) = row ranges.names.append(name) ranges.coords[name] = (coords_start, coords_end) ranges.trias[name] = (trias_start, trias_end) return ranges
Create Ranges class from a ranges.csv file.
FBEM/postproc.py
load_ranges
icemtel/stokes
0
python
def load_ranges(folder):
    """
    Build a Ranges object from the 'ranges.csv' file inside *folder*.
    """
    ranges = Ranges()
    df = pd.read_csv(os.path.join(folder, 'ranges.csv'))
    for _, row in df.iterrows():
        name, coords_start, coords_end, trias_start, trias_end = row
        ranges.names.append(name)
        ranges.coords[name] = (coords_start, coords_end)
        ranges.trias[name] = (trias_start, trias_end)
    return ranges
def load_ranges(folder): '\n \n ' ranges = Ranges() objects_df = pd.read_csv(os.path.join(folder, 'ranges.csv')) for (idx, row) in objects_df.iterrows(): (name, coords_start, coords_end, trias_start, trias_end) = row ranges.names.append(name) ranges.coords[name] = (coords_start, coords_end) ranges.trias[name] = (trias_start, trias_end) return ranges<|docstring|>Create Ranges class from a ranges.csv file.<|endoftext|>
21ce085be77e45c715a56c698653f0e9df3a758636162b66e81fc617dea96831
def extract_data_by_names(object_name_list, folder='.', infile='input.dat', outfile='output.dat'): '\n Returns list of ResultsData, corresponding to each object in obejct_name_list\n ' ranges = load_ranges(folder) res_list = [] for object_name in object_name_list: res = _exctract_data(ranges.coords[object_name], ranges.trias[object_name], infile=os.path.join(folder, infile), outfile=os.path.join(folder, outfile)) res_list.append(res) return res_list
Returns list of ResultsData, corresponding to each object in object_name_list
FBEM/postproc.py
extract_data_by_names
icemtel/stokes
0
python
def extract_data_by_names(object_name_list, folder='.', infile='input.dat', outfile='output.dat'):
    """
    Return a list of ResultsData, one per object in *object_name_list*.
    """
    ranges = load_ranges(folder)
    in_path = os.path.join(folder, infile)
    out_path = os.path.join(folder, outfile)
    return [_exctract_data(ranges.coords[name], ranges.trias[name],
                           infile=in_path, outfile=out_path)
            for name in object_name_list]
def extract_data_by_names(object_name_list, folder='.', infile='input.dat', outfile='output.dat'): '\n \n ' ranges = load_ranges(folder) res_list = [] for object_name in object_name_list: res = _exctract_data(ranges.coords[object_name], ranges.trias[object_name], infile=os.path.join(folder, infile), outfile=os.path.join(folder, outfile)) res_list.append(res) return res_list<|docstring|>Returns list of ResultsData, corresponding to each object in obejct_name_list<|endoftext|>
59f4809449fdd0eacbcfa2304ea3bd58124d793f9668310746443b0f86f31a04
def get_df_from_csv_str(csv_str): '\n Convert str object to df. Assuming that str object has a structure of csv file.\n ' import io csv_file = io.StringIO(csv_str) df = pd.read_csv(csv_file) return df
Convert str object to df. Assuming that str object has a structure of csv file.
FBEM/postproc.py
get_df_from_csv_str
icemtel/stokes
0
python
def get_df_from_csv_str(csv_str):
    """
    Parse *csv_str* — a string with the layout of a csv file — into a DataFrame.
    """
    import io
    return pd.read_csv(io.StringIO(csv_str))
def get_df_from_csv_str(csv_str): '\n \n ' import io csv_file = io.StringIO(csv_str) df = pd.read_csv(csv_file) return df<|docstring|>Convert str object to df. Assuming that str object has a structure of csv file.<|endoftext|>
46ffcb4ebe7bcc88b0b7682ec26d755aefbfad5f00bb5c44298c5bd49e7dc77b
def load_ranges_hdf5(file_handle, group='.'): '\n Helper function to load ranges/remembery in one line\n ' g = file_handle[path_to_string(group)] ranges_csv_str = g['ranges'][()] ranges = Ranges() objects_df = get_df_from_csv_str(ranges_csv_str) for (idx, row) in objects_df.iterrows(): (name, coords_start, coords_end, trias_start, trias_end) = row ranges.names.append(name) ranges.coords[name] = (coords_start, coords_end) ranges.trias[name] = (trias_start, trias_end) return ranges
Helper function to load ranges/remembery in one line
FBEM/postproc.py
load_ranges_hdf5
icemtel/stokes
0
python
def load_ranges_hdf5(file_handle, group='.'):
    """
    Load a Ranges object from the 'ranges' dataset of an hdf5 group.
    """
    g = file_handle[path_to_string(group)]
    csv_str = g['ranges'][()]
    ranges = Ranges()
    for _, row in get_df_from_csv_str(csv_str).iterrows():
        name, coords_start, coords_end, trias_start, trias_end = row
        ranges.names.append(name)
        ranges.coords[name] = (coords_start, coords_end)
        ranges.trias[name] = (trias_start, trias_end)
    return ranges
def load_ranges_hdf5(file_handle, group='.'): '\n \n ' g = file_handle[path_to_string(group)] ranges_csv_str = g['ranges'][()] ranges = Ranges() objects_df = get_df_from_csv_str(ranges_csv_str) for (idx, row) in objects_df.iterrows(): (name, coords_start, coords_end, trias_start, trias_end) = row ranges.names.append(name) ranges.coords[name] = (coords_start, coords_end) ranges.trias[name] = (trias_start, trias_end) return ranges<|docstring|>Helper function to load ranges/remembery in one line<|endoftext|>
3bca48be2ffa07bd79c4db4fc3eabac8e0dd584e608f59a879dee7e9b6402542
def _extract_data_hdf5(triaRange, file_handle, group='.'): '\n - given a number range, that correspond to an object,\n read velocities, forces and positions and areas as np.arrays.\n - load from hdf5 file.\n ' g = file_handle[path_to_string(group)] (t0, t1) = triaRange forces = g['forces'][t0:(t1 + 1)] velocities = g['velocities'][t0:(t1 + 1)] positions = g['coords'][t0:(t1 + 1)] visc = read_viscosity_hdf5(g) node_positions = g['node_coords'][:] trias = g['trias'][t0:(t1 + 1)] areas = np.array([triangleArea(node_positions[t[0]], node_positions[t[1]], node_positions[t[2]]) for t in trias]) return ResultsData(forces, velocities, positions, visc, areas)
- given a number range, that correspond to an object, read velocities, forces and positions and areas as np.arrays. - load from hdf5 file.
FBEM/postproc.py
_extract_data_hdf5
icemtel/stokes
0
python
def _extract_data_hdf5(triaRange, file_handle, group='.'):
    """
    Read forces, velocities, positions and element areas of one object
    (given by its element index range) from an hdf5 file.
    """
    g = file_handle[path_to_string(group)]
    lo, hi = triaRange
    forces = g['forces'][lo:hi + 1]
    velocities = g['velocities'][lo:hi + 1]
    positions = g['coords'][lo:hi + 1]
    visc = read_viscosity_hdf5(g)
    # areas are recomputed from the node mesh, one triangle at a time
    nodes = g['node_coords'][:]
    trias = g['trias'][lo:hi + 1]
    areas = np.array([triangleArea(nodes[n0], nodes[n1], nodes[n2])
                      for n0, n1, n2 in trias])
    return ResultsData(forces, velocities, positions, visc, areas)
def _extract_data_hdf5(triaRange, file_handle, group='.'): '\n - given a number range, that correspond to an object,\n read velocities, forces and positions and areas as np.arrays.\n - load from hdf5 file.\n ' g = file_handle[path_to_string(group)] (t0, t1) = triaRange forces = g['forces'][t0:(t1 + 1)] velocities = g['velocities'][t0:(t1 + 1)] positions = g['coords'][t0:(t1 + 1)] visc = read_viscosity_hdf5(g) node_positions = g['node_coords'][:] trias = g['trias'][t0:(t1 + 1)] areas = np.array([triangleArea(node_positions[t[0]], node_positions[t[1]], node_positions[t[2]]) for t in trias]) return ResultsData(forces, velocities, positions, visc, areas)<|docstring|>- given a number range, that correspond to an object, read velocities, forces and positions and areas as np.arrays. - load from hdf5 file.<|endoftext|>
28c1c51078c06b061575248c793e7ca4f84341f33b54b8e170e55806513770dc
def extract_data_by_names_hdf5(names, file, group='.'): '\n Returns list of ResultsData, corresponding to each object in obejct_name_list\n ' ranges = load_ranges_hdf5(file, group) g = file[path_to_string(group)] forces_full = g['forces'][()] velocities_full = g['velocities'][()] positions_full = g['coords'][()] node_positions = g['node_coords'][:] visc = read_viscosity_hdf5(g) res_list = [] for name in names: (t0, t1) = ranges.trias[name] forces = forces_full[t0:(t1 + 1)] velocities = velocities_full[t0:(t1 + 1)] positions = positions_full[t0:(t1 + 1)] trias = g['trias'][t0:(t1 + 1)] areas = np.array([triangleArea(node_positions[t[0]], node_positions[t[1]], node_positions[t[2]]) for t in trias]) res = ResultsData(forces, velocities, positions, visc, areas) res_list.append(res) return res_list
Returns list of ResultsData, corresponding to each object in object_name_list
FBEM/postproc.py
extract_data_by_names_hdf5
icemtel/stokes
0
python
def extract_data_by_names_hdf5(names, file, group='.'):
    """
    Return a list of ResultsData, one per object in *names*, read from an
    hdf5 file.
    """
    ranges = load_ranges_hdf5(file, group)
    g = file[path_to_string(group)]
    # load the full arrays once, then slice per object
    all_forces = g['forces'][()]
    all_velocities = g['velocities'][()]
    all_positions = g['coords'][()]
    nodes = g['node_coords'][:]
    visc = read_viscosity_hdf5(g)
    res_list = []
    for name in names:
        lo, hi = ranges.trias[name]
        trias = g['trias'][lo:hi + 1]
        areas = np.array([triangleArea(nodes[t[0]], nodes[t[1]], nodes[t[2]])
                          for t in trias])
        res_list.append(ResultsData(all_forces[lo:hi + 1],
                                    all_velocities[lo:hi + 1],
                                    all_positions[lo:hi + 1],
                                    visc, areas))
    return res_list
def extract_data_by_names_hdf5(names, file, group='.'): '\n \n ' ranges = load_ranges_hdf5(file, group) g = file[path_to_string(group)] forces_full = g['forces'][()] velocities_full = g['velocities'][()] positions_full = g['coords'][()] node_positions = g['node_coords'][:] visc = read_viscosity_hdf5(g) res_list = [] for name in names: (t0, t1) = ranges.trias[name] forces = forces_full[t0:(t1 + 1)] velocities = velocities_full[t0:(t1 + 1)] positions = positions_full[t0:(t1 + 1)] trias = g['trias'][t0:(t1 + 1)] areas = np.array([triangleArea(node_positions[t[0]], node_positions[t[1]], node_positions[t[2]]) for t in trias]) res = ResultsData(forces, velocities, positions, visc, areas) res_list.append(res) return res_list<|docstring|>Returns list of ResultsData, corresponding to each object in obejct_name_list<|endoftext|>
470988e0d9d37d4e9408343d68aee774738fa468bf2ef807db0d6a9a5b7af37d
def get_names(self): '\n :return: object names without "all"\n ' names = self.names.copy() names.remove('all') return names
:return: object names without "all"
FBEM/postproc.py
get_names
icemtel/stokes
0
python
def get_names(self):
    """
    :return: the object names with the aggregate "all" entry removed
    """
    names = list(self.names)
    names.remove('all')
    return names
def get_names(self): '\n \n ' names = self.names.copy() names.remove('all') return names<|docstring|>:return: object names without "all"<|endoftext|>
3063ab9b0e5f141c2464bacd639fd513ea8db83eb56f74339a8492f6c23ad2d4
def __init__(self, path, group='.'): '\n :param path: either folder or hdf5 filename\n :param group: group in hdf5 file; ignored if `path` is a folder\n ' self.path = Path(path) self.group = group if (not self.path.exists()): raise ValueError("Path doesn't point to a hdf5 file or directory") if self.path.is_dir(): self.is_hdf5 = False elif (self.path.suffix in ['.h5', '.hdf5']): self.is_hdf5 = True else: raise NotImplementedError
:param path: either folder or hdf5 filename :param group: group in hdf5 file; ignored if `path` is a folder
FBEM/postproc.py
__init__
icemtel/stokes
0
python
def __init__(self, path, group='.'):
    """
    :param path: either a results folder or an hdf5 file name
    :param group: group inside the hdf5 file; ignored when *path* is a folder
    :raises ValueError: if *path* does not exist
    :raises NotImplementedError: if *path* is a file without an hdf5 suffix
    """
    self.path = Path(path)
    self.group = group
    if not self.path.exists():
        raise ValueError("Path doesn't point to a hdf5 file or directory")
    if self.path.is_dir():
        self.is_hdf5 = False
    else:
        if self.path.suffix not in ('.h5', '.hdf5'):
            raise NotImplementedError
        self.is_hdf5 = True
def __init__(self, path, group='.'): '\n :param path: either folder or hdf5 filename\n :param group: group in hdf5 file; ignored if `path` is a folder\n ' self.path = Path(path) self.group = group if (not self.path.exists()): raise ValueError("Path doesn't point to a hdf5 file or directory") if self.path.is_dir(): self.is_hdf5 = False elif (self.path.suffix in ['.h5', '.hdf5']): self.is_hdf5 = True else: raise NotImplementedError<|docstring|>:param path: either folder or hdf5 filename :param group: group in hdf5 file; ignored if `path` is a folder<|endoftext|>
198f1d620da33217d41e84b9166c3cd635efefd518dea6d5c190d42c69992322
def read_triangulation_list(self, names): '\n :return: Tuple: list of coords and list of triangles\n ' if self.is_hdf5: with h5py.File(self.path, 'r') as file: return read_triangulation_by_names_hdf5(names, file, self.group) else: return read_triangulation_by_names_input(names, self.path)
:return: Tuple: list of coords and list of triangles
FBEM/postproc.py
read_triangulation_list
icemtel/stokes
0
python
def read_triangulation_list(self, names):
    """
    :return: tuple (list of coords arrays, list of triangle arrays) for the
             given object names, read from either the hdf5 file or the folder
    """
    if not self.is_hdf5:
        return read_triangulation_by_names_input(names, self.path)
    with h5py.File(self.path, 'r') as file:
        return read_triangulation_by_names_hdf5(names, file, self.group)
def read_triangulation_list(self, names): '\n \n ' if self.is_hdf5: with h5py.File(self.path, 'r') as file: return read_triangulation_by_names_hdf5(names, file, self.group) else: return read_triangulation_by_names_input(names, self.path)<|docstring|>:return: Tuple: list of coords and list of triangles<|endoftext|>