body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
---|---|---|---|---|---|---|---|---|---|
8c7af49b661c9749b1c54724003d17452c6de0467cd7a8755c004af33b07c246
|
def check(self):
'Check if the action can be performed.\n\n This may check arguments/types/... and normally raises an Error if\n something is wrong.\n '
|
Check if the action can be performed.
This may check arguments/types/... and normally raises an Error if
something is wrong.
|
nicos/devices/generic/sequence.py
|
check
|
mlz-ictrl/nicos
| 12 |
python
|
def check(self):
'Check if the action can be performed.\n\n This may check arguments/types/... and normally raises an Error if\n something is wrong.\n '
|
def check(self):
'Check if the action can be performed.\n\n This may check arguments/types/... and normally raises an Error if\n something is wrong.\n '<|docstring|>Check if the action can be performed.
This may check arguments/types/... and normally raises an Error if
something is wrong.<|endoftext|>
|
a8ad703e3d801ec65e596b40c6336a817a83317c29996003d4d4d553e3934899
|
def run(self):
'Initate an action, define in derived classes.'
|
Initate an action, define in derived classes.
|
nicos/devices/generic/sequence.py
|
run
|
mlz-ictrl/nicos
| 12 |
python
|
def run(self):
|
def run(self):
<|docstring|>Initate an action, define in derived classes.<|endoftext|>
|
b53527410b98cb8965755cb9d69e9afd95ddd09426cc4f98f42802733390a2a2
|
def retry(self, retries):
'Retry the start of an already failed action.'
if (retries <= 0):
return
while (retries > 1):
try:
self.run()
return
except Exception:
pass
retries -= 1
self.run()
|
Retry the start of an already failed action.
|
nicos/devices/generic/sequence.py
|
retry
|
mlz-ictrl/nicos
| 12 |
python
|
def retry(self, retries):
if (retries <= 0):
return
while (retries > 1):
try:
self.run()
return
except Exception:
pass
retries -= 1
self.run()
|
def retry(self, retries):
if (retries <= 0):
return
while (retries > 1):
try:
self.run()
return
except Exception:
pass
retries -= 1
self.run()<|docstring|>Retry the start of an already failed action.<|endoftext|>
|
a7b26fe8c4efe5e12355397500cb57b67b801d5eb891cc83bfd40b9a27fe3cff
|
def isCompleted(self):
'Check for completion of the initiated action.\n\n Should return True if completed or False if not yet completed,\n allowing a polling mode.\n '
return True
|
Check for completion of the initiated action.
Should return True if completed or False if not yet completed,
allowing a polling mode.
|
nicos/devices/generic/sequence.py
|
isCompleted
|
mlz-ictrl/nicos
| 12 |
python
|
def isCompleted(self):
'Check for completion of the initiated action.\n\n Should return True if completed or False if not yet completed,\n allowing a polling mode.\n '
return True
|
def isCompleted(self):
'Check for completion of the initiated action.\n\n Should return True if completed or False if not yet completed,\n allowing a polling mode.\n '
return True<|docstring|>Check for completion of the initiated action.
Should return True if completed or False if not yet completed,
allowing a polling mode.<|endoftext|>
|
ba1cdf55ab866013bce8d8b6b4a85b6b8defc331e2fd6561d4dbac581752f9da
|
def stop(self):
'Interrupt the action started by run().'
|
Interrupt the action started by run().
|
nicos/devices/generic/sequence.py
|
stop
|
mlz-ictrl/nicos
| 12 |
python
|
def stop(self):
|
def stop(self):
<|docstring|>Interrupt the action started by run().<|endoftext|>
|
0271fe5471472ee005d7dff8d27c8c02360457c65c5f097cdf79161844492f17
|
def _set_seq_status(self, newstatus=status.OK, newstatusstring='unknown'):
'Set the current sequence status.'
oldstatus = self.status()
self._seq_status = (newstatus, newstatusstring.strip())
self.log.debug(self._seq_status[1])
if (self._cache and (oldstatus != self._seq_status)):
self._cache.put(self, 'status', self._seq_status, currenttime(), self.maxage)
|
Set the current sequence status.
|
nicos/devices/generic/sequence.py
|
_set_seq_status
|
mlz-ictrl/nicos
| 12 |
python
|
def _set_seq_status(self, newstatus=status.OK, newstatusstring='unknown'):
oldstatus = self.status()
self._seq_status = (newstatus, newstatusstring.strip())
self.log.debug(self._seq_status[1])
if (self._cache and (oldstatus != self._seq_status)):
self._cache.put(self, 'status', self._seq_status, currenttime(), self.maxage)
|
def _set_seq_status(self, newstatus=status.OK, newstatusstring='unknown'):
oldstatus = self.status()
self._seq_status = (newstatus, newstatusstring.strip())
self.log.debug(self._seq_status[1])
if (self._cache and (oldstatus != self._seq_status)):
self._cache.put(self, 'status', self._seq_status, currenttime(), self.maxage)<|docstring|>Set the current sequence status.<|endoftext|>
|
e89987fc8126b3119843ccb1cdca10a7f8474cfb2e334476ff531ac8b8cbf56a
|
def _startSequence(self, sequence):
'Check and start the sequence.'
for (i, step) in enumerate(sequence):
if (not hasattr(step, '__iter__')):
step = (step,)
sequence[i] = step
for action in step:
try:
action.check()
except Exception as e:
self.log.error('action.check for %r failed with %r', action, e)
self.log.debug('_checkFailed returned %r', self._checkFailed(i, action, sys.exc_info()))
if self._seq_is_running():
raise ProgrammingError(self, 'sequence is still running!')
if (self.loglevel == 'debug'):
self.log.debug('generated sequence has %d steps:', len(sequence))
for (i, step) in enumerate(sequence):
self.log.debug(' - step %d:', (i + 1))
for action in step:
self.log.debug(' - action: %r', action)
self._set_seq_status(status.BUSY, '')
self._asyncSequence(sequence)
|
Check and start the sequence.
|
nicos/devices/generic/sequence.py
|
_startSequence
|
mlz-ictrl/nicos
| 12 |
python
|
def _startSequence(self, sequence):
for (i, step) in enumerate(sequence):
if (not hasattr(step, '__iter__')):
step = (step,)
sequence[i] = step
for action in step:
try:
action.check()
except Exception as e:
self.log.error('action.check for %r failed with %r', action, e)
self.log.debug('_checkFailed returned %r', self._checkFailed(i, action, sys.exc_info()))
if self._seq_is_running():
raise ProgrammingError(self, 'sequence is still running!')
if (self.loglevel == 'debug'):
self.log.debug('generated sequence has %d steps:', len(sequence))
for (i, step) in enumerate(sequence):
self.log.debug(' - step %d:', (i + 1))
for action in step:
self.log.debug(' - action: %r', action)
self._set_seq_status(status.BUSY, )
self._asyncSequence(sequence)
|
def _startSequence(self, sequence):
for (i, step) in enumerate(sequence):
if (not hasattr(step, '__iter__')):
step = (step,)
sequence[i] = step
for action in step:
try:
action.check()
except Exception as e:
self.log.error('action.check for %r failed with %r', action, e)
self.log.debug('_checkFailed returned %r', self._checkFailed(i, action, sys.exc_info()))
if self._seq_is_running():
raise ProgrammingError(self, 'sequence is still running!')
if (self.loglevel == 'debug'):
self.log.debug('generated sequence has %d steps:', len(sequence))
for (i, step) in enumerate(sequence):
self.log.debug(' - step %d:', (i + 1))
for action in step:
self.log.debug(' - action: %r', action)
self._set_seq_status(status.BUSY, )
self._asyncSequence(sequence)<|docstring|>Check and start the sequence.<|endoftext|>
|
503dad7a37ac97fd43d5abfe5250e81985a6567cbca6a762b43a7ac8409f6a22
|
def _asyncSequence(self, sequence):
'Start a thread to execute the sequence.'
self._seq_stopflag = False
self._seq_was_stopped = False
self._seq_thread = createThread('sequence', self._run, (sequence,))
|
Start a thread to execute the sequence.
|
nicos/devices/generic/sequence.py
|
_asyncSequence
|
mlz-ictrl/nicos
| 12 |
python
|
def _asyncSequence(self, sequence):
self._seq_stopflag = False
self._seq_was_stopped = False
self._seq_thread = createThread('sequence', self._run, (sequence,))
|
def _asyncSequence(self, sequence):
self._seq_stopflag = False
self._seq_was_stopped = False
self._seq_thread = createThread('sequence', self._run, (sequence,))<|docstring|>Start a thread to execute the sequence.<|endoftext|>
|
54a207c5ead68a7bef64654325af3a744526586f468f3cdf267e21e25b3e1114
|
def _run(self, sequence):
"The thread performing the sequence.\n\n May be overwritten in derived classes needed the status sync between\n poller and daemon but don't want to use the actual sequencing routine.\n "
try:
self._sequence(sequence)
finally:
self._seq_stopflag = False
self._cleanUp()
|
The thread performing the sequence.
May be overwritten in derived classes needed the status sync between
poller and daemon but don't want to use the actual sequencing routine.
|
nicos/devices/generic/sequence.py
|
_run
|
mlz-ictrl/nicos
| 12 |
python
|
def _run(self, sequence):
"The thread performing the sequence.\n\n May be overwritten in derived classes needed the status sync between\n poller and daemon but don't want to use the actual sequencing routine.\n "
try:
self._sequence(sequence)
finally:
self._seq_stopflag = False
self._cleanUp()
|
def _run(self, sequence):
"The thread performing the sequence.\n\n May be overwritten in derived classes needed the status sync between\n poller and daemon but don't want to use the actual sequencing routine.\n "
try:
self._sequence(sequence)
finally:
self._seq_stopflag = False
self._cleanUp()<|docstring|>The thread performing the sequence.
May be overwritten in derived classes needed the status sync between
poller and daemon but don't want to use the actual sequencing routine.<|endoftext|>
|
6a9da785d6948dbb4166201f9c7b3bb3100122d08cd2f331c63f944c7a4f0b95
|
def _sequence(self, sequence):
"The Sequence 'interpreter', stepping through the sequence."
try:
self.log.debug('performing sequence of %d steps', len(sequence))
for (i, step) in enumerate(sequence):
self._set_seq_status(status.BUSY, (('action %d: ' % (i + 1)) + '; '.join(map(repr, step))))
for action in step:
self.log.debug(' - action: %r', action)
try:
action.run()
except Exception:
self.log.warning('action %d (%r) failed', (i + 1), action, exc=1)
nretries = self._runFailed(i, action, sys.exc_info())
self.log.debug('_runFailed returned %r', nretries)
if nretries:
try:
action.retry(nretries)
except Exception as e:
self.log.debug('action.retry failed with %r', e)
ret = self._retryFailed(i, action, nretries, sys.exc_info())
self.log.debug('_retryFailed returned %r', ret)
waiters = set(step)
while waiters:
t = monotonic()
self._set_seq_status(status.BUSY, ('waiting: ' + '; '.join(map(repr, waiters))))
for action in list(waiters):
try:
if action.isCompleted():
waiters.remove(action)
except Exception as e:
self.log.debug('action.isCompleted failed with %r', e)
code = self._waitFailed(i, action, sys.exc_info())
self.log.debug('_waitFailed returned %r', code)
if code:
if action.isCompleted():
waiters.remove(action)
if self._seq_stopflag:
self.log.debug('stopflag caught!')
self._seq_was_stopped = True
for dev in waiters:
dev.stop()
break
t = (0.1 - (monotonic() - t))
if (waiters and (t > 0)):
session.delay(t)
if self._seq_stopflag:
self._seq_was_stopped = True
self.log.debug('stopping actions: %s', '; '.join(map(repr, step)))
self._set_seq_status(status.BUSY, (('stopping at step %d: ' % (i + 1)) + '; '.join(map(repr, step))))
try:
for action in step:
failed = []
try:
action.stop()
except Exception as e:
self.log.debug('action.stop failed with %r', e)
failed.append((action, e))
for (ac, e) in failed:
ret = self._stopFailed(i, ac, sys.exc_info())
self.log.debug('_stopFailed returned %r', ret)
finally:
self._stopAction(i)
self._set_seq_status(status.NOTREACHED, (('operation interrupted at step %d: ' % (i + 1)) + '; '.join(map(repr, step))))
self.log.debug('stopping finished')
break
if (not self._seq_stopflag):
self.log.debug('sequence finished')
self._set_seq_status(status.OK, 'idle')
except NicosError as err:
self._set_seq_status(status.ERROR, (('error %s upon ' % err) + self._seq_status[1]))
self.log.error(self._seq_status[1], exc=1)
except Exception as err:
self.log.error('%s', err, exc=1)
|
The Sequence 'interpreter', stepping through the sequence.
|
nicos/devices/generic/sequence.py
|
_sequence
|
mlz-ictrl/nicos
| 12 |
python
|
def _sequence(self, sequence):
try:
self.log.debug('performing sequence of %d steps', len(sequence))
for (i, step) in enumerate(sequence):
self._set_seq_status(status.BUSY, (('action %d: ' % (i + 1)) + '; '.join(map(repr, step))))
for action in step:
self.log.debug(' - action: %r', action)
try:
action.run()
except Exception:
self.log.warning('action %d (%r) failed', (i + 1), action, exc=1)
nretries = self._runFailed(i, action, sys.exc_info())
self.log.debug('_runFailed returned %r', nretries)
if nretries:
try:
action.retry(nretries)
except Exception as e:
self.log.debug('action.retry failed with %r', e)
ret = self._retryFailed(i, action, nretries, sys.exc_info())
self.log.debug('_retryFailed returned %r', ret)
waiters = set(step)
while waiters:
t = monotonic()
self._set_seq_status(status.BUSY, ('waiting: ' + '; '.join(map(repr, waiters))))
for action in list(waiters):
try:
if action.isCompleted():
waiters.remove(action)
except Exception as e:
self.log.debug('action.isCompleted failed with %r', e)
code = self._waitFailed(i, action, sys.exc_info())
self.log.debug('_waitFailed returned %r', code)
if code:
if action.isCompleted():
waiters.remove(action)
if self._seq_stopflag:
self.log.debug('stopflag caught!')
self._seq_was_stopped = True
for dev in waiters:
dev.stop()
break
t = (0.1 - (monotonic() - t))
if (waiters and (t > 0)):
session.delay(t)
if self._seq_stopflag:
self._seq_was_stopped = True
self.log.debug('stopping actions: %s', '; '.join(map(repr, step)))
self._set_seq_status(status.BUSY, (('stopping at step %d: ' % (i + 1)) + '; '.join(map(repr, step))))
try:
for action in step:
failed = []
try:
action.stop()
except Exception as e:
self.log.debug('action.stop failed with %r', e)
failed.append((action, e))
for (ac, e) in failed:
ret = self._stopFailed(i, ac, sys.exc_info())
self.log.debug('_stopFailed returned %r', ret)
finally:
self._stopAction(i)
self._set_seq_status(status.NOTREACHED, (('operation interrupted at step %d: ' % (i + 1)) + '; '.join(map(repr, step))))
self.log.debug('stopping finished')
break
if (not self._seq_stopflag):
self.log.debug('sequence finished')
self._set_seq_status(status.OK, 'idle')
except NicosError as err:
self._set_seq_status(status.ERROR, (('error %s upon ' % err) + self._seq_status[1]))
self.log.error(self._seq_status[1], exc=1)
except Exception as err:
self.log.error('%s', err, exc=1)
|
def _sequence(self, sequence):
try:
self.log.debug('performing sequence of %d steps', len(sequence))
for (i, step) in enumerate(sequence):
self._set_seq_status(status.BUSY, (('action %d: ' % (i + 1)) + '; '.join(map(repr, step))))
for action in step:
self.log.debug(' - action: %r', action)
try:
action.run()
except Exception:
self.log.warning('action %d (%r) failed', (i + 1), action, exc=1)
nretries = self._runFailed(i, action, sys.exc_info())
self.log.debug('_runFailed returned %r', nretries)
if nretries:
try:
action.retry(nretries)
except Exception as e:
self.log.debug('action.retry failed with %r', e)
ret = self._retryFailed(i, action, nretries, sys.exc_info())
self.log.debug('_retryFailed returned %r', ret)
waiters = set(step)
while waiters:
t = monotonic()
self._set_seq_status(status.BUSY, ('waiting: ' + '; '.join(map(repr, waiters))))
for action in list(waiters):
try:
if action.isCompleted():
waiters.remove(action)
except Exception as e:
self.log.debug('action.isCompleted failed with %r', e)
code = self._waitFailed(i, action, sys.exc_info())
self.log.debug('_waitFailed returned %r', code)
if code:
if action.isCompleted():
waiters.remove(action)
if self._seq_stopflag:
self.log.debug('stopflag caught!')
self._seq_was_stopped = True
for dev in waiters:
dev.stop()
break
t = (0.1 - (monotonic() - t))
if (waiters and (t > 0)):
session.delay(t)
if self._seq_stopflag:
self._seq_was_stopped = True
self.log.debug('stopping actions: %s', '; '.join(map(repr, step)))
self._set_seq_status(status.BUSY, (('stopping at step %d: ' % (i + 1)) + '; '.join(map(repr, step))))
try:
for action in step:
failed = []
try:
action.stop()
except Exception as e:
self.log.debug('action.stop failed with %r', e)
failed.append((action, e))
for (ac, e) in failed:
ret = self._stopFailed(i, ac, sys.exc_info())
self.log.debug('_stopFailed returned %r', ret)
finally:
self._stopAction(i)
self._set_seq_status(status.NOTREACHED, (('operation interrupted at step %d: ' % (i + 1)) + '; '.join(map(repr, step))))
self.log.debug('stopping finished')
break
if (not self._seq_stopflag):
self.log.debug('sequence finished')
self._set_seq_status(status.OK, 'idle')
except NicosError as err:
self._set_seq_status(status.ERROR, (('error %s upon ' % err) + self._seq_status[1]))
self.log.error(self._seq_status[1], exc=1)
except Exception as err:
self.log.error('%s', err, exc=1)<|docstring|>The Sequence 'interpreter', stepping through the sequence.<|endoftext|>
|
65e02d22caa3630800807cfd98ee12638a7908e9bcdd30f857bcec9f530c97fc
|
def doStatus(self, maxage=0):
'Return highest statusvalue.'
stati = ([dev.status(maxage) for dev in devIter(self._getWaiters(), Readable)] + [self._seq_status])
stati.sort(key=(lambda st: st[0]))
_status = stati[(- 1)]
if self._seq_is_running():
return (max(status.BUSY, _status[0]), _status[1])
return _status
|
Return highest statusvalue.
|
nicos/devices/generic/sequence.py
|
doStatus
|
mlz-ictrl/nicos
| 12 |
python
|
def doStatus(self, maxage=0):
stati = ([dev.status(maxage) for dev in devIter(self._getWaiters(), Readable)] + [self._seq_status])
stati.sort(key=(lambda st: st[0]))
_status = stati[(- 1)]
if self._seq_is_running():
return (max(status.BUSY, _status[0]), _status[1])
return _status
|
def doStatus(self, maxage=0):
stati = ([dev.status(maxage) for dev in devIter(self._getWaiters(), Readable)] + [self._seq_status])
stati.sort(key=(lambda st: st[0]))
_status = stati[(- 1)]
if self._seq_is_running():
return (max(status.BUSY, _status[0]), _status[1])
return _status<|docstring|>Return highest statusvalue.<|endoftext|>
|
d7075c37cb865db4691f3407bc3a5fba0bc3126cef7f45bb73cbf25363d00496
|
def _stopAction(self, nr):
"Called whenever a running sequence is 'stopped'.\n\n Stopping of the currently performing actions is automatically done\n before. If additional actions are required to get the Device into a\n stable state, place them here.\n\n Default to a NOP.\n "
|
Called whenever a running sequence is 'stopped'.
Stopping of the currently performing actions is automatically done
before. If additional actions are required to get the Device into a
stable state, place them here.
Default to a NOP.
|
nicos/devices/generic/sequence.py
|
_stopAction
|
mlz-ictrl/nicos
| 12 |
python
|
def _stopAction(self, nr):
"Called whenever a running sequence is 'stopped'.\n\n Stopping of the currently performing actions is automatically done\n before. If additional actions are required to get the Device into a\n stable state, place them here.\n\n Default to a NOP.\n "
|
def _stopAction(self, nr):
"Called whenever a running sequence is 'stopped'.\n\n Stopping of the currently performing actions is automatically done\n before. If additional actions are required to get the Device into a\n stable state, place them here.\n\n Default to a NOP.\n "<|docstring|>Called whenever a running sequence is 'stopped'.
Stopping of the currently performing actions is automatically done
before. If additional actions are required to get the Device into a
stable state, place them here.
Default to a NOP.<|endoftext|>
|
caf75774f8690d993694ee3509e1892c42d626768eec8a237b65c13891310662
|
def _checkFailed(self, step, action, exc_info):
'Called whenever an action check failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
Called whenever an action check failed.
This may raise an Exception to end the sequence or return
anything to ignore this.
Default is to re-raise the given exception.
|
nicos/devices/generic/sequence.py
|
_checkFailed
|
mlz-ictrl/nicos
| 12 |
python
|
def _checkFailed(self, step, action, exc_info):
'Called whenever an action check failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
def _checkFailed(self, step, action, exc_info):
'Called whenever an action check failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]<|docstring|>Called whenever an action check failed.
This may raise an Exception to end the sequence or return
anything to ignore this.
Default is to re-raise the given exception.<|endoftext|>
|
8fb0369db016c721c1fe1905a28b6b6c182f4b37f8a4ae04603845203bac8fe9
|
def _runFailed(self, step, action, exc_info):
'Called whenever an action run failed.\n\n This may raise an Exception to end the sequence or return\n an integer. If that integer is > 0, the actions retry is called.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
Called whenever an action run failed.
This may raise an Exception to end the sequence or return
an integer. If that integer is > 0, the actions retry is called.
Default is to re-raise the given exception.
|
nicos/devices/generic/sequence.py
|
_runFailed
|
mlz-ictrl/nicos
| 12 |
python
|
def _runFailed(self, step, action, exc_info):
'Called whenever an action run failed.\n\n This may raise an Exception to end the sequence or return\n an integer. If that integer is > 0, the actions retry is called.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
def _runFailed(self, step, action, exc_info):
'Called whenever an action run failed.\n\n This may raise an Exception to end the sequence or return\n an integer. If that integer is > 0, the actions retry is called.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]<|docstring|>Called whenever an action run failed.
This may raise an Exception to end the sequence or return
an integer. If that integer is > 0, the actions retry is called.
Default is to re-raise the given exception.<|endoftext|>
|
f5f4723a31351e68a3b9dcacf951526842f5a592310545e46e94593e6df32825
|
def _retryFailed(self, step, action, nretries, exc_info):
'Called whenever an actions retry failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
Called whenever an actions retry failed.
This may raise an Exception to end the sequence or return
anything to ignore this.
Default is to re-raise the given exception.
|
nicos/devices/generic/sequence.py
|
_retryFailed
|
mlz-ictrl/nicos
| 12 |
python
|
def _retryFailed(self, step, action, nretries, exc_info):
'Called whenever an actions retry failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
def _retryFailed(self, step, action, nretries, exc_info):
'Called whenever an actions retry failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]<|docstring|>Called whenever an actions retry failed.
This may raise an Exception to end the sequence or return
anything to ignore this.
Default is to re-raise the given exception.<|endoftext|>
|
77eb740ca1383941179ffe30bdf45581ffb6d6dd83e3a51b511724ddc4d78b19
|
def _waitFailed(self, step, action, exc_info):
'Called whenever a wait failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n If the returned value evaluates to a boolean True, the wait\n is retried once. If it still fails, the sequence is aborted.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
Called whenever a wait failed.
This may raise an Exception to end the sequence or return
anything to ignore this.
If the returned value evaluates to a boolean True, the wait
is retried once. If it still fails, the sequence is aborted.
Default is to re-raise the given exception.
|
nicos/devices/generic/sequence.py
|
_waitFailed
|
mlz-ictrl/nicos
| 12 |
python
|
def _waitFailed(self, step, action, exc_info):
'Called whenever a wait failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n If the returned value evaluates to a boolean True, the wait\n is retried once. If it still fails, the sequence is aborted.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]
|
def _waitFailed(self, step, action, exc_info):
'Called whenever a wait failed.\n\n This may raise an Exception to end the sequence or return\n anything to ignore this.\n If the returned value evaluates to a boolean True, the wait\n is retried once. If it still fails, the sequence is aborted.\n\n Default is to re-raise the given exception.\n '
raise exc_info[1]<|docstring|>Called whenever a wait failed.
This may raise an Exception to end the sequence or return
anything to ignore this.
If the returned value evaluates to a boolean True, the wait
is retried once. If it still fails, the sequence is aborted.
Default is to re-raise the given exception.<|endoftext|>
|
d5c994e048211b2b46c2660a8178bf08240272b61ac2530c1a8a82e75c908136
|
def _stopFailed(self, step, action, exc_info):
'Called whenever a stop failed with an exception.\n\n Default is to re-raise the exception.\n '
raise exc_info[1]
|
Called whenever a stop failed with an exception.
Default is to re-raise the exception.
|
nicos/devices/generic/sequence.py
|
_stopFailed
|
mlz-ictrl/nicos
| 12 |
python
|
def _stopFailed(self, step, action, exc_info):
'Called whenever a stop failed with an exception.\n\n Default is to re-raise the exception.\n '
raise exc_info[1]
|
def _stopFailed(self, step, action, exc_info):
'Called whenever a stop failed with an exception.\n\n Default is to re-raise the exception.\n '
raise exc_info[1]<|docstring|>Called whenever a stop failed with an exception.
Default is to re-raise the exception.<|endoftext|>
|
3aa4b9724e601e07f1cce6ac1e80af441736c8891cd693246924f5d8e4f16fc5
|
def _cleanUp(self):
"Called at the end of the sequence thread.\n\n It could perform a clean up on derived devices to bring it back into\n a 'normal' state.\n "
|
Called at the end of the sequence thread.
It could perform a clean up on derived devices to bring it back into
a 'normal' state.
|
nicos/devices/generic/sequence.py
|
_cleanUp
|
mlz-ictrl/nicos
| 12 |
python
|
def _cleanUp(self):
"Called at the end of the sequence thread.\n\n It could perform a clean up on derived devices to bring it back into\n a 'normal' state.\n "
|
def _cleanUp(self):
"Called at the end of the sequence thread.\n\n It could perform a clean up on derived devices to bring it back into\n a 'normal' state.\n "<|docstring|>Called at the end of the sequence thread.
It could perform a clean up on derived devices to bring it back into
a 'normal' state.<|endoftext|>
|
dee9ae167d4f5069a7f024bc8b29db4b21a4e3b532e285ca7c19a16abee238de
|
def doStart(self, target):
'Generate and start a sequence if non is running.\n\n Just calls ``self._startSequence(self._generateSequence(target))``\n '
if self._seq_is_running():
if (self._mode == SIMULATION):
self._seq_thread.join()
self._seq_thread = None
else:
raise MoveError(self, ('Cannot start device, sequence is still running (at %s)!' % self._seq_status[1]))
self._startSequence(self._generateSequence(target))
|
Generate and start a sequence if non is running.
Just calls ``self._startSequence(self._generateSequence(target))``
|
nicos/devices/generic/sequence.py
|
doStart
|
mlz-ictrl/nicos
| 12 |
python
|
def doStart(self, target):
'Generate and start a sequence if non is running.\n\n Just calls ``self._startSequence(self._generateSequence(target))``\n '
if self._seq_is_running():
if (self._mode == SIMULATION):
self._seq_thread.join()
self._seq_thread = None
else:
raise MoveError(self, ('Cannot start device, sequence is still running (at %s)!' % self._seq_status[1]))
self._startSequence(self._generateSequence(target))
|
def doStart(self, target):
'Generate and start a sequence if non is running.\n\n Just calls ``self._startSequence(self._generateSequence(target))``\n '
if self._seq_is_running():
if (self._mode == SIMULATION):
self._seq_thread.join()
self._seq_thread = None
else:
raise MoveError(self, ('Cannot start device, sequence is still running (at %s)!' % self._seq_status[1]))
self._startSequence(self._generateSequence(target))<|docstring|>Generate and start a sequence if non is running.
Just calls ``self._startSequence(self._generateSequence(target))``<|endoftext|>
|
48b5b9d94c8fd36a7738131c9e5f6b5c114a4c50d0b2487674c97d07031e91ce
|
def _generateSequence(self, target):
'Return the target-specific sequence as a list of steps.\n\n Each step is a SequenceItem or a tuple thereof.\n SequenceItems (also called actions) are "executed" one after another in\n a "lock-step fashion" while the actions grouped together in a tuple are\n tried to execute in parallel.\n\n The actual action performed depends on the implementation of the\n `SequenceItem`.\n\n Default is to raise an `NotImplementedError`\n '
raise NotImplementedError('put a proper _generateSequence implementation here!')
|
Return the target-specific sequence as a list of steps.
Each step is a SequenceItem or a tuple thereof.
SequenceItems (also called actions) are "executed" one after another in
a "lock-step fashion" while the actions grouped together in a tuple are
tried to execute in parallel.
The actual action performed depends on the implementation of the
`SequenceItem`.
Default is to raise an `NotImplementedError`
|
nicos/devices/generic/sequence.py
|
_generateSequence
|
mlz-ictrl/nicos
| 12 |
python
|
def _generateSequence(self, target):
'Return the target-specific sequence as a list of steps.\n\n Each step is a SequenceItem or a tuple thereof.\n SequenceItems (also called actions) are "executed" one after another in\n a "lock-step fashion" while the actions grouped together in a tuple are\n tried to execute in parallel.\n\n The actual action performed depends on the implementation of the\n `SequenceItem`.\n\n Default is to raise an `NotImplementedError`\n '
raise NotImplementedError('put a proper _generateSequence implementation here!')
|
def _generateSequence(self, target):
'Return the target-specific sequence as a list of steps.\n\n Each step is a SequenceItem or a tuple thereof.\n SequenceItems (also called actions) are "executed" one after another in\n a "lock-step fashion" while the actions grouped together in a tuple are\n tried to execute in parallel.\n\n The actual action performed depends on the implementation of the\n `SequenceItem`.\n\n Default is to raise an `NotImplementedError`\n '
raise NotImplementedError('put a proper _generateSequence implementation here!')<|docstring|>Return the target-specific sequence as a list of steps.
Each step is a SequenceItem or a tuple thereof.
SequenceItems (also called actions) are "executed" one after another in
a "lock-step fashion" while the actions grouped together in a tuple are
tried to execute in parallel.
The actual action performed depends on the implementation of the
`SequenceItem`.
Default is to raise an `NotImplementedError`<|endoftext|>
|
669ceff18c69aab37415a80f34ccad21d5b831f452d7f0a83c27e2c72c84125d
|
def doPrepare(self):
'Prepare measurement sequence.\n\n This method will raise a `NicosError` when a sequence is already in\n progress. Otherwise the internal sequence state is set to `status.OK`\n which also helps starting a new sequence when a previous sequence ended\n up in fault state.\n\n Derived implementations should first call this method and might call\n `doPrepare` on attached devices.\n\n '
if self._seq_is_running():
if (self._mode == SIMULATION):
self._seq_thread.join()
self._seq_thread = None
else:
raise NicosError(self, 'Cannot start device, it is still busy')
if ((self._seq_status[0] > status.OK) and (not self._seq_was_stopped)):
self.log.warning('resetting internal state %s', formatStatus(self._seq_status))
self._set_seq_status(status.OK, 'preparing measurement')
|
Prepare measurement sequence.
This method will raise a `NicosError` when a sequence is already in
progress. Otherwise the internal sequence state is set to `status.OK`
which also helps starting a new sequence when a previous sequence ended
up in fault state.
Derived implementations should first call this method and might call
`doPrepare` on attached devices.
|
nicos/devices/generic/sequence.py
|
doPrepare
|
mlz-ictrl/nicos
| 12 |
python
|
def doPrepare(self):
'Prepare measurement sequence.\n\n This method will raise a `NicosError` when a sequence is already in\n progress. Otherwise the internal sequence state is set to `status.OK`\n which also helps starting a new sequence when a previous sequence ended\n up in fault state.\n\n Derived implementations should first call this method and might call\n `doPrepare` on attached devices.\n\n '
if self._seq_is_running():
if (self._mode == SIMULATION):
self._seq_thread.join()
self._seq_thread = None
else:
raise NicosError(self, 'Cannot start device, it is still busy')
if ((self._seq_status[0] > status.OK) and (not self._seq_was_stopped)):
self.log.warning('resetting internal state %s', formatStatus(self._seq_status))
self._set_seq_status(status.OK, 'preparing measurement')
|
def doPrepare(self):
'Prepare measurement sequence.\n\n This method will raise a `NicosError` when a sequence is already in\n progress. Otherwise the internal sequence state is set to `status.OK`\n which also helps starting a new sequence when a previous sequence ended\n up in fault state.\n\n Derived implementations should first call this method and might call\n `doPrepare` on attached devices.\n\n '
if self._seq_is_running():
if (self._mode == SIMULATION):
self._seq_thread.join()
self._seq_thread = None
else:
raise NicosError(self, 'Cannot start device, it is still busy')
if ((self._seq_status[0] > status.OK) and (not self._seq_was_stopped)):
self.log.warning('resetting internal state %s', formatStatus(self._seq_status))
self._set_seq_status(status.OK, 'preparing measurement')<|docstring|>Prepare measurement sequence.
This method will raise a `NicosError` when a sequence is already in
progress. Otherwise the internal sequence state is set to `status.OK`
which also helps starting a new sequence when a previous sequence ended
up in fault state.
Derived implementations should first call this method and might call
`doPrepare` on attached devices.<|endoftext|>
|
a7f2aa98c3fb547f607d3cf1eab5a6241f210177f5c9e21c9509082375386afb
|
def doStart(self):
'Generate and start a sequence if non is running.\n\n Just calls ``self._startSequence(self._generateSequence())``\n\n '
self._startSequence(self._generateSequence())
|
Generate and start a sequence if non is running.
Just calls ``self._startSequence(self._generateSequence())``
|
nicos/devices/generic/sequence.py
|
doStart
|
mlz-ictrl/nicos
| 12 |
python
|
def doStart(self):
'Generate and start a sequence if non is running.\n\n Just calls ``self._startSequence(self._generateSequence())``\n\n '
self._startSequence(self._generateSequence())
|
def doStart(self):
'Generate and start a sequence if non is running.\n\n Just calls ``self._startSequence(self._generateSequence())``\n\n '
self._startSequence(self._generateSequence())<|docstring|>Generate and start a sequence if non is running.
Just calls ``self._startSequence(self._generateSequence())``<|endoftext|>
|
1e68ac2f626e12e8e65d32a65e93bd761fa68b1ab9472b3104dcd6b08bbc1df2
|
def _generateSequence(self):
'Return the device-specific sequence as a list of steps.\n\n Each step is a SequenceItem or a tuple thereof.\n SequenceItems (also called actions) are "executed" one after another in\n a "lock-step fashion" while the actions grouped together in a tuple are\n tried to execute in parallel.\n\n The actual action performed depends on the implementation of the\n `SequenceItem`.\n\n Default is to raise an `NotImplementedError`\n '
raise NotImplementedError('put a proper _generateSequence implementation here!')
|
Return the device-specific sequence as a list of steps.
Each step is a SequenceItem or a tuple thereof.
SequenceItems (also called actions) are "executed" one after another in
a "lock-step fashion" while the actions grouped together in a tuple are
tried to execute in parallel.
The actual action performed depends on the implementation of the
`SequenceItem`.
Default is to raise an `NotImplementedError`
|
nicos/devices/generic/sequence.py
|
_generateSequence
|
mlz-ictrl/nicos
| 12 |
python
|
def _generateSequence(self):
'Return the device-specific sequence as a list of steps.\n\n Each step is a SequenceItem or a tuple thereof.\n SequenceItems (also called actions) are "executed" one after another in\n a "lock-step fashion" while the actions grouped together in a tuple are\n tried to execute in parallel.\n\n The actual action performed depends on the implementation of the\n `SequenceItem`.\n\n Default is to raise an `NotImplementedError`\n '
raise NotImplementedError('put a proper _generateSequence implementation here!')
|
def _generateSequence(self):
'Return the device-specific sequence as a list of steps.\n\n Each step is a SequenceItem or a tuple thereof.\n SequenceItems (also called actions) are "executed" one after another in\n a "lock-step fashion" while the actions grouped together in a tuple are\n tried to execute in parallel.\n\n The actual action performed depends on the implementation of the\n `SequenceItem`.\n\n Default is to raise an `NotImplementedError`\n '
raise NotImplementedError('put a proper _generateSequence implementation here!')<|docstring|>Return the device-specific sequence as a list of steps.
Each step is a SequenceItem or a tuple thereof.
SequenceItems (also called actions) are "executed" one after another in
a "lock-step fashion" while the actions grouped together in a tuple are
tried to execute in parallel.
The actual action performed depends on the implementation of the
`SequenceItem`.
Default is to raise an `NotImplementedError`<|endoftext|>
|
3ddfd1f10f200ffc65f21048fc22673d6c4750068a3583fe2995ea3d4d3f04b5
|
def income(other_args: List[str], ticker: str):
'Market Watch ticker income statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='income', description='\n Prints either yearly or quarterly income statement the company. The following fields\n are expected: Sales Growth, Cost of Goods Sold (COGS) incl. D&A, COGS Growth, COGS\n excluding D&A, Depreciation & Amortization Expense, Depreciation, Amortization of\n Intangibles, Gross Income, Gross Income Growth, Gross Profit Margin, SG&A Expense, SGA\n Growth, Research & Development, Other SG&A, Other Operating Expense, Unusual Expense,\n EBIT after Unusual Expense, Non Operating Income/Expense, Non-Operating Interest\n Income, Equity in Affiliates (Pretax), Interest Expense, Interest Expense Growth,\n Gross Interest Expense, Interest Capitalized, Pretax Income, Pretax Income Growth,\n Pretax Margin, Income Tax, Income Tax - Current Domestic, Income Tax - Current Foreign,\n Income Tax - Deferred Domestic, Income Tax - Deferred Foreign, Income Tax Credits,\n Equity in Affiliates, Other After Tax Income (Expense), Consolidated Net Income,\n Minority Interest Expense, Net Income Growth, Net Margin Growth, Extraordinaries &\n Discontinued Operations, Extra Items & Gain/Loss Sale Of Assets, Cumulative Effect -\n Accounting Chg, Discontinued Operations, Net Income After Extraordinaries,\n Preferred Dividends, Net Income Available to Common, EPS (Basic), EPS (Basic) Growth,\n Basic Shares Outstanding, EPS (Diluted), EPS (Diluted) Growth, Diluted Shares\n Outstanding, EBITDA, EBITDA Growth, EBITDA Margin, Sales/Revenue, and Net Income.\n [Source: Market Watch]\n ')
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'income', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print('')
except Exception as e:
print(e)
print('')
return
|
Market Watch ticker income statement
Parameters
----------
other_args : List[str]
argparse other args
ticker : str
Fundamental analysis ticker symbol
|
gamestonk_terminal/fundamental_analysis/market_watch_view.py
|
income
|
MrMasterDoosh/GamestonkTerminal
| 1 |
python
|
def income(other_args: List[str], ticker: str):
'Market Watch ticker income statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='income', description='\n Prints either yearly or quarterly income statement the company. The following fields\n are expected: Sales Growth, Cost of Goods Sold (COGS) incl. D&A, COGS Growth, COGS\n excluding D&A, Depreciation & Amortization Expense, Depreciation, Amortization of\n Intangibles, Gross Income, Gross Income Growth, Gross Profit Margin, SG&A Expense, SGA\n Growth, Research & Development, Other SG&A, Other Operating Expense, Unusual Expense,\n EBIT after Unusual Expense, Non Operating Income/Expense, Non-Operating Interest\n Income, Equity in Affiliates (Pretax), Interest Expense, Interest Expense Growth,\n Gross Interest Expense, Interest Capitalized, Pretax Income, Pretax Income Growth,\n Pretax Margin, Income Tax, Income Tax - Current Domestic, Income Tax - Current Foreign,\n Income Tax - Deferred Domestic, Income Tax - Deferred Foreign, Income Tax Credits,\n Equity in Affiliates, Other After Tax Income (Expense), Consolidated Net Income,\n Minority Interest Expense, Net Income Growth, Net Margin Growth, Extraordinaries &\n Discontinued Operations, Extra Items & Gain/Loss Sale Of Assets, Cumulative Effect -\n Accounting Chg, Discontinued Operations, Net Income After Extraordinaries,\n Preferred Dividends, Net Income Available to Common, EPS (Basic), EPS (Basic) Growth,\n Basic Shares Outstanding, EPS (Diluted), EPS (Diluted) Growth, Diluted Shares\n Outstanding, EBITDA, EBITDA Growth, EBITDA Margin, Sales/Revenue, and Net Income.\n [Source: Market Watch]\n ')
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'income', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print()
except Exception as e:
print(e)
print()
return
|
def income(other_args: List[str], ticker: str):
'Market Watch ticker income statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='income', description='\n Prints either yearly or quarterly income statement the company. The following fields\n are expected: Sales Growth, Cost of Goods Sold (COGS) incl. D&A, COGS Growth, COGS\n excluding D&A, Depreciation & Amortization Expense, Depreciation, Amortization of\n Intangibles, Gross Income, Gross Income Growth, Gross Profit Margin, SG&A Expense, SGA\n Growth, Research & Development, Other SG&A, Other Operating Expense, Unusual Expense,\n EBIT after Unusual Expense, Non Operating Income/Expense, Non-Operating Interest\n Income, Equity in Affiliates (Pretax), Interest Expense, Interest Expense Growth,\n Gross Interest Expense, Interest Capitalized, Pretax Income, Pretax Income Growth,\n Pretax Margin, Income Tax, Income Tax - Current Domestic, Income Tax - Current Foreign,\n Income Tax - Deferred Domestic, Income Tax - Deferred Foreign, Income Tax Credits,\n Equity in Affiliates, Other After Tax Income (Expense), Consolidated Net Income,\n Minority Interest Expense, Net Income Growth, Net Margin Growth, Extraordinaries &\n Discontinued Operations, Extra Items & Gain/Loss Sale Of Assets, Cumulative Effect -\n Accounting Chg, Discontinued Operations, Net Income After Extraordinaries,\n Preferred Dividends, Net Income Available to Common, EPS (Basic), EPS (Basic) Growth,\n Basic Shares Outstanding, EPS (Diluted), EPS (Diluted) Growth, Diluted Shares\n Outstanding, EBITDA, EBITDA Growth, EBITDA Margin, Sales/Revenue, and Net Income.\n [Source: Market Watch]\n ')
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'income', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print()
except Exception as e:
print(e)
print()
return<|docstring|>Market Watch ticker income statement
Parameters
----------
other_args : List[str]
argparse other args
ticker : str
Fundamental analysis ticker symbol<|endoftext|>
|
5fd5f95f3909c65f3b8c4ff59ed97147fe243e4b9018cb9533817cc114e8aee2
|
def balance(other_args: List[str], ticker: str):
'Market Watch ticker balance statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='balance', description="\n Prints either yearly or quarterly assets from balance sheet of the company.\n The following fields are expected: Cash & Short Term Investments, Cash & Short Term\n Investments Growth, Cash Only, Short-Term Investments, Cash & ST Investments / Total\n Assets, Total Accounts Receivable, Total Accounts Receivable Growth, Accounts\n Receivables, Net, Accounts Receivables, Gross, Bad Debt/Doubtful Accounts, Other\n Receivable, Accounts Receivable Turnover, Inventories, Finished Goods, Work in\n Progress, Raw Materials, Progress Payments & Other, Other Current Assets,\n Miscellaneous Current Assets, Net Property, Plant & Equipment, Property, Plant &\n Equipment - Gross, Buildings, Land & Improvements, Computer Software and Equipment,\n Other Property, Plant & Equipment, Accumulated Depreciation, Total Investments and\n Advances, Other Long-Term Investments, Long-Term Note Receivables, Intangible Assets,\n Net Goodwill, Net Other Intangibles, Other Assets.\n\n Prints either yearly or quarterly liabilities and shareholders' equity from balance\n sheet of the company. The following fields are expected: ST Debt & Current Portion LT\n Debt, Short Term Debt, Current Portion of Long Term Debt, Accounts Payable, Accounts\n Payable Growth, Income Tax Payable, Other Current Liabilities, Dividends Payable,\n Accrued Payroll, Miscellaneous Current Liabilities, Long-Term Debt, Long-Term Debt\n excl. Capitalized Leases, Non-Convertible Debt, Convertible Debt, Capitalized Lease\n Obligations, Provision for Risks & Charges, Deferred Taxes, Deferred Taxes - Credits,\n Deferred Taxes - Debit, Other Liabilities, Other Liabilities (excl. 
Deferred Income),\n Deferred Income, Non-Equity Reserves, Total Liabilities / Total Assets, Preferred Stock\n (Carrying Value), Redeemable Preferred Stock, Non-Redeemable Preferred Stock, Common\n Equity (Total), Common Equity/Total Assets, Common Stock Par/Carry Value, Retained\n Earnings, ESOP Debt Guarantee, Cumulative Translation Adjustment/Unrealized For. Exch.\n Gain, Unrealized Gain/Loss Marketable Securities, Revaluation Reserves, Treasury Stock,\n Total Shareholders' Equity, Total Shareholders' Equity / Total Assets, Accumulated\n Minority Interest, Total Equity, Total Current Assets, Total Assets, Total Current\n Liabilities, Total Liabilities, and Liabilities & Shareholders' Equity.\n [Source: Market Watch]\n ")
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'balance', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print('')
except Exception as e:
print(e)
print('')
return
|
Market Watch ticker balance statement
Parameters
----------
other_args : List[str]
argparse other args
ticker : str
Fundamental analysis ticker symbol
|
gamestonk_terminal/fundamental_analysis/market_watch_view.py
|
balance
|
MrMasterDoosh/GamestonkTerminal
| 1 |
python
|
def balance(other_args: List[str], ticker: str):
'Market Watch ticker balance statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='balance', description="\n Prints either yearly or quarterly assets from balance sheet of the company.\n The following fields are expected: Cash & Short Term Investments, Cash & Short Term\n Investments Growth, Cash Only, Short-Term Investments, Cash & ST Investments / Total\n Assets, Total Accounts Receivable, Total Accounts Receivable Growth, Accounts\n Receivables, Net, Accounts Receivables, Gross, Bad Debt/Doubtful Accounts, Other\n Receivable, Accounts Receivable Turnover, Inventories, Finished Goods, Work in\n Progress, Raw Materials, Progress Payments & Other, Other Current Assets,\n Miscellaneous Current Assets, Net Property, Plant & Equipment, Property, Plant &\n Equipment - Gross, Buildings, Land & Improvements, Computer Software and Equipment,\n Other Property, Plant & Equipment, Accumulated Depreciation, Total Investments and\n Advances, Other Long-Term Investments, Long-Term Note Receivables, Intangible Assets,\n Net Goodwill, Net Other Intangibles, Other Assets.\n\n Prints either yearly or quarterly liabilities and shareholders' equity from balance\n sheet of the company. The following fields are expected: ST Debt & Current Portion LT\n Debt, Short Term Debt, Current Portion of Long Term Debt, Accounts Payable, Accounts\n Payable Growth, Income Tax Payable, Other Current Liabilities, Dividends Payable,\n Accrued Payroll, Miscellaneous Current Liabilities, Long-Term Debt, Long-Term Debt\n excl. Capitalized Leases, Non-Convertible Debt, Convertible Debt, Capitalized Lease\n Obligations, Provision for Risks & Charges, Deferred Taxes, Deferred Taxes - Credits,\n Deferred Taxes - Debit, Other Liabilities, Other Liabilities (excl. 
Deferred Income),\n Deferred Income, Non-Equity Reserves, Total Liabilities / Total Assets, Preferred Stock\n (Carrying Value), Redeemable Preferred Stock, Non-Redeemable Preferred Stock, Common\n Equity (Total), Common Equity/Total Assets, Common Stock Par/Carry Value, Retained\n Earnings, ESOP Debt Guarantee, Cumulative Translation Adjustment/Unrealized For. Exch.\n Gain, Unrealized Gain/Loss Marketable Securities, Revaluation Reserves, Treasury Stock,\n Total Shareholders' Equity, Total Shareholders' Equity / Total Assets, Accumulated\n Minority Interest, Total Equity, Total Current Assets, Total Assets, Total Current\n Liabilities, Total Liabilities, and Liabilities & Shareholders' Equity.\n [Source: Market Watch]\n ")
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'balance', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print()
except Exception as e:
print(e)
print()
return
|
def balance(other_args: List[str], ticker: str):
'Market Watch ticker balance statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='balance', description="\n Prints either yearly or quarterly assets from balance sheet of the company.\n The following fields are expected: Cash & Short Term Investments, Cash & Short Term\n Investments Growth, Cash Only, Short-Term Investments, Cash & ST Investments / Total\n Assets, Total Accounts Receivable, Total Accounts Receivable Growth, Accounts\n Receivables, Net, Accounts Receivables, Gross, Bad Debt/Doubtful Accounts, Other\n Receivable, Accounts Receivable Turnover, Inventories, Finished Goods, Work in\n Progress, Raw Materials, Progress Payments & Other, Other Current Assets,\n Miscellaneous Current Assets, Net Property, Plant & Equipment, Property, Plant &\n Equipment - Gross, Buildings, Land & Improvements, Computer Software and Equipment,\n Other Property, Plant & Equipment, Accumulated Depreciation, Total Investments and\n Advances, Other Long-Term Investments, Long-Term Note Receivables, Intangible Assets,\n Net Goodwill, Net Other Intangibles, Other Assets.\n\n Prints either yearly or quarterly liabilities and shareholders' equity from balance\n sheet of the company. The following fields are expected: ST Debt & Current Portion LT\n Debt, Short Term Debt, Current Portion of Long Term Debt, Accounts Payable, Accounts\n Payable Growth, Income Tax Payable, Other Current Liabilities, Dividends Payable,\n Accrued Payroll, Miscellaneous Current Liabilities, Long-Term Debt, Long-Term Debt\n excl. Capitalized Leases, Non-Convertible Debt, Convertible Debt, Capitalized Lease\n Obligations, Provision for Risks & Charges, Deferred Taxes, Deferred Taxes - Credits,\n Deferred Taxes - Debit, Other Liabilities, Other Liabilities (excl. 
Deferred Income),\n Deferred Income, Non-Equity Reserves, Total Liabilities / Total Assets, Preferred Stock\n (Carrying Value), Redeemable Preferred Stock, Non-Redeemable Preferred Stock, Common\n Equity (Total), Common Equity/Total Assets, Common Stock Par/Carry Value, Retained\n Earnings, ESOP Debt Guarantee, Cumulative Translation Adjustment/Unrealized For. Exch.\n Gain, Unrealized Gain/Loss Marketable Securities, Revaluation Reserves, Treasury Stock,\n Total Shareholders' Equity, Total Shareholders' Equity / Total Assets, Accumulated\n Minority Interest, Total Equity, Total Current Assets, Total Assets, Total Current\n Liabilities, Total Liabilities, and Liabilities & Shareholders' Equity.\n [Source: Market Watch]\n ")
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'balance', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print()
except Exception as e:
print(e)
print()
return<|docstring|>Market Watch ticker balance statement
Parameters
----------
other_args : List[str]
argparse other args
ticker : str
Fundamental analysis ticker symbol<|endoftext|>
|
73514d5be9700b433d3d7848157d2f5b6b2b69ef392636825cb0c14b112e2b55
|
def cash(other_args: List[str], ticker: str):
'Market Watch ticker cash flow statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='cash_flow', description='\n Prints either yearly or quarterly cash flow operating activities of the company.\n The following fields are expected: Net Income before Extraordinaries, Net Income\n Growth, Depreciation, Depletion & Amortization, Depreciation and Depletion,\n Amortization of Intangible Assets, Deferred Taxes & Investment Tax Credit, Deferred\n Taxes, Investment Tax Credit, Other Funds, Funds from Operations, Extraordinaries,\n Changes in Working Capital, Receivables, Accounts Payable, Other Assets/Liabilities,\n and Net Operating Cash Flow Growth.\n\n Prints either yearly or quarterly cash flow investing activities of the company.\n The following fields are expected: Capital Expenditures, Capital Expenditures Growth,\n Capital Expenditures/Sales, Capital Expenditures (Fixed Assets), Capital Expenditures\n (Other Assets), Net Assets from Acquisitions, Sale of Fixed Assets & Businesses,\n Purchase/Sale of Investments, Purchase of Investments, Sale/Maturity of Investments,\n Other Uses, Other Sources, Net Investing Cash Flow Growth.\n\n Prints either yearly or quarterly cash flow financing activities of the company.\n The following fields are expected: Cash Dividends Paid - Total, Common Dividends,\n Preferred Dividends, Change in Capital Stock, Repurchase of Common & Preferred Stk.,\n Sale of Common & Preferred Stock, Proceeds from Stock Options, Other Proceeds from Sale\n of Stock, Issuance/Reduction of Debt, Net, Change in Current Debt, Change in Long-Term\n Debt, Issuance of Long-Term Debt, Reduction in Long-Term Debt, Other Funds, Other Uses,\n Other Sources, Net Financing Cash Flow Growth, Net Financing Cash Flow/Sales, Exchange\n Rate Effect, Miscellaneous Funds, Net Change in Cash, Free Cash Flow, Free Cash Flow\n Growth, Free Cash Flow Yield, Net Operating Cash Flow, Net Investing Cash Flow, Net\n Financing Cash Flow.\n [Source: Market Watch]\n ')
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'cashflow', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print('')
except Exception as e:
print(e)
print('')
return
|
Market Watch ticker cash flow statement
Parameters
----------
other_args : List[str]
argparse other args
ticker : str
Fundamental analysis ticker symbol
|
gamestonk_terminal/fundamental_analysis/market_watch_view.py
|
cash
|
MrMasterDoosh/GamestonkTerminal
| 1 |
python
|
def cash(other_args: List[str], ticker: str):
'Market Watch ticker cash flow statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='cash_flow', description='\n Prints either yearly or quarterly cash flow operating activities of the company.\n The following fields are expected: Net Income before Extraordinaries, Net Income\n Growth, Depreciation, Depletion & Amortization, Depreciation and Depletion,\n Amortization of Intangible Assets, Deferred Taxes & Investment Tax Credit, Deferred\n Taxes, Investment Tax Credit, Other Funds, Funds from Operations, Extraordinaries,\n Changes in Working Capital, Receivables, Accounts Payable, Other Assets/Liabilities,\n and Net Operating Cash Flow Growth.\n\n Prints either yearly or quarterly cash flow investing activities of the company.\n The following fields are expected: Capital Expenditures, Capital Expenditures Growth,\n Capital Expenditures/Sales, Capital Expenditures (Fixed Assets), Capital Expenditures\n (Other Assets), Net Assets from Acquisitions, Sale of Fixed Assets & Businesses,\n Purchase/Sale of Investments, Purchase of Investments, Sale/Maturity of Investments,\n Other Uses, Other Sources, Net Investing Cash Flow Growth.\n\n Prints either yearly or quarterly cash flow financing activities of the company.\n The following fields are expected: Cash Dividends Paid - Total, Common Dividends,\n Preferred Dividends, Change in Capital Stock, Repurchase of Common & Preferred Stk.,\n Sale of Common & Preferred Stock, Proceeds from Stock Options, Other Proceeds from Sale\n of Stock, Issuance/Reduction of Debt, Net, Change in Current Debt, Change in Long-Term\n Debt, Issuance of Long-Term Debt, Reduction in Long-Term Debt, Other Funds, Other Uses,\n Other Sources, Net Financing Cash Flow Growth, Net Financing Cash Flow/Sales, Exchange\n Rate Effect, Miscellaneous Funds, Net Change in Cash, Free Cash Flow, Free Cash Flow\n Growth, Free Cash Flow Yield, Net Operating Cash Flow, Net Investing Cash Flow, Net\n Financing Cash Flow.\n [Source: Market Watch]\n ')
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'cashflow', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print()
except Exception as e:
print(e)
print()
return
|
def cash(other_args: List[str], ticker: str):
'Market Watch ticker cash flow statement\n\n Parameters\n ----------\n other_args : List[str]\n argparse other args\n ticker : str\n Fundamental analysis ticker symbol\n '
parser = argparse.ArgumentParser(add_help=False, prog='cash_flow', description='\n Prints either yearly or quarterly cash flow operating activities of the company.\n The following fields are expected: Net Income before Extraordinaries, Net Income\n Growth, Depreciation, Depletion & Amortization, Depreciation and Depletion,\n Amortization of Intangible Assets, Deferred Taxes & Investment Tax Credit, Deferred\n Taxes, Investment Tax Credit, Other Funds, Funds from Operations, Extraordinaries,\n Changes in Working Capital, Receivables, Accounts Payable, Other Assets/Liabilities,\n and Net Operating Cash Flow Growth.\n\n Prints either yearly or quarterly cash flow investing activities of the company.\n The following fields are expected: Capital Expenditures, Capital Expenditures Growth,\n Capital Expenditures/Sales, Capital Expenditures (Fixed Assets), Capital Expenditures\n (Other Assets), Net Assets from Acquisitions, Sale of Fixed Assets & Businesses,\n Purchase/Sale of Investments, Purchase of Investments, Sale/Maturity of Investments,\n Other Uses, Other Sources, Net Investing Cash Flow Growth.\n\n Prints either yearly or quarterly cash flow financing activities of the company.\n The following fields are expected: Cash Dividends Paid - Total, Common Dividends,\n Preferred Dividends, Change in Capital Stock, Repurchase of Common & Preferred Stk.,\n Sale of Common & Preferred Stock, Proceeds from Stock Options, Other Proceeds from Sale\n of Stock, Issuance/Reduction of Debt, Net, Change in Current Debt, Change in Long-Term\n Debt, Issuance of Long-Term Debt, Reduction in Long-Term Debt, Other Funds, Other Uses,\n Other Sources, Net Financing Cash Flow Growth, Net Financing Cash Flow/Sales, Exchange\n Rate Effect, Miscellaneous Funds, Net Change in Cash, Free Cash Flow, Free Cash Flow\n Growth, Free Cash Flow Yield, Net Operating Cash Flow, Net Investing Cash Flow, Net\n Financing Cash Flow.\n [Source: Market Watch]\n ')
parser.add_argument('-q', '--quarter', action='store_true', default=False, dest='b_quarter', help='Quarter fundamental data flag.')
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if (not ns_parser):
return
df_financials = mwm.prepare_df_financials(ticker, 'cashflow', ns_parser.b_quarter)
if gtff.USE_COLOR:
df_financials = df_financials.applymap(financials_colored_values)
patch_pandas_text_adjustment()
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_rows', None)
print(df_financials.to_string(index=False))
print()
except Exception as e:
print(e)
print()
return<|docstring|>Market Watch ticker cash flow statement
Parameters
----------
other_args : List[str]
argparse other args
ticker : str
Fundamental analysis ticker symbol<|endoftext|>
|
4535f0e8faa33a5d2893cee02bb82e8e010c37382d579c8c1deaa71ee8306d86
|
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, certificate: Optional[pulumi.Input[str]]=None, certificate_name: Optional[pulumi.Input[str]]=None, provisioning_service_name: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None):
'\n The X509 Certificate.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] certificate: Base-64 representation of the X509 leaf certificate .cer file or just .pem file content.\n :param pulumi.Input[str] certificate_name: The name of the certificate create or update.\n :param pulumi.Input[str] provisioning_service_name: The name of the provisioning service.\n :param pulumi.Input[str] resource_group_name: Resource group identifier.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate'] = certificate
__props__['certificate_name'] = certificate_name
if ((provisioning_service_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'provisioning_service_name'")
__props__['provisioning_service_name'] = provisioning_service_name
if ((resource_group_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['etag'] = None
__props__['name'] = None
__props__['properties'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:devices:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/latest:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20170821preview:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20171115:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20180122:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20200101:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20200301:DpsCertificate')])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DpsCertificate, __self__).__init__('azure-nextgen:devices/v20200901preview:DpsCertificate', resource_name, __props__, opts)
|
The X509 Certificate.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate: Base-64 representation of the X509 leaf certificate .cer file or just .pem file content.
:param pulumi.Input[str] certificate_name: The name of the certificate create or update.
:param pulumi.Input[str] provisioning_service_name: The name of the provisioning service.
:param pulumi.Input[str] resource_group_name: Resource group identifier.
|
sdk/python/pulumi_azure_nextgen/devices/v20200901preview/dps_certificate.py
|
__init__
|
pulumi/pulumi-azure-nextgen
| 31 |
python
|
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, certificate: Optional[pulumi.Input[str]]=None, certificate_name: Optional[pulumi.Input[str]]=None, provisioning_service_name: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None):
'\n The X509 Certificate.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] certificate: Base-64 representation of the X509 leaf certificate .cer file or just .pem file content.\n :param pulumi.Input[str] certificate_name: The name of the certificate create or update.\n :param pulumi.Input[str] provisioning_service_name: The name of the provisioning service.\n :param pulumi.Input[str] resource_group_name: Resource group identifier.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate'] = certificate
__props__['certificate_name'] = certificate_name
if ((provisioning_service_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'provisioning_service_name'")
__props__['provisioning_service_name'] = provisioning_service_name
if ((resource_group_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['etag'] = None
__props__['name'] = None
__props__['properties'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:devices:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/latest:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20170821preview:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20171115:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20180122:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20200101:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20200301:DpsCertificate')])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DpsCertificate, __self__).__init__('azure-nextgen:devices/v20200901preview:DpsCertificate', resource_name, __props__, opts)
|
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, certificate: Optional[pulumi.Input[str]]=None, certificate_name: Optional[pulumi.Input[str]]=None, provisioning_service_name: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None):
'\n The X509 Certificate.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] certificate: Base-64 representation of the X509 leaf certificate .cer file or just .pem file content.\n :param pulumi.Input[str] certificate_name: The name of the certificate create or update.\n :param pulumi.Input[str] provisioning_service_name: The name of the provisioning service.\n :param pulumi.Input[str] resource_group_name: Resource group identifier.\n '
if (__name__ is not None):
warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning)
resource_name = __name__
if (__opts__ is not None):
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if (opts is None):
opts = pulumi.ResourceOptions()
if (not isinstance(opts, pulumi.ResourceOptions)):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if (opts.version is None):
opts.version = _utilities.get_version()
if (opts.id is None):
if (__props__ is not None):
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate'] = certificate
__props__['certificate_name'] = certificate_name
if ((provisioning_service_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'provisioning_service_name'")
__props__['provisioning_service_name'] = provisioning_service_name
if ((resource_group_name is None) and (not opts.urn)):
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['etag'] = None
__props__['name'] = None
__props__['properties'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:devices:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/latest:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20170821preview:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20171115:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20180122:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20200101:DpsCertificate'), pulumi.Alias(type_='azure-nextgen:devices/v20200301:DpsCertificate')])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DpsCertificate, __self__).__init__('azure-nextgen:devices/v20200901preview:DpsCertificate', resource_name, __props__, opts)<|docstring|>The X509 Certificate.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate: Base-64 representation of the X509 leaf certificate .cer file or just .pem file content.
:param pulumi.Input[str] certificate_name: The name of the certificate create or update.
:param pulumi.Input[str] provisioning_service_name: The name of the provisioning service.
:param pulumi.Input[str] resource_group_name: Resource group identifier.<|endoftext|>
|
9d9dbf8660e25f99a3db2eb1a6a884b4a6369890ad08ec7e33e5aa0d11bf5baa
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'DpsCertificate':
"\n Get an existing DpsCertificate resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DpsCertificate(resource_name, opts=opts, __props__=__props__)
|
Get an existing DpsCertificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
sdk/python/pulumi_azure_nextgen/devices/v20200901preview/dps_certificate.py
|
get
|
pulumi/pulumi-azure-nextgen
| 31 |
python
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'DpsCertificate':
"\n Get an existing DpsCertificate resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DpsCertificate(resource_name, opts=opts, __props__=__props__)
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'DpsCertificate':
"\n Get an existing DpsCertificate resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DpsCertificate(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing DpsCertificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|>
|
a4563b0bc517beb0d87ae89d093c322fe09f5eee1c55a266d0683d3d8cd61f56
|
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n The entity tag.\n '
return pulumi.get(self, 'etag')
|
The entity tag.
|
sdk/python/pulumi_azure_nextgen/devices/v20200901preview/dps_certificate.py
|
etag
|
pulumi/pulumi-azure-nextgen
| 31 |
python
|
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'etag')
|
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'etag')<|docstring|>The entity tag.<|endoftext|>
|
0495257151c3cc4b246d134c46ade030ab002152db1e2dfb600a7db160cedf55
|
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n The name of the certificate.\n '
return pulumi.get(self, 'name')
|
The name of the certificate.
|
sdk/python/pulumi_azure_nextgen/devices/v20200901preview/dps_certificate.py
|
name
|
pulumi/pulumi-azure-nextgen
| 31 |
python
|
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'name')
|
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'name')<|docstring|>The name of the certificate.<|endoftext|>
|
87fbf44c38cc789ba8ba1b4f68df35122a9313f7583701b4b298c7303399b336
|
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.CertificatePropertiesResponse']:
'\n properties of a certificate\n '
return pulumi.get(self, 'properties')
|
properties of a certificate
|
sdk/python/pulumi_azure_nextgen/devices/v20200901preview/dps_certificate.py
|
properties
|
pulumi/pulumi-azure-nextgen
| 31 |
python
|
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.CertificatePropertiesResponse']:
'\n \n '
return pulumi.get(self, 'properties')
|
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.CertificatePropertiesResponse']:
'\n \n '
return pulumi.get(self, 'properties')<|docstring|>properties of a certificate<|endoftext|>
|
d5902ece5095248dcfa55da0de657e15158cc28ac4082681e612d4f3cae8ee93
|
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n The resource type.\n '
return pulumi.get(self, 'type')
|
The resource type.
|
sdk/python/pulumi_azure_nextgen/devices/v20200901preview/dps_certificate.py
|
type
|
pulumi/pulumi-azure-nextgen
| 31 |
python
|
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'type')
|
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'type')<|docstring|>The resource type.<|endoftext|>
|
710f9e1b4f63cdd58e28c5bfa33dd4203ba80cac7ba7bfad04b9f1c8808accca
|
def refresh_access_token(client_id, client_secret, refresh_token):
"\n Refreshes access token. Performs synchronous request.\n\n :return: dict\n {'access_token': '<omitted>',\n 'expires_in': 1209599,\n 'refresh_token': '<omitted>',\n 'refresh_token_expires_in': 7775553}\n "
post_data = {'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token, 'grant_type': 'refresh_token'}
not_set = next((k for k in post_data.keys() if (post_data[k] is None)), None)
if not_set:
raise ValueError(f'"{not_set}" not set, it is needed in order to refresh access token')
parsed_url = urllib.parse.urlparse(API_BASE_URL)
headers = {'Accept': 'application/json', 'Content-Type': 'application/json;charset=utf-8'}
conn = HTTPSConnection(parsed_url.hostname, parsed_url.port)
url = os.path.join('/', API_V1, 'access_token')
conn.request('POST', url, body=json.dumps(post_data), headers=headers)
resp = conn.getresponse()
data = json.loads(resp.read().decode())
return data
|
Refreshes access token. Performs synchronous request.
:return: dict
{'access_token': '<omitted>',
'expires_in': 1209599,
'refresh_token': '<omitted>',
'refresh_token_expires_in': 7775553}
|
aiociscospark/utils.py
|
refresh_access_token
|
razor-1/aiociscospark
| 8 |
python
|
def refresh_access_token(client_id, client_secret, refresh_token):
"\n Refreshes access token. Performs synchronous request.\n\n :return: dict\n {'access_token': '<omitted>',\n 'expires_in': 1209599,\n 'refresh_token': '<omitted>',\n 'refresh_token_expires_in': 7775553}\n "
post_data = {'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token, 'grant_type': 'refresh_token'}
not_set = next((k for k in post_data.keys() if (post_data[k] is None)), None)
if not_set:
raise ValueError(f'"{not_set}" not set, it is needed in order to refresh access token')
parsed_url = urllib.parse.urlparse(API_BASE_URL)
headers = {'Accept': 'application/json', 'Content-Type': 'application/json;charset=utf-8'}
conn = HTTPSConnection(parsed_url.hostname, parsed_url.port)
url = os.path.join('/', API_V1, 'access_token')
conn.request('POST', url, body=json.dumps(post_data), headers=headers)
resp = conn.getresponse()
data = json.loads(resp.read().decode())
return data
|
def refresh_access_token(client_id, client_secret, refresh_token):
"\n Refreshes access token. Performs synchronous request.\n\n :return: dict\n {'access_token': '<omitted>',\n 'expires_in': 1209599,\n 'refresh_token': '<omitted>',\n 'refresh_token_expires_in': 7775553}\n "
post_data = {'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token, 'grant_type': 'refresh_token'}
not_set = next((k for k in post_data.keys() if (post_data[k] is None)), None)
if not_set:
raise ValueError(f'"{not_set}" not set, it is needed in order to refresh access token')
parsed_url = urllib.parse.urlparse(API_BASE_URL)
headers = {'Accept': 'application/json', 'Content-Type': 'application/json;charset=utf-8'}
conn = HTTPSConnection(parsed_url.hostname, parsed_url.port)
url = os.path.join('/', API_V1, 'access_token')
conn.request('POST', url, body=json.dumps(post_data), headers=headers)
resp = conn.getresponse()
data = json.loads(resp.read().decode())
return data<|docstring|>Refreshes access token. Performs synchronous request.
:return: dict
{'access_token': '<omitted>',
'expires_in': 1209599,
'refresh_token': '<omitted>',
'refresh_token_expires_in': 7775553}<|endoftext|>
|
4a99b934d41029d7636eee42b928973b7d65cb68f712f76a5b78e3c755073ac3
|
@click.group()
def cli():
" Script to patch strings in (binary) file. \nThe script works with output format of unix 'strings' utility.\n "
|
Script to patch strings in (binary) file.
The script works with output format of unix 'strings' utility.
|
stringpatcher/stringpatcher.py
|
cli
|
3ev0/toolshed
| 0 |
python
|
@click.group()
def cli():
" Script to patch strings in (binary) file. \nThe script works with output format of unix 'strings' utility.\n "
|
@click.group()
def cli():
" Script to patch strings in (binary) file. \nThe script works with output format of unix 'strings' utility.\n "<|docstring|>Script to patch strings in (binary) file.
The script works with output format of unix 'strings' utility.<|endoftext|>
|
78a6018b224ff8f87d21211e7b4bedb3a3061d0e1cf1460ac3f9444be2660f19
|
@cli.command()
@click.option('--enc', '-e', default='l', type=click.Choice(['s', 'S', 'b', 'l', 'B', 'L']), help='Select character size and endianness: s = 7-bit, S = 8-bit, {b,l} = 16-bit, {B,L} = 32-bit')
@click.argument('filepath', type=click.Path(writable=True))
def show(filepath, enc):
' Run strings on the file and output results. Outputs list of <offset> <string> '
click.echo('# Strings output:')
click.echo(('# encoding parameter: %s' % enc))
click.echo(('# filepath: %s' % filepath))
click.echo('# =====================')
results = subprocess.check_output(['strings', '-a', '-t', 'd', '-e', enc, filepath])
sys.stdout.write(str(results, encoding='ascii'))
|
Run strings on the file and output results. Outputs list of <offset> <string>
|
stringpatcher/stringpatcher.py
|
show
|
3ev0/toolshed
| 0 |
python
|
@cli.command()
@click.option('--enc', '-e', default='l', type=click.Choice(['s', 'S', 'b', 'l', 'B', 'L']), help='Select character size and endianness: s = 7-bit, S = 8-bit, {b,l} = 16-bit, {B,L} = 32-bit')
@click.argument('filepath', type=click.Path(writable=True))
def show(filepath, enc):
' '
click.echo('# Strings output:')
click.echo(('# encoding parameter: %s' % enc))
click.echo(('# filepath: %s' % filepath))
click.echo('# =====================')
results = subprocess.check_output(['strings', '-a', '-t', 'd', '-e', enc, filepath])
sys.stdout.write(str(results, encoding='ascii'))
|
@cli.command()
@click.option('--enc', '-e', default='l', type=click.Choice(['s', 'S', 'b', 'l', 'B', 'L']), help='Select character size and endianness: s = 7-bit, S = 8-bit, {b,l} = 16-bit, {B,L} = 32-bit')
@click.argument('filepath', type=click.Path(writable=True))
def show(filepath, enc):
' '
click.echo('# Strings output:')
click.echo(('# encoding parameter: %s' % enc))
click.echo(('# filepath: %s' % filepath))
click.echo('# =====================')
results = subprocess.check_output(['strings', '-a', '-t', 'd', '-e', enc, filepath])
sys.stdout.write(str(results, encoding='ascii'))<|docstring|>Run strings on the file and output results. Outputs list of <offset> <string><|endoftext|>
|
18ad1f3f725b3677422b5517ee51b75df95a0f66502a1e892b7ec1c824d407f4
|
def init_session(self):
'Initialize ``self.state`` with a default state, which ``tatk.util.camrest.state.default_state`` returns.'
self.state = default_state()
|
Initialize ``self.state`` with a default state, which ``tatk.util.camrest.state.default_state`` returns.
|
tatk/dst/rule/camrest/state_tracker.py
|
init_session
|
keshuichonglx/tatk
| 2 |
python
|
def init_session(self):
self.state = default_state()
|
def init_session(self):
self.state = default_state()<|docstring|>Initialize ``self.state`` with a default state, which ``tatk.util.camrest.state.default_state`` returns.<|endoftext|>
|
7e4ef0a146bc1da9063db200f39a2ad93851f0699b823c0f792523d0326f31b8
|
def test_find_class():
' _find_class() works as expected '
class Sub(_type.Type):
' Subtype '
def __repr__(self):
' repr'
return 'Sub'
assert (_type._find_class(_type.Type, '__init__') is _type.Type)
assert (_type._find_class(_type.Type, '__repr__') is _type.Type)
assert (_type._find_class(Sub, '__init__') is _type.Type)
assert (_type._find_class(Sub, '__repr__') is Sub)
|
_find_class() works as expected
|
tests/unit/test_type.py
|
test_find_class
|
ndparker/gensaschema
| 3 |
python
|
def test_find_class():
' '
class Sub(_type.Type):
' Subtype '
def __repr__(self):
' repr'
return 'Sub'
assert (_type._find_class(_type.Type, '__init__') is _type.Type)
assert (_type._find_class(_type.Type, '__repr__') is _type.Type)
assert (_type._find_class(Sub, '__init__') is _type.Type)
assert (_type._find_class(Sub, '__repr__') is Sub)
|
def test_find_class():
' '
class Sub(_type.Type):
' Subtype '
def __repr__(self):
' repr'
return 'Sub'
assert (_type._find_class(_type.Type, '__init__') is _type.Type)
assert (_type._find_class(_type.Type, '__repr__') is _type.Type)
assert (_type._find_class(Sub, '__init__') is _type.Type)
assert (_type._find_class(Sub, '__repr__') is Sub)<|docstring|>_find_class() works as expected<|endoftext|>
|
8edd4f44c7e5ba05b2c24307743c8b58d8ea1728fdf38fc53af161438a734290
|
def __repr__(self):
' repr'
return 'Sub'
|
repr
|
tests/unit/test_type.py
|
__repr__
|
ndparker/gensaschema
| 3 |
python
|
def ____(self):
' '
return 'Sub'
|
def ____(self):
' '
return 'Sub'<|docstring|>repr<|endoftext|>
|
9c29e0e77b4166a889c51ff28e9310f73d7a785d1fd0107c4a9fc6eae1fa84c8
|
def _make_mlp(hidden_dims, dropout):
'Creates a MLP with specified dimensions.'
@jraph.concatenated_args
def make_fn(inputs):
x = inputs
for dim in hidden_dims:
x = nn.Dense(features=dim)(x)
x = nn.LayerNorm()(x)
x = nn.relu(x)
x = dropout(x)
return x
return make_fn
|
Creates a MLP with specified dimensions.
|
algorithmic_efficiency/workloads/ogbg/ogbg_jax/models.py
|
_make_mlp
|
ClashLuke/algorithmic-efficiency
| 2 |
python
|
def _make_mlp(hidden_dims, dropout):
@jraph.concatenated_args
def make_fn(inputs):
x = inputs
for dim in hidden_dims:
x = nn.Dense(features=dim)(x)
x = nn.LayerNorm()(x)
x = nn.relu(x)
x = dropout(x)
return x
return make_fn
|
def _make_mlp(hidden_dims, dropout):
@jraph.concatenated_args
def make_fn(inputs):
x = inputs
for dim in hidden_dims:
x = nn.Dense(features=dim)(x)
x = nn.LayerNorm()(x)
x = nn.relu(x)
x = dropout(x)
return x
return make_fn<|docstring|>Creates a MLP with specified dimensions.<|endoftext|>
|
62fe2039f878fcc4b60e350554c4b4ea869954eecbd1e749bd0a30e9a41dca86
|
def _strip_args(launch_path):
"\n Return preprocessed argument list only containing options valid in roslaunch (not roslaunch2). Also append\n path to generated launch file. For example, this returns\n ['/home/user/catkin_ws/devel/bin/roslaunch2', '/tmp/tmpd8xFTj.launch', '--timeout=5']\n\n :param launch_path: path to generated launch file\n :return: list of options to be passed to roslaunch.main()\n "
import sys
dummy = argparse.ArgumentParser()
dummy.add_argument('--files')
dummy.add_argument('--args')
dummy.add_argument('--find-node')
dummy.add_argument('-c', '--child')
dummy.add_argument('--local')
dummy.add_argument('--screen')
dummy.add_argument('-u', '--server_uri')
dummy.add_argument('--run_id')
dummy.add_argument('--wait')
dummy.add_argument('-p', '--port')
dummy.add_argument('--core')
dummy.add_argument('--pid')
dummy.add_argument('-v')
dummy.add_argument('--dump-params')
dummy.add_argument('--skip-log-check')
dummy.add_argument('--disable-title')
dummy.add_argument('-w', '--numworkers')
dummy.add_argument('-t', '--timeout')
(_, unknown_args) = dummy.parse_known_args()
args = [arg for arg in sys.argv if (arg not in unknown_args)]
args.insert(1, launch_path)
return args
|
Return preprocessed argument list only containing options valid in roslaunch (not roslaunch2). Also append
path to generated launch file. For example, this returns
['/home/user/catkin_ws/devel/bin/roslaunch2', '/tmp/tmpd8xFTj.launch', '--timeout=5']
:param launch_path: path to generated launch file
:return: list of options to be passed to roslaunch.main()
|
src/roslaunch2/__init__.py
|
_strip_args
|
CodeFinder2/roslaunch2
| 10 |
python
|
def _strip_args(launch_path):
"\n Return preprocessed argument list only containing options valid in roslaunch (not roslaunch2). Also append\n path to generated launch file. For example, this returns\n ['/home/user/catkin_ws/devel/bin/roslaunch2', '/tmp/tmpd8xFTj.launch', '--timeout=5']\n\n :param launch_path: path to generated launch file\n :return: list of options to be passed to roslaunch.main()\n "
import sys
dummy = argparse.ArgumentParser()
dummy.add_argument('--files')
dummy.add_argument('--args')
dummy.add_argument('--find-node')
dummy.add_argument('-c', '--child')
dummy.add_argument('--local')
dummy.add_argument('--screen')
dummy.add_argument('-u', '--server_uri')
dummy.add_argument('--run_id')
dummy.add_argument('--wait')
dummy.add_argument('-p', '--port')
dummy.add_argument('--core')
dummy.add_argument('--pid')
dummy.add_argument('-v')
dummy.add_argument('--dump-params')
dummy.add_argument('--skip-log-check')
dummy.add_argument('--disable-title')
dummy.add_argument('-w', '--numworkers')
dummy.add_argument('-t', '--timeout')
(_, unknown_args) = dummy.parse_known_args()
args = [arg for arg in sys.argv if (arg not in unknown_args)]
args.insert(1, launch_path)
return args
|
def _strip_args(launch_path):
"\n Return preprocessed argument list only containing options valid in roslaunch (not roslaunch2). Also append\n path to generated launch file. For example, this returns\n ['/home/user/catkin_ws/devel/bin/roslaunch2', '/tmp/tmpd8xFTj.launch', '--timeout=5']\n\n :param launch_path: path to generated launch file\n :return: list of options to be passed to roslaunch.main()\n "
import sys
dummy = argparse.ArgumentParser()
dummy.add_argument('--files')
dummy.add_argument('--args')
dummy.add_argument('--find-node')
dummy.add_argument('-c', '--child')
dummy.add_argument('--local')
dummy.add_argument('--screen')
dummy.add_argument('-u', '--server_uri')
dummy.add_argument('--run_id')
dummy.add_argument('--wait')
dummy.add_argument('-p', '--port')
dummy.add_argument('--core')
dummy.add_argument('--pid')
dummy.add_argument('-v')
dummy.add_argument('--dump-params')
dummy.add_argument('--skip-log-check')
dummy.add_argument('--disable-title')
dummy.add_argument('-w', '--numworkers')
dummy.add_argument('-t', '--timeout')
(_, unknown_args) = dummy.parse_known_args()
args = [arg for arg in sys.argv if (arg not in unknown_args)]
args.insert(1, launch_path)
return args<|docstring|>Return preprocessed argument list only containing options valid in roslaunch (not roslaunch2). Also append
path to generated launch file. For example, this returns
['/home/user/catkin_ws/devel/bin/roslaunch2', '/tmp/tmpd8xFTj.launch', '--timeout=5']
:param launch_path: path to generated launch file
:return: list of options to be passed to roslaunch.main()<|endoftext|>
|
2d77f15eee0d1de9e3c374b324338ad8d989e8fd567dc9cb7a82dcc6c373b054
|
def _argument_parser(parents=None):
    """
    Defines the command line argument parser for roslaunch2.

    :param parents: Parents of argparse.ArgumentParser (not needed in most of the cases)
    :return: argparse.ArgumentParser instance
    """
    parent_list = parents if parents is not None else []
    # 'resolve' lets child parsers override inherited options; without parents the
    # stricter default 'error' is kept.
    result = argparse.ArgumentParser(
        description='roslaunch2 - Python based launch files for ROS (1)',
        add_help=False,
        parents=parent_list,
        conflict_handler='resolve' if parent_list else 'error')
    result.add_argument('-h', '--help', default=False, action='help',
                        help='Show this help message and exit')
    result.add_argument('--no-colors', default=False, action='store_true',
                        help='Do not use colored output during processing')
    result.add_argument('--version', action='version',
                        version='%(prog)s v{version}, (C) Copyright Adrian Böckenkamp, 24/10/2021'.format(version=__version__))
    result.add_argument('-d', '--dry-run', default=False, action='store_true',
                        help='Just print the launch file to stdout, do not run roslaunch')
    result.add_argument('package', nargs='?',
                        help='ROS package name to search for <launchfile>')
    result.add_argument('launchfile', nargs='+',
                        help='Python based launch file')
    result.add_argument('--ros-args', default=False, action='store_true',
                        help='Display command-line arguments for this launch file')
    return result
|
Defines the command line argument parser for roslaunch2.
:param parents: Parents of argparse.ArgumentParser (not needed in most of the cases)
:return: argparse.ArgumentParser instance
|
src/roslaunch2/__init__.py
|
_argument_parser
|
CodeFinder2/roslaunch2
| 10 |
python
|
def _argument_parser(parents=None):
'\n Defines the command line argument parser for roslaunch2.\n\n :param parents: Parents of argparse.ArgumentParser (not needed in most of the cases)\n :return: argparse.ArgumentParser instance\n '
if (parents is None):
parents = []
parser = argparse.ArgumentParser(description='roslaunch2 - Python based launch files for ROS (1)', add_help=False, parents=parents, conflict_handler=('resolve' if parents else 'error'))
parser.add_argument('-h', '--help', default=False, action='help', help='Show this help message and exit')
parser.add_argument('--no-colors', default=False, action='store_true', help='Do not use colored output during processing')
parser.add_argument('--version', action='version', version='%(prog)s v{version}, (C) Copyright Adrian Böckenkamp, 24/10/2021'.format(version=__version__))
parser.add_argument('-d', '--dry-run', default=False, action='store_true', help='Just print the launch file to stdout, do not run roslaunch')
parser.add_argument('package', nargs='?', help='ROS package name to search for <launchfile>')
parser.add_argument('launchfile', nargs='+', help='Python based launch file')
parser.add_argument('--ros-args', default=False, action='store_true', help='Display command-line arguments for this launch file')
return parser
|
def _argument_parser(parents=None):
'\n Defines the command line argument parser for roslaunch2.\n\n :param parents: Parents of argparse.ArgumentParser (not needed in most of the cases)\n :return: argparse.ArgumentParser instance\n '
if (parents is None):
parents = []
parser = argparse.ArgumentParser(description='roslaunch2 - Python based launch files for ROS (1)', add_help=False, parents=parents, conflict_handler=('resolve' if parents else 'error'))
parser.add_argument('-h', '--help', default=False, action='help', help='Show this help message and exit')
parser.add_argument('--no-colors', default=False, action='store_true', help='Do not use colored output during processing')
parser.add_argument('--version', action='version', version='%(prog)s v{version}, (C) Copyright Adrian Böckenkamp, 24/10/2021'.format(version=__version__))
parser.add_argument('-d', '--dry-run', default=False, action='store_true', help='Just print the launch file to stdout, do not run roslaunch')
parser.add_argument('package', nargs='?', help='ROS package name to search for <launchfile>')
parser.add_argument('launchfile', nargs='+', help='Python based launch file')
parser.add_argument('--ros-args', default=False, action='store_true', help='Display command-line arguments for this launch file')
return parser<|docstring|>Defines the command line argument parser for roslaunch2.
:param parents: Parents of argparse.ArgumentParser (not needed in most of the cases)
:return: argparse.ArgumentParser instance<|endoftext|>
|
980a0a14f24d7908c6806f9f3cd288ca8f1b4a90a4ad3e8c4f15e0b768b970fa
|
def start(launch_obj, dry_run=False, silent=False):
    """
    Generates a temporary roslaunch XML file from the given roslaunch2 Launch instance and passes it
    over to roslaunch. Returns after roslaunch has terminated and temporary files have been removed.

    :param launch_obj: Instance of class launch.Launch
    :param dry_run: Only print the generated XML, do not launch (default: False)
    :param silent: Hide roslaunch output by redirecting stdout to os.devnull
    """
    if silent:
        import sys
        import os
        # NOTE(review): stdout stays redirected for the rest of this process and the devnull
        # handle is never restored or closed -- presumably acceptable because start() is run in
        # a child process by start_async(); confirm before calling with silent=True in-process.
        sys.stdout = open(os.devnull, 'w')
    if (not isinstance(launch_obj, Launch)):
        critical("Your launch module's main() function must return an instance of roslaunch2.launch.Launch in order to be compatible with roslaunch2. Please fix your launch module code.")
        return
    # Render the launch tree into roslaunch-compatible XML.
    content = launch_obj.generate()
    if (not dry_run):
        import tempfile
        import roslaunch
        # delete=False because roslaunch reopens the file by name; removed manually below.
        ftmp = tempfile.NamedTemporaryFile(mode='w', suffix='.launch', delete=False)
        ftmp.write(content)
        ftmp.close()
        try:
            on_initialize.fire()
            # Blocks until roslaunch terminates; _strip_args forwards only options roslaunch knows.
            roslaunch.main(_strip_args(ftmp.name))
        except Exception:
            # Errors (e.g. raised during Ctrl+C shutdown) are deliberately swallowed so that
            # the cleanup below always runs.
            pass
        utils.silent_remove(ftmp.name)
    else:
        print(content)
    # Fire shutdown hooks and close any remote (machine) resources in both code paths.
    on_terminate.fire()
    Machine.cleanup()
|
Generates a temporary roslaunch XML file from the given roslaunch2 Launch instance and passes it over to roslaunch.
Returns after roslaunch has terminated and temporary files have been removed.
:param launch_obj: Instance of class launch.Launch
:param dry_run: Only print the generated XML, do not launch (default: False)
:param silent: Hide roslaunch output
|
src/roslaunch2/__init__.py
|
start
|
CodeFinder2/roslaunch2
| 10 |
python
|
def start(launch_obj, dry_run=False, silent=False):
'\n Generates a temporary roslaunch XML file from the given roslaunch2 Launch instance and passes it over to roslaunch.\n Returns after roslaunch has terminated and temporary files have been removed.\n\n :param launch_obj: Instance of class launch.Launch\n :param dry_run: If Only print generated XML (default: False)\n :param silent: Hide roslaunch output\n '
if silent:
import sys
import os
sys.stdout = open(os.devnull, 'w')
if (not isinstance(launch_obj, Launch)):
critical("Your launch module's main() function must return an instance of roslaunch2.launch.Launch in order to be compatible with roslaunch2. Please fix your launch module code.")
return
content = launch_obj.generate()
if (not dry_run):
import tempfile
import roslaunch
ftmp = tempfile.NamedTemporaryFile(mode='w', suffix='.launch', delete=False)
ftmp.write(content)
ftmp.close()
try:
on_initialize.fire()
roslaunch.main(_strip_args(ftmp.name))
except Exception:
pass
utils.silent_remove(ftmp.name)
else:
print(content)
on_terminate.fire()
Machine.cleanup()
|
def start(launch_obj, dry_run=False, silent=False):
'\n Generates a temporary roslaunch XML file from the given roslaunch2 Launch instance and passes it over to roslaunch.\n Returns after roslaunch has terminated and temporary files have been removed.\n\n :param launch_obj: Instance of class launch.Launch\n :param dry_run: If Only print generated XML (default: False)\n :param silent: Hide roslaunch output\n '
if silent:
import sys
import os
sys.stdout = open(os.devnull, 'w')
if (not isinstance(launch_obj, Launch)):
critical("Your launch module's main() function must return an instance of roslaunch2.launch.Launch in order to be compatible with roslaunch2. Please fix your launch module code.")
return
content = launch_obj.generate()
if (not dry_run):
import tempfile
import roslaunch
ftmp = tempfile.NamedTemporaryFile(mode='w', suffix='.launch', delete=False)
ftmp.write(content)
ftmp.close()
try:
on_initialize.fire()
roslaunch.main(_strip_args(ftmp.name))
except Exception:
pass
utils.silent_remove(ftmp.name)
else:
print(content)
on_terminate.fire()
Machine.cleanup()<|docstring|>Generates a temporary roslaunch XML file from the given roslaunch2 Launch instance and passes it over to roslaunch.
Returns after roslaunch has terminated and temporary files have been removed.
:param launch_obj: Instance of class launch.Launch
:param dry_run: If Only print generated XML (default: False)
:param silent: Hide roslaunch output<|endoftext|>
|
4a72ab1478427e973d182afa4a32836d979704bf25fa69bdf3a039753ca0d0d2
|
def start_async(launch_obj, silent=False):
    """
    Call method start() in a separate process and returns without waiting for roslaunch to terminate.
    If p is the returned object, call roslaunch2.terminate(p) to shutdown roslaunch(2) and wait until
    roslaunch has terminated.

    :param launch_obj: Instance of class launch.Launch
    :param silent: Hide roslaunch output
    :return: Instance of class multiprocessing.Process
    """
    import multiprocessing
    # dry_run is forced to False: an asynchronous dry run would have no observable effect.
    worker = multiprocessing.Process(target=start, args=(launch_obj, False, silent))
    worker.start()
    return worker
|
Call method start() in a separate process and returns without waiting for roslaunch to terminate. If p is the
returned object, call roslaunch2.terminate(p) to shutdown roslaunch(2) and wait until roslaunch has terminated.
:param launch_obj: Instance of class launch.Launch
:param silent: Hide roslaunch output
:return: Instance of class multiprocessing.Process
|
src/roslaunch2/__init__.py
|
start_async
|
CodeFinder2/roslaunch2
| 10 |
python
|
def start_async(launch_obj, silent=False):
'\n Call method start() in a separate process and returns without waiting for roslaunch to terminate. If p is the\n returned object, call roslaunch2.terminate(p) to shutdown roslaunch(2) and wait until roslaunch has terminated.\n\n :param launch_obj: Instance of class launch.Launch\n :param silent: Hide roslaunch output\n :return: Instance of class multiprocessing.Process\n '
from multiprocessing import Process
p = Process(target=start, args=(launch_obj, False, silent))
p.start()
return p
|
def start_async(launch_obj, silent=False):
'\n Call method start() in a separate process and returns without waiting for roslaunch to terminate. If p is the\n returned object, call roslaunch2.terminate(p) to shutdown roslaunch(2) and wait until roslaunch has terminated.\n\n :param launch_obj: Instance of class launch.Launch\n :param silent: Hide roslaunch output\n :return: Instance of class multiprocessing.Process\n '
from multiprocessing import Process
p = Process(target=start, args=(launch_obj, False, silent))
p.start()
return p<|docstring|>Call method start() in a separate process and returns without waiting for roslaunch to terminate. If p is the
returned object, call roslaunch2.terminate(p) to shutdown roslaunch(2) and wait until roslaunch has terminated.
:param launch_obj: Instance of class launch.Launch
:param silent: Hide roslaunch output
:return: Instance of class multiprocessing.Process<|endoftext|>
|
33d7de88e50b8fea8e638ff15240732efc9f62041e74a96724b223ce51f6c106
|
def terminate(instance):
    """
    Terminates the given roslaunch2 instance (a multiprocessing.Process instance) and waits
    until it has exited.

    :param instance: Object returned by start_async() to be terminated; None is a no-op
    :return: None
    """
    if instance is None:
        return
    import multiprocessing
    import os
    import signal
    assert isinstance(instance, multiprocessing.Process)
    # Ask roslaunch to shut down gracefully first (SIGINT is its Ctrl+C handler) ...
    os.kill(instance.pid, signal.SIGINT)
    instance.join(10.0)
    # ... and force-terminate if it did not exit within the grace period.
    instance.terminate()
    instance.join()
|
Terminates the given roslaunch2 instance (a multiprocessing.Process instance) and waits until it has exited.
:param instance: Object returned by start_async() to be terminated
:return: None
|
src/roslaunch2/__init__.py
|
terminate
|
CodeFinder2/roslaunch2
| 10 |
python
|
def terminate(instance):
'\n Terminates the given roslaunch2 instance (a multiprocessing.Process instance) and waits until it has exited.\n\n :param instance: Object returned by start_async() to be terminated\n :return: None\n '
if (instance is None):
return
from multiprocessing import Process
assert isinstance(instance, Process)
import os
import signal
os.kill(instance.pid, signal.SIGINT)
instance.join(10.0)
instance.terminate()
instance.join()
|
def terminate(instance):
'\n Terminates the given roslaunch2 instance (a multiprocessing.Process instance) and waits until it has exited.\n\n :param instance: Object returned by start_async() to be terminated\n :return: None\n '
if (instance is None):
return
from multiprocessing import Process
assert isinstance(instance, Process)
import os
import signal
os.kill(instance.pid, signal.SIGINT)
instance.join(10.0)
instance.terminate()
instance.join()<|docstring|>Terminates the given roslaunch2 instance (a multiprocessing.Process instance) and waits until it has exited.
:param instance: Object returned by start_async() to be terminated
:return: None<|endoftext|>
|
5394356272287dc89327be93923c6a7b1a957565c7a5cf7bfb6dc433d3518368
|
def main(command_line_args=None):
    """
    Defines the core logic (= Python based dynamic launch files) of roslaunch2. It does NOT create any
    launch modules or the like. This function is not meant to be called directly. See `start()` and
    `start_async()` for more details.

    :param command_line_args: List with command line arguments as strings
    :return: None
    """
    import os.path
    parser = _argument_parser()
    # parse_known_args: options unknown to roslaunch2 are ignored here so they can be
    # consumed by the launch module or by roslaunch itself.
    (args, _) = parser.parse_known_args(args=command_line_args)
    init_logger((not args.no_colors))
    if (len(args.launchfile) > 1):
        logger.warning('Multiple launch files at once are not supported (yet), just using the first.')
    args.launchfile = args.launchfile[0]
    # Append the default launch-module extension if none was given.
    if (not os.path.splitext(args.launchfile)[1]):
        args.launchfile += '.pyl'
    if args.package:
        try:
            # Resolve the launch module path inside the given ROS package.
            args.launchfile = Package(args.package).find(args.launchfile)
        except rospkg.ResourceNotFound as ex:
            critical("Launch module '{:s}' not found in package '{:s}', searched in: \n- {:s}".format(args.launchfile, args.package, '\n- '.join(ex.ros_paths)))
            return
    try:
        m = Package.import_launch_module(args.launchfile)
    except ValueError:
        critical("Launch module '{:s}' not found.".format(args.launchfile))
        return
    if (not hasattr(m, 'main')):
        critical("Launch module '{:s}' has no main(**kwargs) function! Please fix your launch module code.".format(args.launchfile))
        return
    if args.ros_args:
        # Only display the launch file's own command line arguments, then exit.
        m.main()  # registers LaunchParameter instances as a side effect
        sys.argv[0] = args.launchfile
        parents = LaunchParameter.launch_parameter_list
        if (len(parents) == 0):
            return
        elif (len(parents) == 1):
            parser = parents[0]
        else:
            # Merge all registered parameter parsers into a single one for the help output.
            parser = LaunchParameter(description=parents[0].description, conflict_handler='resolve', parents=parents)
        # Hidden --usage flag triggers argparse's help action (its help text is suppressed).
        sys.argv.append('--usage')
        parser.add_argument('--usage', default=False, action='help', help=argparse.SUPPRESS)
        parser.parse_known_args()
        return
    # Normal operation: build the launch tree and hand it over to roslaunch.
    launch_tree = m.main()
    start(launch_obj=launch_tree, dry_run=args.dry_run)
|
Defines the core logic (= Python based dynamic launch files) of roslaunch2. It does NOT create any
launch modules or the like. This function is not meant to be called directly. See `start()` and
`start_async()` for more details.
:param command_line_args: List with command line arguments as strings
:return: None
|
src/roslaunch2/__init__.py
|
main
|
CodeFinder2/roslaunch2
| 10 |
python
|
def main(command_line_args=None):
'\n Defines the core logic (= Python based dynamic launch files) of roslaunch2. It does NOT create any\n launch modules or the like. This function is not meant to be called directly. See `start()` and\n `start_async()` for more details.\n\n :param command_line_args: List with command line arguments as strings\n :return: None\n '
import os.path
parser = _argument_parser()
(args, _) = parser.parse_known_args(args=command_line_args)
init_logger((not args.no_colors))
if (len(args.launchfile) > 1):
logger.warning('Multiple launch files at once are not supported (yet), just using the first.')
args.launchfile = args.launchfile[0]
if (not os.path.splitext(args.launchfile)[1]):
args.launchfile += '.pyl'
if args.package:
try:
args.launchfile = Package(args.package).find(args.launchfile)
except rospkg.ResourceNotFound as ex:
critical("Launch module '{:s}' not found in package '{:s}', searched in: \n- {:s}".format(args.launchfile, args.package, '\n- '.join(ex.ros_paths)))
return
try:
m = Package.import_launch_module(args.launchfile)
except ValueError:
critical("Launch module '{:s}' not found.".format(args.launchfile))
return
if (not hasattr(m, 'main')):
critical("Launch module '{:s}' has no main(**kwargs) function! Please fix your launch module code.".format(args.launchfile))
return
if args.ros_args:
m.main()
sys.argv[0] = args.launchfile
parents = LaunchParameter.launch_parameter_list
if (len(parents) == 0):
return
elif (len(parents) == 1):
parser = parents[0]
else:
parser = LaunchParameter(description=parents[0].description, conflict_handler='resolve', parents=parents)
sys.argv.append('--usage')
parser.add_argument('--usage', default=False, action='help', help=argparse.SUPPRESS)
parser.parse_known_args()
return
launch_tree = m.main()
start(launch_obj=launch_tree, dry_run=args.dry_run)
|
def main(command_line_args=None):
'\n Defines the core logic (= Python based dynamic launch files) of roslaunch2. It does NOT create any\n launch modules or the like. This function is not meant to be called directly. See `start()` and\n `start_async()` for more details.\n\n :param command_line_args: List with command line arguments as strings\n :return: None\n '
import os.path
parser = _argument_parser()
(args, _) = parser.parse_known_args(args=command_line_args)
init_logger((not args.no_colors))
if (len(args.launchfile) > 1):
logger.warning('Multiple launch files at once are not supported (yet), just using the first.')
args.launchfile = args.launchfile[0]
if (not os.path.splitext(args.launchfile)[1]):
args.launchfile += '.pyl'
if args.package:
try:
args.launchfile = Package(args.package).find(args.launchfile)
except rospkg.ResourceNotFound as ex:
critical("Launch module '{:s}' not found in package '{:s}', searched in: \n- {:s}".format(args.launchfile, args.package, '\n- '.join(ex.ros_paths)))
return
try:
m = Package.import_launch_module(args.launchfile)
except ValueError:
critical("Launch module '{:s}' not found.".format(args.launchfile))
return
if (not hasattr(m, 'main')):
critical("Launch module '{:s}' has no main(**kwargs) function! Please fix your launch module code.".format(args.launchfile))
return
if args.ros_args:
m.main()
sys.argv[0] = args.launchfile
parents = LaunchParameter.launch_parameter_list
if (len(parents) == 0):
return
elif (len(parents) == 1):
parser = parents[0]
else:
parser = LaunchParameter(description=parents[0].description, conflict_handler='resolve', parents=parents)
sys.argv.append('--usage')
parser.add_argument('--usage', default=False, action='help', help=argparse.SUPPRESS)
parser.parse_known_args()
return
launch_tree = m.main()
start(launch_obj=launch_tree, dry_run=args.dry_run)<|docstring|>Defines the core logic (= Python based dynamic launch files) of roslaunch2. It does NOT create any
launch modules or the like. This function is not meant to be called directly. See `start()` and
`start_async()` for more details.
:param command_line_args: List with command line arguments as strings
:return: None<|endoftext|>
|
3dcccf32da3cd5ee332fb433f5c82d7647ec2b6cab2dcddec80c192e4a684056
|
def too_often(detections: List[dict], often: int=4, time_window: int=60000) -> Tuple[(List[dict], List[dict])]:
    """
    Analyse by too often classifier.

    Note: detections should be grouped by ``device_id`` (see ``group_by_device_id()``); an
    additional group by ``resolution`` is allowed but not required, so the same grouping as for
    the ``(near_)hot_pixel(2)`` classifiers may be reused.

    Detections are first grouped by ``timestamp`` so that hits from the same original image frame
    count as one detection. For every pair of timestamp groups whose distance (1D Euclidean, i.e.
    ``abs(timestamp' - timestamp)``) is below ``time_window``, the ``artifact_too_often`` counter
    of each involved detection is incremented. Detections whose counter reaches ``often`` are
    classified as artifacts.

    Required keys:
      * ``timestamp``: for grouping by original image frame and counting nearby detections

    Keys that will be added:
      * ``artifact_too_often``: count of detections with a near ``timestamp``.
      * ``classified``: set to ``artifact`` when the detection is classified as a too_often artifact.

    :param detections: list of detections
    :param often: classified threshold
    :param time_window: timestamp distance
    :return: tuple of (list of classified, list of not classified)
    """
    grouped = group_by_timestamp_division(detections, 1)
    if (len(grouped.keys()) == 1):
        # Only one frame: nothing can be "too often", just initialise the counters.
        for group in grouped.values():
            for d in group:
                get_and_set(d, ARTIFACT_TOO_OFTEN, 0)
    else:
        to_compare = itertools.combinations(grouped.keys(), 2)
        for (key, key_prim) in to_compare:
            for d in [*grouped.get(key), *grouped.get(key_prim)]:
                get_and_set(d, ARTIFACT_TOO_OFTEN, 0)
                # BUG FIX: the original compared key_prim with itself (abs(key_prim - key_prim)
                # is always 0 < time_window), so every pair counted as "near" and the counter
                # was incremented unconditionally. Compare the two group keys instead.
                if (abs((key_prim - key)) < time_window):
                    d[ARTIFACT_TOO_OFTEN] += 1
    return classify_by_lambda(detections, (lambda x: (x.get(ARTIFACT_TOO_OFTEN) >= often)))
|
Analyse by too often classifier.
Note: detections should be grouped by ``device_id``.
See: ``group_by_device_id()``.
The additional group by ``resolution`` is not required, but is not prohibited.
So it may be work in the same grouped detections than for ``(near_)hot_pixel(2)`` classifiers.
:param detections: list of detections
:param often: classified threshold
:param time_window: timestamp distance
Classifier work similar to ``near_hot_pixel2`` classifier but in this we use ``timestamp`` object's key as group key.
At first, the detections from the same original image frame (with the same ``timestamp`` value) are counted as one detection.
At second, all other detections who distance is less than ``time_window`` are counted to ``artifact_too_often`` object's key.
The distance measurement of keys is the Euclidean distance between ``timestamp`` and ``timestamp'`` in 1D space.
Required keys:
* ``timestamp``: for group by the same original image frame, and count of detections in near
Keys will be add:
* ``artifact_hot_pixel``: count of detections in near ``timestamp``.
* ``classified``: set to ``artifact`` when detection will be classified as too_often artifact.
Example::
for by_device_id in group_by_device_id(detections):
too_often(by_device_id)
:return: tuple of (list of classified, list of no classified)
|
hit_analysis/classification/artifact/too_often.py
|
too_often
|
credo-science/credo-classify
| 0 |
python
|
def too_often(detections: List[dict], often: int=4, time_window: int=60000) -> Tuple[(List[dict], List[dict])]:
"\n Analyse by too often classifier.\n\n Note: detections should be grouped by ``device_id``.\n See: ``group_by_device_id()``.\n The additional group by ``resolution`` is not required, but is not prohibited.\n So it may be work in the same grouped detections than for ``(near_)hot_pixel(2)`` classifiers.\n\n :param detections: list of detections\n :param often: classified threshold\n :param time_window: timestamp distance\n\n Classifier work similar to ``near_hot_pixel2`` classifier but in this we use ``timestamp`` object's key as group key.\n At first, te detections from the same original image frame (with the same ``timestamp`` value) are counted as one detection.\n At second, all other detections who distance is less than ``time_window`` are counted to ``artifact_too_often`` object's key.\n\n The distance measurement of keys is the Euclidean distance between ``timestamp`` and ``timestamp'`` in 1D space.\n\n Required keys:\n * ``timestamp``: for group by the same original image frame, and count of detections in near\n\n Keys will be add:\n * ``artifact_hot_pixel``: count of detections in near ``timestamp``.\n * ``classified``: set to ``artifact`` when detection will be classified as too_often artifact.\n\n Example::\n\n for by_device_id in group_by_device_id(detections):\n too_often(by_device_id)\n\n :return: tuple of (list of classified, list of no classified)\n "
grouped = group_by_timestamp_division(detections, 1)
if (len(grouped.keys()) == 1):
for group in grouped.values():
for d in group:
get_and_set(d, ARTIFACT_TOO_OFTEN, 0)
else:
to_compare = itertools.combinations(grouped.keys(), 2)
for (key, key_prim) in to_compare:
for d in [*grouped.get(key), *grouped.get(key_prim)]:
get_and_set(d, ARTIFACT_TOO_OFTEN, 0)
if (abs((key_prim - key_prim)) < time_window):
d[ARTIFACT_TOO_OFTEN] += 1
return classify_by_lambda(detections, (lambda x: (x.get(ARTIFACT_TOO_OFTEN) >= often)))
|
def too_often(detections: List[dict], often: int=4, time_window: int=60000) -> Tuple[(List[dict], List[dict])]:
"\n Analyse by too often classifier.\n\n Note: detections should be grouped by ``device_id``.\n See: ``group_by_device_id()``.\n The additional group by ``resolution`` is not required, but is not prohibited.\n So it may be work in the same grouped detections than for ``(near_)hot_pixel(2)`` classifiers.\n\n :param detections: list of detections\n :param often: classified threshold\n :param time_window: timestamp distance\n\n Classifier work similar to ``near_hot_pixel2`` classifier but in this we use ``timestamp`` object's key as group key.\n At first, te detections from the same original image frame (with the same ``timestamp`` value) are counted as one detection.\n At second, all other detections who distance is less than ``time_window`` are counted to ``artifact_too_often`` object's key.\n\n The distance measurement of keys is the Euclidean distance between ``timestamp`` and ``timestamp'`` in 1D space.\n\n Required keys:\n * ``timestamp``: for group by the same original image frame, and count of detections in near\n\n Keys will be add:\n * ``artifact_hot_pixel``: count of detections in near ``timestamp``.\n * ``classified``: set to ``artifact`` when detection will be classified as too_often artifact.\n\n Example::\n\n for by_device_id in group_by_device_id(detections):\n too_often(by_device_id)\n\n :return: tuple of (list of classified, list of no classified)\n "
grouped = group_by_timestamp_division(detections, 1)
if (len(grouped.keys()) == 1):
for group in grouped.values():
for d in group:
get_and_set(d, ARTIFACT_TOO_OFTEN, 0)
else:
to_compare = itertools.combinations(grouped.keys(), 2)
for (key, key_prim) in to_compare:
for d in [*grouped.get(key), *grouped.get(key_prim)]:
get_and_set(d, ARTIFACT_TOO_OFTEN, 0)
if (abs((key_prim - key_prim)) < time_window):
d[ARTIFACT_TOO_OFTEN] += 1
return classify_by_lambda(detections, (lambda x: (x.get(ARTIFACT_TOO_OFTEN) >= often)))<|docstring|>Analyse by too often classifier.
Note: detections should be grouped by ``device_id``.
See: ``group_by_device_id()``.
The additional group by ``resolution`` is not required, but is not prohibited.
So it may be work in the same grouped detections than for ``(near_)hot_pixel(2)`` classifiers.
:param detections: list of detections
:param often: classified threshold
:param time_window: timestamp distance
Classifier work similar to ``near_hot_pixel2`` classifier but in this we use ``timestamp`` object's key as group key.
At first, te detections from the same original image frame (with the same ``timestamp`` value) are counted as one detection.
At second, all other detections who distance is less than ``time_window`` are counted to ``artifact_too_often`` object's key.
The distance measurement of keys is the Euclidean distance between ``timestamp`` and ``timestamp'`` in 1D space.
Required keys:
* ``timestamp``: for group by the same original image frame, and count of detections in near
Keys will be add:
* ``artifact_hot_pixel``: count of detections in near ``timestamp``.
* ``classified``: set to ``artifact`` when detection will be classified as too_often artifact.
Example::
for by_device_id in group_by_device_id(detections):
too_often(by_device_id)
:return: tuple of (list of classified, list of no classified)<|endoftext|>
|
9c16c028e234b2a2184c029c0675291a0fae4384f685eb7b6bbd9a493f66deb2
|
def drawCharacters(data):
    """
    Create a character map image from an Apple II character generator ROM.

    Each character is 8 bytes. Each byte is one line in the character,
    with each bit indicating an off/on pixel in that line, in reverse order.

    :param data: byte sequence holding at least 256 * 8 entries (one character ROM image)
    """
    n = 256
    # BUG FIX: np.ndarray(shape) returns an *uninitialised* buffer, so the 1-pixel border
    # (row 0 / column 0, never written below) rendered as garbage. np.zeros guarantees a
    # clean background.
    arr = np.zeros(((((n // 32) * 8) + 1), ((32 * 8) + 1)), dtype=np.uint8)
    for i in range(0, n):
        # Top-left corner of character i in the 32-wide grid (offset by 1 for the border).
        x = ((8 * (i % 32)) + 1)
        y = ((8 * (i // 32)) + 1)
        for charline in range(0, 8):
            dataIndex = ((i * 8) + charline)
            for bit in range(0, 8):
                # Bits are stored in reverse order: bit 0 is the rightmost pixel of the line.
                arr[((y + charline), ((x + 7) - bit))] = ((data[dataIndex] & (2 ** bit)) != 0)
    plt.figure(figsize=(15, 5))
    # aspect 1/32 keeps character cells square; extent labels axes in character coordinates.
    plt.imshow(arr, cmap='Greys_r', interpolation='nearest', aspect=(1 / 32), extent=[0, 32, 256, (- 1)])
    plt.yticks([32, 96, 160, 224], ['$00', '$40', '$80', '$C0'], va='bottom', size=16)
    plt.xticks([0, 8, 16, 24], ['$00', '$08', '$10', '$18'], ha='center', size=16)
    plt.show()
|
Create a character map image from an Apple II character generator ROM.
Each character is 8 bytes. Each byte is one line in the character,
with each bit indicating an off/on pixel in that line, in reverse order.
|
convert_roms_arduino.py
|
drawCharacters
|
chris-torrence/arduino-at28c64
| 0 |
python
|
def drawCharacters(data):
'\n Create a character map image from a Apple II character generator ROM.\n Each character is 8 bytes. Each byte is one line in the character,\n with each bit indicating an off/on pixel in that line, in reverse order.\n '
n = 256
arr = np.ndarray(((((n // 32) * 8) + 1), ((32 * 8) + 1)), dtype=np.uint8)
for i in range(0, n):
x = ((8 * (i % 32)) + 1)
y = ((8 * (i // 32)) + 1)
for charline in range(0, 8):
dataIndex = ((i * 8) + charline)
for bit in range(0, 8):
arr[((y + charline), ((x + 7) - bit))] = ((data[dataIndex] & (2 ** bit)) != 0)
plt.figure(figsize=(15, 5))
plt.imshow(arr, cmap='Greys_r', interpolation='nearest', aspect=(1 / 32), extent=[0, 32, 256, (- 1)])
plt.yticks([32, 96, 160, 224], ['$00', '$40', '$80', '$C0'], va='bottom', size=16)
plt.xticks([0, 8, 16, 24], ['$00', '$08', '$10', '$18'], ha='center', size=16)
plt.show()
|
def drawCharacters(data):
'\n Create a character map image from a Apple II character generator ROM.\n Each character is 8 bytes. Each byte is one line in the character,\n with each bit indicating an off/on pixel in that line, in reverse order.\n '
n = 256
arr = np.ndarray(((((n // 32) * 8) + 1), ((32 * 8) + 1)), dtype=np.uint8)
for i in range(0, n):
x = ((8 * (i % 32)) + 1)
y = ((8 * (i // 32)) + 1)
for charline in range(0, 8):
dataIndex = ((i * 8) + charline)
for bit in range(0, 8):
arr[((y + charline), ((x + 7) - bit))] = ((data[dataIndex] & (2 ** bit)) != 0)
plt.figure(figsize=(15, 5))
plt.imshow(arr, cmap='Greys_r', interpolation='nearest', aspect=(1 / 32), extent=[0, 32, 256, (- 1)])
plt.yticks([32, 96, 160, 224], ['$00', '$40', '$80', '$C0'], va='bottom', size=16)
plt.xticks([0, 8, 16, 24], ['$00', '$08', '$10', '$18'], ha='center', size=16)
plt.show()<|docstring|>Create a character map image from a Apple II character generator ROM.
Each character is 8 bytes. Each byte is one line in the character,
with each bit indicating an off/on pixel in that line, in reverse order.<|endoftext|>
|
30993eb7673319fb5d64483775997745359e817ec33965f27a926208b84c79a8
|
def writeHeaderFile(inputFile, outputFile, data):
'\n Convert the data in an Apple II ROM\n into a form suitable for an Arduino header file.\n '
f = open(outputFile, 'w')
f.write('// Use PROGMEM to put into Arduino flash memory\n')
f.write((('// ' + inputFile) + ', created by applechargen.py\n'))
f.write(strftime('// %a, %d %b %Y %H:%M:%S\n', localtime()))
f.write('// #define __PROG_TYPES_COMPAT__\n')
f.write('// #include "avr/pgmspace.h"\n')
f.write((('#define ADDRESS_MAX ' + str(len(data))) + '\n'))
f.write('const unsigned char values[ADDRESS_MAX] PROGMEM = {\n')
for i in range(0, (len(data) // 16)):
for j in range(0, 16):
f.write((str(data[((i * 16) + j)]) + ','))
f.write('\n')
f.write('};\n')
f.close()
print(('Output written to: ' + outputFile))
|
Convert the data in an Apple II ROM
into a form suitable for an Arduino header file.
|
convert_roms_arduino.py
|
writeHeaderFile
|
chris-torrence/arduino-at28c64
| 0 |
python
|
def writeHeaderFile(inputFile, outputFile, data):
'\n Convert the data in an Apple II ROM\n into a form suitable for an Arduino header file.\n '
f = open(outputFile, 'w')
f.write('// Use PROGMEM to put into Arduino flash memory\n')
f.write((('// ' + inputFile) + ', created by applechargen.py\n'))
f.write(strftime('// %a, %d %b %Y %H:%M:%S\n', localtime()))
f.write('// #define __PROG_TYPES_COMPAT__\n')
f.write('// #include "avr/pgmspace.h"\n')
f.write((('#define ADDRESS_MAX ' + str(len(data))) + '\n'))
f.write('const unsigned char values[ADDRESS_MAX] PROGMEM = {\n')
for i in range(0, (len(data) // 16)):
for j in range(0, 16):
f.write((str(data[((i * 16) + j)]) + ','))
f.write('\n')
f.write('};\n')
f.close()
print(('Output written to: ' + outputFile))
|
def writeHeaderFile(inputFile, outputFile, data):
'\n Convert the data in an Apple II ROM\n into a form suitable for an Arduino header file.\n '
f = open(outputFile, 'w')
f.write('// Use PROGMEM to put into Arduino flash memory\n')
f.write((('// ' + inputFile) + ', created by applechargen.py\n'))
f.write(strftime('// %a, %d %b %Y %H:%M:%S\n', localtime()))
f.write('// #define __PROG_TYPES_COMPAT__\n')
f.write('// #include "avr/pgmspace.h"\n')
f.write((('#define ADDRESS_MAX ' + str(len(data))) + '\n'))
f.write('const unsigned char values[ADDRESS_MAX] PROGMEM = {\n')
for i in range(0, (len(data) // 16)):
for j in range(0, 16):
f.write((str(data[((i * 16) + j)]) + ','))
f.write('\n')
f.write('};\n')
f.close()
print(('Output written to: ' + outputFile))<|docstring|>Convert the data in an Apple II ROM
into a form suitable for an Arduino header file.<|endoftext|>
|
fcb67552427da2d7c48c5a26be8fd459bc57c0f1a7a29a490a00423e0595045b
|
def __init__(self, E, g=None, thresh=0.05, n=100):
'\n Args: \n E (WE class object): Word embeddings object.\n\n kwargs:\n g (np.array): Gender direction.\n thresh (float): The minimum indirect bias threshold, above \n which the association between a word and its\n neighbour is considered biased.\n n (int): Top `n` neighbours according to the cosine similarity.\n '
if (g is None):
g = get_g(E)
self.g = g
self.E = E
self.thresh = thresh
self.n = n
|
Args:
E (WE class object): Word embeddings object.
kwargs:
g (np.array): Gender direction.
thresh (float): The minimum indirect bias threshold, above
which the association between a word and its
neighbour is considered biased.
n (int): Top `n` neighbours according to the cosine similarity.
|
fee/metrics/proximity_bias.py
|
__init__
|
FEE-Fair-Embedding-Engine/FEE
| 8 |
python
|
def __init__(self, E, g=None, thresh=0.05, n=100):
'\n Args: \n E (WE class object): Word embeddings object.\n\n kwargs:\n g (np.array): Gender direction.\n thresh (float): The minimum indirect bias threshold, above \n which the association between a word and its\n neighbour is considered biased.\n n (int): Top `n` neighbours according to the cosine similarity.\n '
if (g is None):
g = get_g(E)
self.g = g
self.E = E
self.thresh = thresh
self.n = n
|
def __init__(self, E, g=None, thresh=0.05, n=100):
'\n Args: \n E (WE class object): Word embeddings object.\n\n kwargs:\n g (np.array): Gender direction.\n thresh (float): The minimum indirect bias threshold, above \n which the association between a word and its\n neighbour is considered biased.\n n (int): Top `n` neighbours according to the cosine similarity.\n '
if (g is None):
g = get_g(E)
self.g = g
self.E = E
self.thresh = thresh
self.n = n<|docstring|>Args:
E (WE class object): Word embeddings object.
kwargs:
g (np.array): Gender direction.
thresh (float): The minimum indirect bias threshold, above
which the association between a word and its
neighbour is considered biased.
n (int): Top `n` neighbours according to the cosine similarity.<|endoftext|>
|
4a47a1c4798686a84f07b1a02e0eebc4ff078a9cb84306701fcf5daf90df18dd
|
def compute(self, words):
'\n Args:\n words (str): A word or a list of worrds to compute the \n ProxBias for.\n Returns:\n The average proximity bias for the given list of `words`.\n Proximity bias is in simple terms the ratio of biased nieghbours\n according to indirect bias with respect to a word. \n\n '
if (not isinstance(words, list)):
words = [words]
pb = np.mean([_prox_bias(w, self.E, self.g, self.thresh, self.n) for w in words])
return pb
|
Args:
words (str): A word or a list of worrds to compute the
ProxBias for.
Returns:
The average proximity bias for the given list of `words`.
Proximity bias is in simple terms the ratio of biased nieghbours
according to indirect bias with respect to a word.
|
fee/metrics/proximity_bias.py
|
compute
|
FEE-Fair-Embedding-Engine/FEE
| 8 |
python
|
def compute(self, words):
'\n Args:\n words (str): A word or a list of worrds to compute the \n ProxBias for.\n Returns:\n The average proximity bias for the given list of `words`.\n Proximity bias is in simple terms the ratio of biased nieghbours\n according to indirect bias with respect to a word. \n\n '
if (not isinstance(words, list)):
words = [words]
pb = np.mean([_prox_bias(w, self.E, self.g, self.thresh, self.n) for w in words])
return pb
|
def compute(self, words):
'\n Args:\n words (str): A word or a list of worrds to compute the \n ProxBias for.\n Returns:\n The average proximity bias for the given list of `words`.\n Proximity bias is in simple terms the ratio of biased nieghbours\n according to indirect bias with respect to a word. \n\n '
if (not isinstance(words, list)):
words = [words]
pb = np.mean([_prox_bias(w, self.E, self.g, self.thresh, self.n) for w in words])
return pb<|docstring|>Args:
words (str): A word or a list of worrds to compute the
ProxBias for.
Returns:
The average proximity bias for the given list of `words`.
Proximity bias is in simple terms the ratio of biased nieghbours
according to indirect bias with respect to a word.<|endoftext|>
|
0a4725cab0e322034d8575c2ee76039cebe29903fd32adc7d129389fc58a53d5
|
def __init__(self, size: int=5, solver_type: (SolverType | str)=SolverType.MINIMAX, depth: int=1, extras: ((Sequence[Word] | Sequence[str]) | None)=None, lazy_eval: bool=True):
"Initialises a new instance of a Doddle object.\n\n Args:\n size (int, optional):\n The word length. Defaults to 5.\n\n solver_type (SolverType | str, optional):\n Enum stating the solver heuristic to use. Defaults to SolverType.MINIMAX.\n\n depth (int, optional):\n Depth of the search - how many moves to look ahead. Defaults to 1.\n\n extras (Sequence[Word] | Sequence[str] | None, optional):\n Any extra words to include in Doddle's dictionary. Defaults to None.\n\n lazy_eval (bool, optional):\n Whether to lazily score words as and when they are seen or to score every\n word against every word upfront. Lazy evaluation results in quicker\n initialisation but slower solves. The opposite is true when lazy initialisation\n is disabled. It is recommended to disable lazy evaluation if you plan to run\n Doddle multiple times within the same session for greater performance.\n Defaults to True.\n\n reporter (RunReporter | None, optional):\n A class that provided real-time reports (callback) as the solve progresses.\n Defaults to None.\n "
self.size = size
e = ([Word(extra) for extra in extras] if extras else [])
if isinstance(solver_type, str):
solve_type = SolverType.from_str(solver_type)
else:
solve_type = solver_type
(dictionary, scorer, histogram_builder, solver, simul_solver) = create_models(size, solver_type=solve_type, depth=depth, extras=e, lazy_eval=lazy_eval)
callback = NullRunReporter()
benchmarkReporter = NullBenchmarkReporter()
self.dictionary = dictionary
self.scorer = scorer
self.histogram_builder = histogram_builder
self.engine = Engine(dictionary, scorer, histogram_builder, solver, callback)
self.simul_engine = SimulEngine(dictionary, scorer, histogram_builder, simul_solver, callback)
self.benchmarker = Benchmarker(self.engine, benchmarkReporter)
self.simul_benchmarker = SimulBenchmarker(self.simul_engine, benchmarkReporter)
|
Initialises a new instance of a Doddle object.
Args:
size (int, optional):
The word length. Defaults to 5.
solver_type (SolverType | str, optional):
Enum stating the solver heuristic to use. Defaults to SolverType.MINIMAX.
depth (int, optional):
Depth of the search - how many moves to look ahead. Defaults to 1.
extras (Sequence[Word] | Sequence[str] | None, optional):
Any extra words to include in Doddle's dictionary. Defaults to None.
lazy_eval (bool, optional):
Whether to lazily score words as and when they are seen or to score every
word against every word upfront. Lazy evaluation results in quicker
initialisation but slower solves. The opposite is true when lazy initialisation
is disabled. It is recommended to disable lazy evaluation if you plan to run
Doddle multiple times within the same session for greater performance.
Defaults to True.
reporter (RunReporter | None, optional):
A class that provided real-time reports (callback) as the solve progresses.
Defaults to None.
|
src/doddle/facade.py
|
__init__
|
CatchemAL/Doddle
| 3 |
python
|
def __init__(self, size: int=5, solver_type: (SolverType | str)=SolverType.MINIMAX, depth: int=1, extras: ((Sequence[Word] | Sequence[str]) | None)=None, lazy_eval: bool=True):
"Initialises a new instance of a Doddle object.\n\n Args:\n size (int, optional):\n The word length. Defaults to 5.\n\n solver_type (SolverType | str, optional):\n Enum stating the solver heuristic to use. Defaults to SolverType.MINIMAX.\n\n depth (int, optional):\n Depth of the search - how many moves to look ahead. Defaults to 1.\n\n extras (Sequence[Word] | Sequence[str] | None, optional):\n Any extra words to include in Doddle's dictionary. Defaults to None.\n\n lazy_eval (bool, optional):\n Whether to lazily score words as and when they are seen or to score every\n word against every word upfront. Lazy evaluation results in quicker\n initialisation but slower solves. The opposite is true when lazy initialisation\n is disabled. It is recommended to disable lazy evaluation if you plan to run\n Doddle multiple times within the same session for greater performance.\n Defaults to True.\n\n reporter (RunReporter | None, optional):\n A class that provided real-time reports (callback) as the solve progresses.\n Defaults to None.\n "
self.size = size
e = ([Word(extra) for extra in extras] if extras else [])
if isinstance(solver_type, str):
solve_type = SolverType.from_str(solver_type)
else:
solve_type = solver_type
(dictionary, scorer, histogram_builder, solver, simul_solver) = create_models(size, solver_type=solve_type, depth=depth, extras=e, lazy_eval=lazy_eval)
callback = NullRunReporter()
benchmarkReporter = NullBenchmarkReporter()
self.dictionary = dictionary
self.scorer = scorer
self.histogram_builder = histogram_builder
self.engine = Engine(dictionary, scorer, histogram_builder, solver, callback)
self.simul_engine = SimulEngine(dictionary, scorer, histogram_builder, simul_solver, callback)
self.benchmarker = Benchmarker(self.engine, benchmarkReporter)
self.simul_benchmarker = SimulBenchmarker(self.simul_engine, benchmarkReporter)
|
def __init__(self, size: int=5, solver_type: (SolverType | str)=SolverType.MINIMAX, depth: int=1, extras: ((Sequence[Word] | Sequence[str]) | None)=None, lazy_eval: bool=True):
"Initialises a new instance of a Doddle object.\n\n Args:\n size (int, optional):\n The word length. Defaults to 5.\n\n solver_type (SolverType | str, optional):\n Enum stating the solver heuristic to use. Defaults to SolverType.MINIMAX.\n\n depth (int, optional):\n Depth of the search - how many moves to look ahead. Defaults to 1.\n\n extras (Sequence[Word] | Sequence[str] | None, optional):\n Any extra words to include in Doddle's dictionary. Defaults to None.\n\n lazy_eval (bool, optional):\n Whether to lazily score words as and when they are seen or to score every\n word against every word upfront. Lazy evaluation results in quicker\n initialisation but slower solves. The opposite is true when lazy initialisation\n is disabled. It is recommended to disable lazy evaluation if you plan to run\n Doddle multiple times within the same session for greater performance.\n Defaults to True.\n\n reporter (RunReporter | None, optional):\n A class that provided real-time reports (callback) as the solve progresses.\n Defaults to None.\n "
self.size = size
e = ([Word(extra) for extra in extras] if extras else [])
if isinstance(solver_type, str):
solve_type = SolverType.from_str(solver_type)
else:
solve_type = solver_type
(dictionary, scorer, histogram_builder, solver, simul_solver) = create_models(size, solver_type=solve_type, depth=depth, extras=e, lazy_eval=lazy_eval)
callback = NullRunReporter()
benchmarkReporter = NullBenchmarkReporter()
self.dictionary = dictionary
self.scorer = scorer
self.histogram_builder = histogram_builder
self.engine = Engine(dictionary, scorer, histogram_builder, solver, callback)
self.simul_engine = SimulEngine(dictionary, scorer, histogram_builder, simul_solver, callback)
self.benchmarker = Benchmarker(self.engine, benchmarkReporter)
self.simul_benchmarker = SimulBenchmarker(self.simul_engine, benchmarkReporter)<|docstring|>Initialises a new instance of a Doddle object.
Args:
size (int, optional):
The word length. Defaults to 5.
solver_type (SolverType | str, optional):
Enum stating the solver heuristic to use. Defaults to SolverType.MINIMAX.
depth (int, optional):
Depth of the search - how many moves to look ahead. Defaults to 1.
extras (Sequence[Word] | Sequence[str] | None, optional):
Any extra words to include in Doddle's dictionary. Defaults to None.
lazy_eval (bool, optional):
Whether to lazily score words as and when they are seen or to score every
word against every word upfront. Lazy evaluation results in quicker
initialisation but slower solves. The opposite is true when lazy initialisation
is disabled. It is recommended to disable lazy evaluation if you plan to run
Doddle multiple times within the same session for greater performance.
Defaults to True.
reporter (RunReporter | None, optional):
A class that provided real-time reports (callback) as the solve progresses.
Defaults to None.<|endoftext|>
|
2bbb17f7f5675a48c06d5acf742abfd567f6e43722cc52e34a1c148d901dba08
|
def __call__(self, answer: (WordType | Sequence[WordType]), guess: ((WordType | Sequence[WordType]) | None)=None) -> Scoreboard:
'Callable that runs a Doddle game and returns the resulting scoreboard.\n\n Args:\n answer (WordType | Sequence[WordType]):\n A word intended to be the answer. Alternatively, a sequence of words\n if you wish to play Doddle in simultaneous mode.\n\n guess (WordType | Sequence[WordType] | None, optional):\n An optional word to be played as the opening guess. You can pass a list\n of guesses if you want to play several openers. Defaults to None.\n\n Raises:\n ValueError:\n If the provided words are invalid.\n\n Returns:\n Scoreboard:\n A scoreboard showing how the game played out.\n '
solns = self.__to_word_list(answer, 'answer')
guesses = (self.__to_word_list(guess, 'guess') if guess else [])
size = len(solns[0])
missized_solns = [s.value for s in solns if (len(s) != self.size)]
if missized_solns:
message = f"All answers must be of length {self.size}: ({', '.join(missized_solns)}). "
message += 'To play Doddle with custom word lengths, please use the size argument when '
message += 'instantiating the Doddle object.\n\n'
message += f'''e.g.
doddle = Doddle(size={size})'''
raise ValueError(message)
missized_guesses = [g.value for g in guesses if (len(g) != self.size)]
if missized_guesses:
message = f"All guesses must be of size {self.size}: ({', '.join(missized_guesses)}). "
message += 'To play Doddle with custom word lengths, please use the size argument when '
message += 'instantiating the Doddle object.\n\n'
message += f'''e.g.
doddle = Doddle(size={len(missized_guesses[0])})'''
raise ValueError(message)
score_matrix = self.engine.histogram_builder.score_matrix
unknown_solns = [s.value for s in solns if (s not in score_matrix.potential_solns)]
if unknown_solns:
missing = ', '.join(unknown_solns)
missing_extras = "', '".join(unknown_solns)
message = f'''The following answers are not known to Doddle: {missing}
'''
message += 'To play Doddle with custom words, please use the extras argument when '
message += 'instantiating the Doddle object.\n\n'
message += f"e.g. doddle = Doddle(size={size}, ..., extras=['{answer}'])"
raise ValueError(message)
unknown_words = [g.value for g in guesses if (g not in score_matrix.all_words)]
if unknown_words:
missing = ', '.join(unknown_words)
missing_extras = "', '".join(unknown_words)
message = f'''The following guesses are not known to Doddle: {missing}
'''
message += 'To play Doddle with custom words, please use the extras argument when '
message += 'instantiating the Doddle object.\n\n'
message += f"e.g. doddle = Doddle(size={size}, ..., extras=['{missing_extras}'])"
raise ValueError(message)
if (len(solns) == 1):
game = self.engine.run(solns[0], guesses)
return game.scoreboard
simul_game = self.simul_engine.run(solns, guesses)
return simul_game.scoreboard
|
Callable that runs a Doddle game and returns the resulting scoreboard.
Args:
answer (WordType | Sequence[WordType]):
A word intended to be the answer. Alternatively, a sequence of words
if you wish to play Doddle in simultaneous mode.
guess (WordType | Sequence[WordType] | None, optional):
An optional word to be played as the opening guess. You can pass a list
of guesses if you want to play several openers. Defaults to None.
Raises:
ValueError:
If the provided words are invalid.
Returns:
Scoreboard:
A scoreboard showing how the game played out.
|
src/doddle/facade.py
|
__call__
|
CatchemAL/Doddle
| 3 |
python
|
def __call__(self, answer: (WordType | Sequence[WordType]), guess: ((WordType | Sequence[WordType]) | None)=None) -> Scoreboard:
'Callable that runs a Doddle game and returns the resulting scoreboard.\n\n Args:\n answer (WordType | Sequence[WordType]):\n A word intended to be the answer. Alternatively, a sequence of words\n if you wish to play Doddle in simultaneous mode.\n\n guess (WordType | Sequence[WordType] | None, optional):\n An optional word to be played as the opening guess. You can pass a list\n of guesses if you want to play several openers. Defaults to None.\n\n Raises:\n ValueError:\n If the provided words are invalid.\n\n Returns:\n Scoreboard:\n A scoreboard showing how the game played out.\n '
solns = self.__to_word_list(answer, 'answer')
guesses = (self.__to_word_list(guess, 'guess') if guess else [])
size = len(solns[0])
missized_solns = [s.value for s in solns if (len(s) != self.size)]
if missized_solns:
message = f"All answers must be of length {self.size}: ({', '.join(missized_solns)}). "
message += 'To play Doddle with custom word lengths, please use the size argument when '
message += 'instantiating the Doddle object.\n\n'
message += f'e.g.
doddle = Doddle(size={size})'
raise ValueError(message)
missized_guesses = [g.value for g in guesses if (len(g) != self.size)]
if missized_guesses:
message = f"All guesses must be of size {self.size}: ({', '.join(missized_guesses)}). "
message += 'To play Doddle with custom word lengths, please use the size argument when '
message += 'instantiating the Doddle object.\n\n'
message += f'e.g.
doddle = Doddle(size={len(missized_guesses[0])})'
raise ValueError(message)
score_matrix = self.engine.histogram_builder.score_matrix
unknown_solns = [s.value for s in solns if (s not in score_matrix.potential_solns)]
if unknown_solns:
missing = ', '.join(unknown_solns)
missing_extras = "', '".join(unknown_solns)
message = f'The following answers are not known to Doddle: {missing}
'
message += 'To play Doddle with custom words, please use the extras argument when '
message += 'instantiating the Doddle object.\n\n'
message += f"e.g. doddle = Doddle(size={size}, ..., extras=['{answer}'])"
raise ValueError(message)
unknown_words = [g.value for g in guesses if (g not in score_matrix.all_words)]
if unknown_words:
missing = ', '.join(unknown_words)
missing_extras = "', '".join(unknown_words)
message = f'The following guesses are not known to Doddle: {missing}
'
message += 'To play Doddle with custom words, please use the extras argument when '
message += 'instantiating the Doddle object.\n\n'
message += f"e.g. doddle = Doddle(size={size}, ..., extras=['{missing_extras}'])"
raise ValueError(message)
if (len(solns) == 1):
game = self.engine.run(solns[0], guesses)
return game.scoreboard
simul_game = self.simul_engine.run(solns, guesses)
return simul_game.scoreboard
|
def __call__(self, answer: (WordType | Sequence[WordType]), guess: ((WordType | Sequence[WordType]) | None)=None) -> Scoreboard:
'Callable that runs a Doddle game and returns the resulting scoreboard.\n\n Args:\n answer (WordType | Sequence[WordType]):\n A word intended to be the answer. Alternatively, a sequence of words\n if you wish to play Doddle in simultaneous mode.\n\n guess (WordType | Sequence[WordType] | None, optional):\n An optional word to be played as the opening guess. You can pass a list\n of guesses if you want to play several openers. Defaults to None.\n\n Raises:\n ValueError:\n If the provided words are invalid.\n\n Returns:\n Scoreboard:\n A scoreboard showing how the game played out.\n '
solns = self.__to_word_list(answer, 'answer')
guesses = (self.__to_word_list(guess, 'guess') if guess else [])
size = len(solns[0])
missized_solns = [s.value for s in solns if (len(s) != self.size)]
if missized_solns:
message = f"All answers must be of length {self.size}: ({', '.join(missized_solns)}). "
message += 'To play Doddle with custom word lengths, please use the size argument when '
message += 'instantiating the Doddle object.\n\n'
message += f'e.g.
doddle = Doddle(size={size})'
raise ValueError(message)
missized_guesses = [g.value for g in guesses if (len(g) != self.size)]
if missized_guesses:
message = f"All guesses must be of size {self.size}: ({', '.join(missized_guesses)}). "
message += 'To play Doddle with custom word lengths, please use the size argument when '
message += 'instantiating the Doddle object.\n\n'
message += f'e.g.
doddle = Doddle(size={len(missized_guesses[0])})'
raise ValueError(message)
score_matrix = self.engine.histogram_builder.score_matrix
unknown_solns = [s.value for s in solns if (s not in score_matrix.potential_solns)]
if unknown_solns:
missing = ', '.join(unknown_solns)
missing_extras = "', '".join(unknown_solns)
message = f'The following answers are not known to Doddle: {missing}
'
message += 'To play Doddle with custom words, please use the extras argument when '
message += 'instantiating the Doddle object.\n\n'
message += f"e.g. doddle = Doddle(size={size}, ..., extras=['{answer}'])"
raise ValueError(message)
unknown_words = [g.value for g in guesses if (g not in score_matrix.all_words)]
if unknown_words:
missing = ', '.join(unknown_words)
missing_extras = "', '".join(unknown_words)
message = f'The following guesses are not known to Doddle: {missing}
'
message += 'To play Doddle with custom words, please use the extras argument when '
message += 'instantiating the Doddle object.\n\n'
message += f"e.g. doddle = Doddle(size={size}, ..., extras=['{missing_extras}'])"
raise ValueError(message)
if (len(solns) == 1):
game = self.engine.run(solns[0], guesses)
return game.scoreboard
simul_game = self.simul_engine.run(solns, guesses)
return simul_game.scoreboard<|docstring|>Callable that runs a Doddle game and returns the resulting scoreboard.
Args:
answer (WordType | Sequence[WordType]):
A word intended to be the answer. Alternatively, a sequence of words
if you wish to play Doddle in simultaneous mode.
guess (WordType | Sequence[WordType] | None, optional):
An optional word to be played as the opening guess. You can pass a list
of guesses if you want to play several openers. Defaults to None.
Raises:
ValueError:
If the provided words are invalid.
Returns:
Scoreboard:
A scoreboard showing how the game played out.<|endoftext|>
|
111cb7bc92babe3aa313510ab9641fe87325b96f146bfa3d65aa4eef40136151
|
def formatter(_data, _meta):
'\n 将数据转换成用于提交和评分的格式。\n :param _data:\n 参考数据集中 [x]-[set]-with-answer.json 文件的格式,\n 用参赛系统给 [x]-[dev/test].json 中读出的对象列表添加 judge1 或/和 judge2 字段,\n 然后直接将添加了答案字段的三个任务dev集(或test集)中的所有数据全都放在一个 List 里,构成此参数。\n :param _meta: 参赛队伍的关键信息,用来呈现在排行榜。\n :return: 符合用于提交和评分的格式的数据,称之为"答卷"。\n '
_sheet = {'meta': {'team_name': _meta['team_name'], 'institution': _meta['institution'], 'email': _meta['email']}}
_methods = {'1': (lambda _item: int(_item['judge1'])), '2': (lambda _item: int(_item['judge2'])), '3': (lambda _item: [int(_item['judge1']), int(_item['judge2'])])}
for _item in _data:
(_task_id, _set_type, _item_id) = _item['qID'].split('-')
_key = f'subtask{_task_id}-{_set_type}'
if (((_set_type == 'dev') or (_set_type == 'val') or (_set_type == 'test')) and (_key not in _sheet)):
_sheet[_key] = {}
_sheet[_key][_item_id] = _methods[_task_id](_item)
return json.dumps(_sheet)
|
将数据转换成用于提交和评分的格式。
:param _data:
参考数据集中 [x]-[set]-with-answer.json 文件的格式,
用参赛系统给 [x]-[dev/test].json 中读出的对象列表添加 judge1 或/和 judge2 字段,
然后直接将添加了答案字段的三个任务dev集(或test集)中的所有数据全都放在一个 List 里,构成此参数。
:param _meta: 参赛队伍的关键信息,用来呈现在排行榜。
:return: 符合用于提交和评分的格式的数据,称之为"答卷"。
|
ref/evaluate.py
|
formatter
|
2030NLP/SpatialCognEval2021
| 35 |
python
|
def formatter(_data, _meta):
'\n 将数据转换成用于提交和评分的格式。\n :param _data:\n 参考数据集中 [x]-[set]-with-answer.json 文件的格式,\n 用参赛系统给 [x]-[dev/test].json 中读出的对象列表添加 judge1 或/和 judge2 字段,\n 然后直接将添加了答案字段的三个任务dev集(或test集)中的所有数据全都放在一个 List 里,构成此参数。\n :param _meta: 参赛队伍的关键信息,用来呈现在排行榜。\n :return: 符合用于提交和评分的格式的数据,称之为"答卷"。\n '
_sheet = {'meta': {'team_name': _meta['team_name'], 'institution': _meta['institution'], 'email': _meta['email']}}
_methods = {'1': (lambda _item: int(_item['judge1'])), '2': (lambda _item: int(_item['judge2'])), '3': (lambda _item: [int(_item['judge1']), int(_item['judge2'])])}
for _item in _data:
(_task_id, _set_type, _item_id) = _item['qID'].split('-')
_key = f'subtask{_task_id}-{_set_type}'
if (((_set_type == 'dev') or (_set_type == 'val') or (_set_type == 'test')) and (_key not in _sheet)):
_sheet[_key] = {}
_sheet[_key][_item_id] = _methods[_task_id](_item)
return json.dumps(_sheet)
|
def formatter(_data, _meta):
'\n 将数据转换成用于提交和评分的格式。\n :param _data:\n 参考数据集中 [x]-[set]-with-answer.json 文件的格式,\n 用参赛系统给 [x]-[dev/test].json 中读出的对象列表添加 judge1 或/和 judge2 字段,\n 然后直接将添加了答案字段的三个任务dev集(或test集)中的所有数据全都放在一个 List 里,构成此参数。\n :param _meta: 参赛队伍的关键信息,用来呈现在排行榜。\n :return: 符合用于提交和评分的格式的数据,称之为"答卷"。\n '
_sheet = {'meta': {'team_name': _meta['team_name'], 'institution': _meta['institution'], 'email': _meta['email']}}
_methods = {'1': (lambda _item: int(_item['judge1'])), '2': (lambda _item: int(_item['judge2'])), '3': (lambda _item: [int(_item['judge1']), int(_item['judge2'])])}
for _item in _data:
(_task_id, _set_type, _item_id) = _item['qID'].split('-')
_key = f'subtask{_task_id}-{_set_type}'
if (((_set_type == 'dev') or (_set_type == 'val') or (_set_type == 'test')) and (_key not in _sheet)):
_sheet[_key] = {}
_sheet[_key][_item_id] = _methods[_task_id](_item)
return json.dumps(_sheet)<|docstring|>将数据转换成用于提交和评分的格式。
:param _data:
参考数据集中 [x]-[set]-with-answer.json 文件的格式,
用参赛系统给 [x]-[dev/test].json 中读出的对象列表添加 judge1 或/和 judge2 字段,
然后直接将添加了答案字段的三个任务dev集(或test集)中的所有数据全都放在一个 List 里,构成此参数。
:param _meta: 参赛队伍的关键信息,用来呈现在排行榜。
:return: 符合用于提交和评分的格式的数据,称之为"答卷"。<|endoftext|>
|
54438dd392b5c72ec30e1b7dcc2d9cfc4159a0d8062fc260b11de57d0db9ca9a
|
def subtask1_eval(_answers, _ref):
'\n 子任务1的评分函数。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
_map = {'11': 'TP', '00': 'TN', '10': 'FN', '01': 'FP'}
_st = {'TP': 0, 'TN': 0, 'FN': 0, 'FP': 0}
for (_k, _v) in _ref.items():
_ga = int(_v)
_aa = (int(_answers[_k]) if (_k in _answers) else 0)
_st[_map[f'{_ga}{_aa}']] += 1
_st['Accuracy'] = ((_st['TP'] + _st['TN']) / (((_st['TP'] + _st['FP']) + _st['FN']) + _st['TN']))
return _st
|
子任务1的评分函数。
:param _answers: 答卷答案。
:param _ref: 参考答案。
:return: 统计数据对象。
|
ref/evaluate.py
|
subtask1_eval
|
2030NLP/SpatialCognEval2021
| 35 |
python
|
def subtask1_eval(_answers, _ref):
'\n 子任务1的评分函数。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
_map = {'11': 'TP', '00': 'TN', '10': 'FN', '01': 'FP'}
_st = {'TP': 0, 'TN': 0, 'FN': 0, 'FP': 0}
for (_k, _v) in _ref.items():
_ga = int(_v)
_aa = (int(_answers[_k]) if (_k in _answers) else 0)
_st[_map[f'{_ga}{_aa}']] += 1
_st['Accuracy'] = ((_st['TP'] + _st['TN']) / (((_st['TP'] + _st['FP']) + _st['FN']) + _st['TN']))
return _st
|
def subtask1_eval(_answers, _ref):
'\n 子任务1的评分函数。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
_map = {'11': 'TP', '00': 'TN', '10': 'FN', '01': 'FP'}
_st = {'TP': 0, 'TN': 0, 'FN': 0, 'FP': 0}
for (_k, _v) in _ref.items():
_ga = int(_v)
_aa = (int(_answers[_k]) if (_k in _answers) else 0)
_st[_map[f'{_ga}{_aa}']] += 1
_st['Accuracy'] = ((_st['TP'] + _st['TN']) / (((_st['TP'] + _st['FP']) + _st['FN']) + _st['TN']))
return _st<|docstring|>子任务1的评分函数。
:param _answers: 答卷答案。
:param _ref: 参考答案。
:return: 统计数据对象。<|endoftext|>
|
522aaec9937519a330de5f93fd783438d52bf11594df54c2a173ab8178f33ff1
|
def subtask2_eval(_answers, _ref):
'\n 子任务2的评分函数,与子任务1算法一致。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
return subtask1_eval(_answers, _ref)
|
子任务2的评分函数,与子任务1算法一致。
:param _answers: 答卷答案。
:param _ref: 参考答案。
:return: 统计数据对象。
|
ref/evaluate.py
|
subtask2_eval
|
2030NLP/SpatialCognEval2021
| 35 |
python
|
def subtask2_eval(_answers, _ref):
'\n 子任务2的评分函数,与子任务1算法一致。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
return subtask1_eval(_answers, _ref)
|
def subtask2_eval(_answers, _ref):
'\n 子任务2的评分函数,与子任务1算法一致。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
return subtask1_eval(_answers, _ref)<|docstring|>子任务2的评分函数,与子任务1算法一致。
:param _answers: 答卷答案。
:param _ref: 参考答案。
:return: 统计数据对象。<|endoftext|>
|
4b067b5dc4b76022a9703359edbc0699047c92584c36b6ffac327743de6d6b60
|
def subtask3_eval(_answers, _ref):
'\n 子任务3的评分函数。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
_map_1 = {'11': 'TP_1', '00': 'TN_1', '10': 'FN_1', '01': 'FP_1'}
_map_2 = {'11': 'TP_2', '00': 'TN_2'}
_st = {'TP_1': 0, 'TN_1': 0, 'FN_1': 0, 'FP_1': 0, 'TP_2': 0, 'TN_2': 0}
for (_k, _v) in _ref.items():
_ga = int(_v[0])
_aa = (int(_answers[_k][0]) if (_k in _answers) else 0)
_st[_map_1[f'{_ga}{_aa}']] += 1
_gb = int(_v[1])
_ab = (int(_answers[_k][1]) if (_k in _answers) else 0)
if ((_aa == 0 == _ga) and (_gb == _ab)):
_st[_map_2[f'{_gb}{_ab}']] += 1
_st['Precision'] = (((_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FN_1'])) if ((_st['TN_1'] + _st['FN_1']) != 0) else 0)
_st['Recall'] = (((_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FP_1'])) if ((_st['TN_1'] + _st['FP_1']) != 0) else 0)
_st['F1'] = ((((2 * _st['Precision']) * _st['Recall']) / (_st['Precision'] + _st['Recall'])) if ((_st['Precision'] + _st['Recall']) != 0) else 0)
return _st
|
子任务3的评分函数。
:param _answers: 答卷答案。
:param _ref: 参考答案。
:return: 统计数据对象。
|
ref/evaluate.py
|
subtask3_eval
|
2030NLP/SpatialCognEval2021
| 35 |
python
|
def subtask3_eval(_answers, _ref):
'\n 子任务3的评分函数。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
_map_1 = {'11': 'TP_1', '00': 'TN_1', '10': 'FN_1', '01': 'FP_1'}
_map_2 = {'11': 'TP_2', '00': 'TN_2'}
_st = {'TP_1': 0, 'TN_1': 0, 'FN_1': 0, 'FP_1': 0, 'TP_2': 0, 'TN_2': 0}
for (_k, _v) in _ref.items():
_ga = int(_v[0])
_aa = (int(_answers[_k][0]) if (_k in _answers) else 0)
_st[_map_1[f'{_ga}{_aa}']] += 1
_gb = int(_v[1])
_ab = (int(_answers[_k][1]) if (_k in _answers) else 0)
if ((_aa == 0 == _ga) and (_gb == _ab)):
_st[_map_2[f'{_gb}{_ab}']] += 1
_st['Precision'] = (((_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FN_1'])) if ((_st['TN_1'] + _st['FN_1']) != 0) else 0)
_st['Recall'] = (((_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FP_1'])) if ((_st['TN_1'] + _st['FP_1']) != 0) else 0)
_st['F1'] = ((((2 * _st['Precision']) * _st['Recall']) / (_st['Precision'] + _st['Recall'])) if ((_st['Precision'] + _st['Recall']) != 0) else 0)
return _st
|
def subtask3_eval(_answers, _ref):
'\n 子任务3的评分函数。\n :param _answers: 答卷答案。\n :param _ref: 参考答案。\n :return: 统计数据对象。\n '
_map_1 = {'11': 'TP_1', '00': 'TN_1', '10': 'FN_1', '01': 'FP_1'}
_map_2 = {'11': 'TP_2', '00': 'TN_2'}
_st = {'TP_1': 0, 'TN_1': 0, 'FN_1': 0, 'FP_1': 0, 'TP_2': 0, 'TN_2': 0}
for (_k, _v) in _ref.items():
_ga = int(_v[0])
_aa = (int(_answers[_k][0]) if (_k in _answers) else 0)
_st[_map_1[f'{_ga}{_aa}']] += 1
_gb = int(_v[1])
_ab = (int(_answers[_k][1]) if (_k in _answers) else 0)
if ((_aa == 0 == _ga) and (_gb == _ab)):
_st[_map_2[f'{_gb}{_ab}']] += 1
_st['Precision'] = (((_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FN_1'])) if ((_st['TN_1'] + _st['FN_1']) != 0) else 0)
_st['Recall'] = (((_st['TP_2'] + _st['TN_2']) / (_st['TN_1'] + _st['FP_1'])) if ((_st['TN_1'] + _st['FP_1']) != 0) else 0)
_st['F1'] = ((((2 * _st['Precision']) * _st['Recall']) / (_st['Precision'] + _st['Recall'])) if ((_st['Precision'] + _st['Recall']) != 0) else 0)
return _st<|docstring|>子任务3的评分函数。
:param _answers: 答卷答案。
:param _ref: 参考答案。
:return: 统计数据对象。<|endoftext|>
|
83d07c93311ddd0a208e5aabf7672ae902aa78af95233658c4a3193a9a2c7179
|
def chunk_sgml_file(sgml, chunk_on='PATDOC'):
' Iterate over patents in SGML-file.\n\n Parameters\n ----------\n sgml : str, file_like\n Path or file-handle to SGML-file or already read SGML-contents.\n chunk_on : str\n String to chunk file on. Defaults to "PATDOC".\n\n Yields\n ------\n chunk : str\n A single patent.\n '
if isinstance(sgml, str):
try:
with open(sgml) as f:
contents = f.read()
except IOError:
contents = sgml
elif hasattr(sgml, 'read'):
contents = sgml.read()
else:
raise ValueError('invalid sgml')
start_tag = '<{}'.format(chunk_on)
end_tag = '</{}'.format(chunk_on)
lines = contents.splitlines()
start_i = 0
for (i, line) in enumerate(lines):
if line.startswith(start_tag):
start_i = i
elif line.startswith(end_tag):
(yield '\n'.join(lines[start_i:(i + 1)]))
|
Iterate over patents in SGML-file.
Parameters
----------
sgml : str, file_like
Path or file-handle to SGML-file or already read SGML-contents.
chunk_on : str
String to chunk file on. Defaults to "PATDOC".
Yields
------
chunk : str
A single patent.
|
uspto_tools/parse/sgml.py
|
chunk_sgml_file
|
clicumu/uspto-tools
| 0 |
python
|
def chunk_sgml_file(sgml, chunk_on='PATDOC'):
' Iterate over patents in SGML-file.\n\n Parameters\n ----------\n sgml : str, file_like\n Path or file-handle to SGML-file or already read SGML-contents.\n chunk_on : str\n String to chunk file on. Defaults to "PATDOC".\n\n Yields\n ------\n chunk : str\n A single patent.\n '
if isinstance(sgml, str):
try:
with open(sgml) as f:
contents = f.read()
except IOError:
contents = sgml
elif hasattr(sgml, 'read'):
contents = sgml.read()
else:
raise ValueError('invalid sgml')
start_tag = '<{}'.format(chunk_on)
end_tag = '</{}'.format(chunk_on)
lines = contents.splitlines()
start_i = 0
for (i, line) in enumerate(lines):
if line.startswith(start_tag):
start_i = i
elif line.startswith(end_tag):
(yield '\n'.join(lines[start_i:(i + 1)]))
|
def chunk_sgml_file(sgml, chunk_on='PATDOC'):
' Iterate over patents in SGML-file.\n\n Parameters\n ----------\n sgml : str, file_like\n Path or file-handle to SGML-file or already read SGML-contents.\n chunk_on : str\n String to chunk file on. Defaults to "PATDOC".\n\n Yields\n ------\n chunk : str\n A single patent.\n '
if isinstance(sgml, str):
try:
with open(sgml) as f:
contents = f.read()
except IOError:
contents = sgml
elif hasattr(sgml, 'read'):
contents = sgml.read()
else:
raise ValueError('invalid sgml')
start_tag = '<{}'.format(chunk_on)
end_tag = '</{}'.format(chunk_on)
lines = contents.splitlines()
start_i = 0
for (i, line) in enumerate(lines):
if line.startswith(start_tag):
start_i = i
elif line.startswith(end_tag):
(yield '\n'.join(lines[start_i:(i + 1)]))<|docstring|>Iterate over patents in SGML-file.
Parameters
----------
sgml : str, file_like
Path or file-handle to SGML-file or already read SGML-contents.
chunk_on : str
String to chunk file on. Defaults to "PATDOC".
Yields
------
chunk : str
A single patent.<|endoftext|>
|
c715ea8da3f9072f61a04b8bc15dfa31e537a69c6727801eace87418b5ee889f
|
def parse_sgml_chunk(chunk):
' Parse SGML chunk into patent.\n\n Parameters\n ----------\n chunk : str\n Single USPTO patent in SGML format.\n\n Returns\n -------\n USPatent\n Parsed patent.\n '
soup = BeautifulSoup(chunk, 'lxml')
parsed = {'claims': [c.text.strip() for c in soup.find('cl').find_all('clm')], 'inventors': [sgml_to_inventor(tag) for tag in soup.find_all('b721')], 'patent_classification': [sgml_to_classification(tag) for tag in soup.find_all('b582')], 'us_references': [sgml_to_reference(tag) for tag in soup.find_all('b561')], 'application_number': soup.find('b210').text.replace(' ', ''), 'application_date': soup.find('b220').text.replace(' ', ''), 'patent_number': soup.find('b110').text}
check_if_none = {'brief_summary': soup.find('brfsum'), 'primary_examiner': soup.find('b746'), 'title': soup.find('b540'), 'kind': soup.find('b130'), 'description': soup.find('detdesc'), 'abstract': soup.find('sdoab')}
checked_for_none = {name: safe_text(val) for (name, val) in check_if_none.items()}
parsed.update(checked_for_none)
return USPatent(**parsed)
|
Parse SGML chunk into patent.
Parameters
----------
chunk : str
Single USPTO patent in SGML format.
Returns
-------
USPatent
Parsed patent.
|
uspto_tools/parse/sgml.py
|
parse_sgml_chunk
|
clicumu/uspto-tools
| 0 |
python
|
def parse_sgml_chunk(chunk):
' Parse SGML chunk into patent.\n\n Parameters\n ----------\n chunk : str\n Single USPTO patent in SGML format.\n\n Returns\n -------\n USPatent\n Parsed patent.\n '
soup = BeautifulSoup(chunk, 'lxml')
parsed = {'claims': [c.text.strip() for c in soup.find('cl').find_all('clm')], 'inventors': [sgml_to_inventor(tag) for tag in soup.find_all('b721')], 'patent_classification': [sgml_to_classification(tag) for tag in soup.find_all('b582')], 'us_references': [sgml_to_reference(tag) for tag in soup.find_all('b561')], 'application_number': soup.find('b210').text.replace(' ', ), 'application_date': soup.find('b220').text.replace(' ', ), 'patent_number': soup.find('b110').text}
check_if_none = {'brief_summary': soup.find('brfsum'), 'primary_examiner': soup.find('b746'), 'title': soup.find('b540'), 'kind': soup.find('b130'), 'description': soup.find('detdesc'), 'abstract': soup.find('sdoab')}
checked_for_none = {name: safe_text(val) for (name, val) in check_if_none.items()}
parsed.update(checked_for_none)
return USPatent(**parsed)
|
def parse_sgml_chunk(chunk):
' Parse SGML chunk into patent.\n\n Parameters\n ----------\n chunk : str\n Single USPTO patent in SGML format.\n\n Returns\n -------\n USPatent\n Parsed patent.\n '
soup = BeautifulSoup(chunk, 'lxml')
parsed = {'claims': [c.text.strip() for c in soup.find('cl').find_all('clm')], 'inventors': [sgml_to_inventor(tag) for tag in soup.find_all('b721')], 'patent_classification': [sgml_to_classification(tag) for tag in soup.find_all('b582')], 'us_references': [sgml_to_reference(tag) for tag in soup.find_all('b561')], 'application_number': soup.find('b210').text.replace(' ', ), 'application_date': soup.find('b220').text.replace(' ', ), 'patent_number': soup.find('b110').text}
check_if_none = {'brief_summary': soup.find('brfsum'), 'primary_examiner': soup.find('b746'), 'title': soup.find('b540'), 'kind': soup.find('b130'), 'description': soup.find('detdesc'), 'abstract': soup.find('sdoab')}
checked_for_none = {name: safe_text(val) for (name, val) in check_if_none.items()}
parsed.update(checked_for_none)
return USPatent(**parsed)<|docstring|>Parse SGML chunk into patent.
Parameters
----------
chunk : str
Single USPTO patent in SGML format.
Returns
-------
USPatent
Parsed patent.<|endoftext|>
|
ebda576e7ce86eec32fd8201c0de65a8f3f1e241a16941c79a5fa792cc161f23
|
def sgml_to_inventor(tag):
' Parse inventor tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Inventor tag.\n\n Returns\n -------\n Inventor\n '
nam_tag = tag.find('nam')
parsed = {'name': '{} {}'.format(safe_text(nam_tag.find('fnm')), safe_text(nam_tag.find('snm'))), 'city': safe_text(tag.find('city')), 'country': safe_text(tag.find('ctry'))}
return Inventor(**parsed)
|
Parse inventor tag.
Parameters
----------
tag : bs4.element.Tag
Inventor tag.
Returns
-------
Inventor
|
uspto_tools/parse/sgml.py
|
sgml_to_inventor
|
clicumu/uspto-tools
| 0 |
python
|
def sgml_to_inventor(tag):
' Parse inventor tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Inventor tag.\n\n Returns\n -------\n Inventor\n '
nam_tag = tag.find('nam')
parsed = {'name': '{} {}'.format(safe_text(nam_tag.find('fnm')), safe_text(nam_tag.find('snm'))), 'city': safe_text(tag.find('city')), 'country': safe_text(tag.find('ctry'))}
return Inventor(**parsed)
|
def sgml_to_inventor(tag):
' Parse inventor tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Inventor tag.\n\n Returns\n -------\n Inventor\n '
nam_tag = tag.find('nam')
parsed = {'name': '{} {}'.format(safe_text(nam_tag.find('fnm')), safe_text(nam_tag.find('snm'))), 'city': safe_text(tag.find('city')), 'country': safe_text(tag.find('ctry'))}
return Inventor(**parsed)<|docstring|>Parse inventor tag.
Parameters
----------
tag : bs4.element.Tag
Inventor tag.
Returns
-------
Inventor<|endoftext|>
|
e2a5f44bc737a282fc0041ffa0d0b90ce70dcb8448891421f1296830e06e3913
|
def sgml_to_classification(tag):
' Parse classification tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Classification tag.\n\n Returns\n -------\n PatentClassification\n '
return PatentClassification(us_classification=tag.text.strip())
|
Parse classification tag.
Parameters
----------
tag : bs4.element.Tag
Classification tag.
Returns
-------
PatentClassification
|
uspto_tools/parse/sgml.py
|
sgml_to_classification
|
clicumu/uspto-tools
| 0 |
python
|
def sgml_to_classification(tag):
' Parse classification tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Classification tag.\n\n Returns\n -------\n PatentClassification\n '
return PatentClassification(us_classification=tag.text.strip())
|
def sgml_to_classification(tag):
' Parse classification tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Classification tag.\n\n Returns\n -------\n PatentClassification\n '
return PatentClassification(us_classification=tag.text.strip())<|docstring|>Parse classification tag.
Parameters
----------
tag : bs4.element.Tag
Classification tag.
Returns
-------
PatentClassification<|endoftext|>
|
961ca6edcb61bef9666d4301587bd38d6f591f4e96c61851e992a2da68a0df91
|
def sgml_to_reference(tag):
' Parse reference tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Reference tag.\n\n Returns\n -------\n USReference\n '
p_num = tag.find('dnum').find('pdat').text
p_date = tag.find('date').find('pdat').text
name = '; '.join((t.text.strip() for t in tag.find_all('party-us')))
return USReference(patent_number=p_num, issue_date=p_date, patentee_name=name)
|
Parse reference tag.
Parameters
----------
tag : bs4.element.Tag
Reference tag.
Returns
-------
USReference
|
uspto_tools/parse/sgml.py
|
sgml_to_reference
|
clicumu/uspto-tools
| 0 |
python
|
def sgml_to_reference(tag):
' Parse reference tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Reference tag.\n\n Returns\n -------\n USReference\n '
p_num = tag.find('dnum').find('pdat').text
p_date = tag.find('date').find('pdat').text
name = '; '.join((t.text.strip() for t in tag.find_all('party-us')))
return USReference(patent_number=p_num, issue_date=p_date, patentee_name=name)
|
def sgml_to_reference(tag):
' Parse reference tag.\n\n Parameters\n ----------\n tag : bs4.element.Tag\n Reference tag.\n\n Returns\n -------\n USReference\n '
p_num = tag.find('dnum').find('pdat').text
p_date = tag.find('date').find('pdat').text
name = '; '.join((t.text.strip() for t in tag.find_all('party-us')))
return USReference(patent_number=p_num, issue_date=p_date, patentee_name=name)<|docstring|>Parse reference tag.
Parameters
----------
tag : bs4.element.Tag
Reference tag.
Returns
-------
USReference<|endoftext|>
|
92974f11f9430659abd34434a9bbb8ab51d71d6fb018cd1b31ae23a900f6ba9d
|
def process(self, element, restriction_tracker=beam.DoFn.RestrictionParam(ImpulseSeqGenRestrictionProvider())):
'\n :param element: (start_timestamp, end_timestamp, interval)\n :param restriction_tracker:\n :return: yields elements at processing real-time intervals with value of\n target output timestamp for the element.\n '
(start, _, interval) = element
if isinstance(start, Timestamp):
start = (start.micros / 1000000)
assert isinstance(restriction_tracker, sdf_utils.RestrictionTrackerView)
current_output_index = restriction_tracker.current_restriction().start
current_output_timestamp = (start + (interval * current_output_index))
current_time = time.time()
while (current_output_timestamp <= current_time):
if restriction_tracker.try_claim(current_output_index):
(yield current_output_timestamp)
current_output_index += 1
current_output_timestamp = (start + (interval * current_output_index))
current_time = time.time()
else:
return
restriction_tracker.defer_remainder(timestamp.Timestamp(current_output_timestamp))
|
:param element: (start_timestamp, end_timestamp, interval)
:param restriction_tracker:
:return: yields elements at processing real-time intervals with value of
target output timestamp for the element.
|
sdks/python/apache_beam/transforms/periodicsequence.py
|
process
|
Snowflake-Labs/beam
| 5,279 |
python
|
def process(self, element, restriction_tracker=beam.DoFn.RestrictionParam(ImpulseSeqGenRestrictionProvider())):
'\n :param element: (start_timestamp, end_timestamp, interval)\n :param restriction_tracker:\n :return: yields elements at processing real-time intervals with value of\n target output timestamp for the element.\n '
(start, _, interval) = element
if isinstance(start, Timestamp):
start = (start.micros / 1000000)
assert isinstance(restriction_tracker, sdf_utils.RestrictionTrackerView)
current_output_index = restriction_tracker.current_restriction().start
current_output_timestamp = (start + (interval * current_output_index))
current_time = time.time()
while (current_output_timestamp <= current_time):
if restriction_tracker.try_claim(current_output_index):
(yield current_output_timestamp)
current_output_index += 1
current_output_timestamp = (start + (interval * current_output_index))
current_time = time.time()
else:
return
restriction_tracker.defer_remainder(timestamp.Timestamp(current_output_timestamp))
|
def process(self, element, restriction_tracker=beam.DoFn.RestrictionParam(ImpulseSeqGenRestrictionProvider())):
'\n :param element: (start_timestamp, end_timestamp, interval)\n :param restriction_tracker:\n :return: yields elements at processing real-time intervals with value of\n target output timestamp for the element.\n '
(start, _, interval) = element
if isinstance(start, Timestamp):
start = (start.micros / 1000000)
assert isinstance(restriction_tracker, sdf_utils.RestrictionTrackerView)
current_output_index = restriction_tracker.current_restriction().start
current_output_timestamp = (start + (interval * current_output_index))
current_time = time.time()
while (current_output_timestamp <= current_time):
if restriction_tracker.try_claim(current_output_index):
(yield current_output_timestamp)
current_output_index += 1
current_output_timestamp = (start + (interval * current_output_index))
current_time = time.time()
else:
return
restriction_tracker.defer_remainder(timestamp.Timestamp(current_output_timestamp))<|docstring|>:param element: (start_timestamp, end_timestamp, interval)
:param restriction_tracker:
:return: yields elements at processing real-time intervals with value of
target output timestamp for the element.<|endoftext|>
|
b6475816d74f4ab9965fc5a49e123b88310e5b71cf44bf0ffd5055878d31c2dd
|
def __init__(self, start_timestamp=Timestamp.now(), stop_timestamp=MAX_TIMESTAMP, fire_interval=360.0, apply_windowing=False):
'\n :param start_timestamp: Timestamp for first element.\n :param stop_timestamp: Timestamp after which no elements will be output.\n :param fire_interval: Interval at which to output elements.\n :param apply_windowing: Whether each element should be assigned to\n individual window. If false, all elements will reside in global window.\n '
self.start_ts = start_timestamp
self.stop_ts = stop_timestamp
self.interval = fire_interval
self.apply_windowing = apply_windowing
|
:param start_timestamp: Timestamp for first element.
:param stop_timestamp: Timestamp after which no elements will be output.
:param fire_interval: Interval at which to output elements.
:param apply_windowing: Whether each element should be assigned to
individual window. If false, all elements will reside in global window.
|
sdks/python/apache_beam/transforms/periodicsequence.py
|
__init__
|
Snowflake-Labs/beam
| 5,279 |
python
|
def __init__(self, start_timestamp=Timestamp.now(), stop_timestamp=MAX_TIMESTAMP, fire_interval=360.0, apply_windowing=False):
'\n :param start_timestamp: Timestamp for first element.\n :param stop_timestamp: Timestamp after which no elements will be output.\n :param fire_interval: Interval at which to output elements.\n :param apply_windowing: Whether each element should be assigned to\n individual window. If false, all elements will reside in global window.\n '
self.start_ts = start_timestamp
self.stop_ts = stop_timestamp
self.interval = fire_interval
self.apply_windowing = apply_windowing
|
def __init__(self, start_timestamp=Timestamp.now(), stop_timestamp=MAX_TIMESTAMP, fire_interval=360.0, apply_windowing=False):
'\n :param start_timestamp: Timestamp for first element.\n :param stop_timestamp: Timestamp after which no elements will be output.\n :param fire_interval: Interval at which to output elements.\n :param apply_windowing: Whether each element should be assigned to\n individual window. If false, all elements will reside in global window.\n '
self.start_ts = start_timestamp
self.stop_ts = stop_timestamp
self.interval = fire_interval
self.apply_windowing = apply_windowing<|docstring|>:param start_timestamp: Timestamp for first element.
:param stop_timestamp: Timestamp after which no elements will be output.
:param fire_interval: Interval at which to output elements.
:param apply_windowing: Whether each element should be assigned to
individual window. If false, all elements will reside in global window.<|endoftext|>
|
284d4f0f91e507998741af450dae4fa864f43eb4164acddf1ddd88f155d29d10
|
def __init__(self, name=None, format='GIF', resolution=None, frame_rate=20, show_legend=True, show_cube=True, background_color=None, local_vars_configuration=None):
'AnimationOutputSettings - a model defined in OpenAPI'
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._format = None
self._resolution = None
self._frame_rate = None
self._show_legend = None
self._show_cube = None
self._background_color = None
self.discriminator = None
self.name = name
self.format = format
self.resolution = resolution
self.frame_rate = frame_rate
self.show_legend = show_legend
self.show_cube = show_cube
if (background_color is not None):
self.background_color = background_color
|
AnimationOutputSettings - a model defined in OpenAPI
|
simscale_sdk/models/animation_output_settings.py
|
__init__
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
def __init__(self, name=None, format='GIF', resolution=None, frame_rate=20, show_legend=True, show_cube=True, background_color=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._format = None
self._resolution = None
self._frame_rate = None
self._show_legend = None
self._show_cube = None
self._background_color = None
self.discriminator = None
self.name = name
self.format = format
self.resolution = resolution
self.frame_rate = frame_rate
self.show_legend = show_legend
self.show_cube = show_cube
if (background_color is not None):
self.background_color = background_color
|
def __init__(self, name=None, format='GIF', resolution=None, frame_rate=20, show_legend=True, show_cube=True, background_color=None, local_vars_configuration=None):
if (local_vars_configuration is None):
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._format = None
self._resolution = None
self._frame_rate = None
self._show_legend = None
self._show_cube = None
self._background_color = None
self.discriminator = None
self.name = name
self.format = format
self.resolution = resolution
self.frame_rate = frame_rate
self.show_legend = show_legend
self.show_cube = show_cube
if (background_color is not None):
self.background_color = background_color<|docstring|>AnimationOutputSettings - a model defined in OpenAPI<|endoftext|>
|
d0bfdd80f29781dc598c86ad805d653288e6f2cb2059b2eb1d8f9e820b514cc7
|
@property
def name(self):
'Gets the name of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The name of this AnimationOutputSettings. # noqa: E501\n :rtype: str\n '
return self._name
|
Gets the name of this AnimationOutputSettings. # noqa: E501
:return: The name of this AnimationOutputSettings. # noqa: E501
:rtype: str
|
simscale_sdk/models/animation_output_settings.py
|
name
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@property
def name(self):
'Gets the name of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The name of this AnimationOutputSettings. # noqa: E501\n :rtype: str\n '
return self._name
|
@property
def name(self):
'Gets the name of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The name of this AnimationOutputSettings. # noqa: E501\n :rtype: str\n '
return self._name<|docstring|>Gets the name of this AnimationOutputSettings. # noqa: E501
:return: The name of this AnimationOutputSettings. # noqa: E501
:rtype: str<|endoftext|>
|
1c166171b9c1fb6a9cbab4af91cd466f0826b9b074526c12f91651db4b6ec4fd
|
@name.setter
def name(self, name):
'Sets the name of this AnimationOutputSettings.\n\n\n :param name: The name of this AnimationOutputSettings. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (name is None)):
raise ValueError('Invalid value for `name`, must not be `None`')
self._name = name
|
Sets the name of this AnimationOutputSettings.
:param name: The name of this AnimationOutputSettings. # noqa: E501
:type: str
|
simscale_sdk/models/animation_output_settings.py
|
name
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@name.setter
def name(self, name):
'Sets the name of this AnimationOutputSettings.\n\n\n :param name: The name of this AnimationOutputSettings. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (name is None)):
raise ValueError('Invalid value for `name`, must not be `None`')
self._name = name
|
@name.setter
def name(self, name):
'Sets the name of this AnimationOutputSettings.\n\n\n :param name: The name of this AnimationOutputSettings. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (name is None)):
raise ValueError('Invalid value for `name`, must not be `None`')
self._name = name<|docstring|>Sets the name of this AnimationOutputSettings.
:param name: The name of this AnimationOutputSettings. # noqa: E501
:type: str<|endoftext|>
|
450e21011accc2e2c265a88dba005494ebed1e5f396e94053324877674af3bd9
|
@property
def format(self):
'Gets the format of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The format of this AnimationOutputSettings. # noqa: E501\n :rtype: str\n '
return self._format
|
Gets the format of this AnimationOutputSettings. # noqa: E501
:return: The format of this AnimationOutputSettings. # noqa: E501
:rtype: str
|
simscale_sdk/models/animation_output_settings.py
|
format
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@property
def format(self):
'Gets the format of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The format of this AnimationOutputSettings. # noqa: E501\n :rtype: str\n '
return self._format
|
@property
def format(self):
'Gets the format of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The format of this AnimationOutputSettings. # noqa: E501\n :rtype: str\n '
return self._format<|docstring|>Gets the format of this AnimationOutputSettings. # noqa: E501
:return: The format of this AnimationOutputSettings. # noqa: E501
:rtype: str<|endoftext|>
|
cfa5b0cbf4692395a3dc5669db284d37eab7c1b40f1c9d414a3313cbfd48677f
|
@format.setter
def format(self, format):
'Sets the format of this AnimationOutputSettings.\n\n\n :param format: The format of this AnimationOutputSettings. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (format is None)):
raise ValueError('Invalid value for `format`, must not be `None`')
allowed_values = ['GIF', 'MP4']
if (self.local_vars_configuration.client_side_validation and (format not in allowed_values)):
raise ValueError('Invalid value for `format` ({0}), must be one of {1}'.format(format, allowed_values))
self._format = format
|
Sets the format of this AnimationOutputSettings.
:param format: The format of this AnimationOutputSettings. # noqa: E501
:type: str
|
simscale_sdk/models/animation_output_settings.py
|
format
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@format.setter
def format(self, format):
'Sets the format of this AnimationOutputSettings.\n\n\n :param format: The format of this AnimationOutputSettings. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (format is None)):
raise ValueError('Invalid value for `format`, must not be `None`')
allowed_values = ['GIF', 'MP4']
if (self.local_vars_configuration.client_side_validation and (format not in allowed_values)):
raise ValueError('Invalid value for `format` ({0}), must be one of {1}'.format(format, allowed_values))
self._format = format
|
@format.setter
def format(self, format):
'Sets the format of this AnimationOutputSettings.\n\n\n :param format: The format of this AnimationOutputSettings. # noqa: E501\n :type: str\n '
if (self.local_vars_configuration.client_side_validation and (format is None)):
raise ValueError('Invalid value for `format`, must not be `None`')
allowed_values = ['GIF', 'MP4']
if (self.local_vars_configuration.client_side_validation and (format not in allowed_values)):
raise ValueError('Invalid value for `format` ({0}), must be one of {1}'.format(format, allowed_values))
self._format = format<|docstring|>Sets the format of this AnimationOutputSettings.
:param format: The format of this AnimationOutputSettings. # noqa: E501
:type: str<|endoftext|>
|
3e9a6658ebffb6896f1656422919b4c33c2319fbe39054856b3c353b426d5303
|
@property
def resolution(self):
'Gets the resolution of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The resolution of this AnimationOutputSettings. # noqa: E501\n :rtype: ResolutionInfo\n '
return self._resolution
|
Gets the resolution of this AnimationOutputSettings. # noqa: E501
:return: The resolution of this AnimationOutputSettings. # noqa: E501
:rtype: ResolutionInfo
|
simscale_sdk/models/animation_output_settings.py
|
resolution
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@property
def resolution(self):
'Gets the resolution of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The resolution of this AnimationOutputSettings. # noqa: E501\n :rtype: ResolutionInfo\n '
return self._resolution
|
@property
def resolution(self):
'Gets the resolution of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The resolution of this AnimationOutputSettings. # noqa: E501\n :rtype: ResolutionInfo\n '
return self._resolution<|docstring|>Gets the resolution of this AnimationOutputSettings. # noqa: E501
:return: The resolution of this AnimationOutputSettings. # noqa: E501
:rtype: ResolutionInfo<|endoftext|>
|
0251f6998f154922dd937a06c223afab72597c1220c547fd3b657f7ca56d6ba3
|
@resolution.setter
def resolution(self, resolution):
'Sets the resolution of this AnimationOutputSettings.\n\n\n :param resolution: The resolution of this AnimationOutputSettings. # noqa: E501\n :type: ResolutionInfo\n '
if (self.local_vars_configuration.client_side_validation and (resolution is None)):
raise ValueError('Invalid value for `resolution`, must not be `None`')
self._resolution = resolution
|
Sets the resolution of this AnimationOutputSettings.
:param resolution: The resolution of this AnimationOutputSettings. # noqa: E501
:type: ResolutionInfo
|
simscale_sdk/models/animation_output_settings.py
|
resolution
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@resolution.setter
def resolution(self, resolution):
'Sets the resolution of this AnimationOutputSettings.\n\n\n :param resolution: The resolution of this AnimationOutputSettings. # noqa: E501\n :type: ResolutionInfo\n '
if (self.local_vars_configuration.client_side_validation and (resolution is None)):
raise ValueError('Invalid value for `resolution`, must not be `None`')
self._resolution = resolution
|
@resolution.setter
def resolution(self, resolution):
'Sets the resolution of this AnimationOutputSettings.\n\n\n :param resolution: The resolution of this AnimationOutputSettings. # noqa: E501\n :type: ResolutionInfo\n '
if (self.local_vars_configuration.client_side_validation and (resolution is None)):
raise ValueError('Invalid value for `resolution`, must not be `None`')
self._resolution = resolution<|docstring|>Sets the resolution of this AnimationOutputSettings.
:param resolution: The resolution of this AnimationOutputSettings. # noqa: E501
:type: ResolutionInfo<|endoftext|>
|
ad500e15e9a924e2d9307550d6f4f1bc16c89117b91b1ec642ca0daf557f36ec
|
@property
def frame_rate(self):
'Gets the frame_rate of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The frame_rate of this AnimationOutputSettings. # noqa: E501\n :rtype: int\n '
return self._frame_rate
|
Gets the frame_rate of this AnimationOutputSettings. # noqa: E501
:return: The frame_rate of this AnimationOutputSettings. # noqa: E501
:rtype: int
|
simscale_sdk/models/animation_output_settings.py
|
frame_rate
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@property
def frame_rate(self):
'Gets the frame_rate of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The frame_rate of this AnimationOutputSettings. # noqa: E501\n :rtype: int\n '
return self._frame_rate
|
@property
def frame_rate(self):
'Gets the frame_rate of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The frame_rate of this AnimationOutputSettings. # noqa: E501\n :rtype: int\n '
return self._frame_rate<|docstring|>Gets the frame_rate of this AnimationOutputSettings. # noqa: E501
:return: The frame_rate of this AnimationOutputSettings. # noqa: E501
:rtype: int<|endoftext|>
|
b20aeaa589b0e3e575ff32e7f34bbbfcb693c080a5bd72cc0be37dc312f80bd4
|
@frame_rate.setter
def frame_rate(self, frame_rate):
    """Sets the frame_rate of this AnimationOutputSettings.

    :param frame_rate: The frame_rate of this AnimationOutputSettings.  # noqa: E501
    :type: int
    """
    # Client-side validation: required field, clamped to the 1..60 range.
    validate = self.local_vars_configuration.client_side_validation
    if validate and frame_rate is None:
        raise ValueError('Invalid value for `frame_rate`, must not be `None`')
    if validate and frame_rate is not None and frame_rate > 60:
        raise ValueError('Invalid value for `frame_rate`, must be a value less than or equal to `60`')
    if validate and frame_rate is not None and frame_rate < 1:
        raise ValueError('Invalid value for `frame_rate`, must be a value greater than or equal to `1`')
    self._frame_rate = frame_rate
|
Sets the frame_rate of this AnimationOutputSettings.
:param frame_rate: The frame_rate of this AnimationOutputSettings. # noqa: E501
:type: int
|
simscale_sdk/models/animation_output_settings.py
|
frame_rate
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@frame_rate.setter
def frame_rate(self, frame_rate):
'Sets the frame_rate of this AnimationOutputSettings.\n\n\n :param frame_rate: The frame_rate of this AnimationOutputSettings. # noqa: E501\n :type: int\n '
if (self.local_vars_configuration.client_side_validation and (frame_rate is None)):
raise ValueError('Invalid value for `frame_rate`, must not be `None`')
if (self.local_vars_configuration.client_side_validation and (frame_rate is not None) and (frame_rate > 60)):
raise ValueError('Invalid value for `frame_rate`, must be a value less than or equal to `60`')
if (self.local_vars_configuration.client_side_validation and (frame_rate is not None) and (frame_rate < 1)):
raise ValueError('Invalid value for `frame_rate`, must be a value greater than or equal to `1`')
self._frame_rate = frame_rate
|
@frame_rate.setter
def frame_rate(self, frame_rate):
'Sets the frame_rate of this AnimationOutputSettings.\n\n\n :param frame_rate: The frame_rate of this AnimationOutputSettings. # noqa: E501\n :type: int\n '
if (self.local_vars_configuration.client_side_validation and (frame_rate is None)):
raise ValueError('Invalid value for `frame_rate`, must not be `None`')
if (self.local_vars_configuration.client_side_validation and (frame_rate is not None) and (frame_rate > 60)):
raise ValueError('Invalid value for `frame_rate`, must be a value less than or equal to `60`')
if (self.local_vars_configuration.client_side_validation and (frame_rate is not None) and (frame_rate < 1)):
raise ValueError('Invalid value for `frame_rate`, must be a value greater than or equal to `1`')
self._frame_rate = frame_rate<|docstring|>Sets the frame_rate of this AnimationOutputSettings.
:param frame_rate: The frame_rate of this AnimationOutputSettings. # noqa: E501
:type: int<|endoftext|>
|
8c9646d5870b2095e9457aa3413d880e320448535c1cbd98c38d618e41faaa70
|
@property
def show_legend(self):
    """Gets the show_legend of this AnimationOutputSettings.  # noqa: E501

    :return: The show_legend of this AnimationOutputSettings.  # noqa: E501
    :rtype: bool
    """
    return self._show_legend
|
Gets the show_legend of this AnimationOutputSettings. # noqa: E501
:return: The show_legend of this AnimationOutputSettings. # noqa: E501
:rtype: bool
|
simscale_sdk/models/animation_output_settings.py
|
show_legend
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@property
def show_legend(self):
'Gets the show_legend of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The show_legend of this AnimationOutputSettings. # noqa: E501\n :rtype: bool\n '
return self._show_legend
|
@property
def show_legend(self):
'Gets the show_legend of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The show_legend of this AnimationOutputSettings. # noqa: E501\n :rtype: bool\n '
return self._show_legend<|docstring|>Gets the show_legend of this AnimationOutputSettings. # noqa: E501
:return: The show_legend of this AnimationOutputSettings. # noqa: E501
:rtype: bool<|endoftext|>
|
f602a6f827d1d219e1cb1aa81e0277b0bb0f423301ef2be6ba595237896c800b
|
@show_legend.setter
def show_legend(self, show_legend):
    """Sets the show_legend of this AnimationOutputSettings.

    :param show_legend: The show_legend of this AnimationOutputSettings.  # noqa: E501
    :type: bool
    """
    # Client-side validation: this field is required.
    validate = self.local_vars_configuration.client_side_validation
    if validate and show_legend is None:
        raise ValueError('Invalid value for `show_legend`, must not be `None`')
    self._show_legend = show_legend
|
Sets the show_legend of this AnimationOutputSettings.
:param show_legend: The show_legend of this AnimationOutputSettings. # noqa: E501
:type: bool
|
simscale_sdk/models/animation_output_settings.py
|
show_legend
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@show_legend.setter
def show_legend(self, show_legend):
'Sets the show_legend of this AnimationOutputSettings.\n\n\n :param show_legend: The show_legend of this AnimationOutputSettings. # noqa: E501\n :type: bool\n '
if (self.local_vars_configuration.client_side_validation and (show_legend is None)):
raise ValueError('Invalid value for `show_legend`, must not be `None`')
self._show_legend = show_legend
|
@show_legend.setter
def show_legend(self, show_legend):
'Sets the show_legend of this AnimationOutputSettings.\n\n\n :param show_legend: The show_legend of this AnimationOutputSettings. # noqa: E501\n :type: bool\n '
if (self.local_vars_configuration.client_side_validation and (show_legend is None)):
raise ValueError('Invalid value for `show_legend`, must not be `None`')
self._show_legend = show_legend<|docstring|>Sets the show_legend of this AnimationOutputSettings.
:param show_legend: The show_legend of this AnimationOutputSettings. # noqa: E501
:type: bool<|endoftext|>
|
8a41717bd755ff98eefbdd8c98ff459e0695ff862092e0b1fa42eabc89eb262f
|
@property
def show_cube(self):
    """Gets the show_cube of this AnimationOutputSettings.  # noqa: E501

    :return: The show_cube of this AnimationOutputSettings.  # noqa: E501
    :rtype: bool
    """
    return self._show_cube
|
Gets the show_cube of this AnimationOutputSettings. # noqa: E501
:return: The show_cube of this AnimationOutputSettings. # noqa: E501
:rtype: bool
|
simscale_sdk/models/animation_output_settings.py
|
show_cube
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@property
def show_cube(self):
'Gets the show_cube of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The show_cube of this AnimationOutputSettings. # noqa: E501\n :rtype: bool\n '
return self._show_cube
|
@property
def show_cube(self):
'Gets the show_cube of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The show_cube of this AnimationOutputSettings. # noqa: E501\n :rtype: bool\n '
return self._show_cube<|docstring|>Gets the show_cube of this AnimationOutputSettings. # noqa: E501
:return: The show_cube of this AnimationOutputSettings. # noqa: E501
:rtype: bool<|endoftext|>
|
a490c85b1ee17dde66fc8942ccfdbad8501945800e4d10f4b3f512ef20b277c7
|
@show_cube.setter
def show_cube(self, show_cube):
    """Sets the show_cube of this AnimationOutputSettings.

    :param show_cube: The show_cube of this AnimationOutputSettings.  # noqa: E501
    :type: bool
    """
    # Client-side validation: this field is required.
    validate = self.local_vars_configuration.client_side_validation
    if validate and show_cube is None:
        raise ValueError('Invalid value for `show_cube`, must not be `None`')
    self._show_cube = show_cube
|
Sets the show_cube of this AnimationOutputSettings.
:param show_cube: The show_cube of this AnimationOutputSettings. # noqa: E501
:type: bool
|
simscale_sdk/models/animation_output_settings.py
|
show_cube
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@show_cube.setter
def show_cube(self, show_cube):
'Sets the show_cube of this AnimationOutputSettings.\n\n\n :param show_cube: The show_cube of this AnimationOutputSettings. # noqa: E501\n :type: bool\n '
if (self.local_vars_configuration.client_side_validation and (show_cube is None)):
raise ValueError('Invalid value for `show_cube`, must not be `None`')
self._show_cube = show_cube
|
@show_cube.setter
def show_cube(self, show_cube):
'Sets the show_cube of this AnimationOutputSettings.\n\n\n :param show_cube: The show_cube of this AnimationOutputSettings. # noqa: E501\n :type: bool\n '
if (self.local_vars_configuration.client_side_validation and (show_cube is None)):
raise ValueError('Invalid value for `show_cube`, must not be `None`')
self._show_cube = show_cube<|docstring|>Sets the show_cube of this AnimationOutputSettings.
:param show_cube: The show_cube of this AnimationOutputSettings. # noqa: E501
:type: bool<|endoftext|>
|
40467dfd80988c70f09e43f31185b1c664be3dbcf142393c3bc836317775e305
|
@property
def background_color(self):
    """Gets the background_color of this AnimationOutputSettings.  # noqa: E501

    :return: The background_color of this AnimationOutputSettings.  # noqa: E501
    :rtype: Color
    """
    return self._background_color
|
Gets the background_color of this AnimationOutputSettings. # noqa: E501
:return: The background_color of this AnimationOutputSettings. # noqa: E501
:rtype: Color
|
simscale_sdk/models/animation_output_settings.py
|
background_color
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@property
def background_color(self):
'Gets the background_color of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The background_color of this AnimationOutputSettings. # noqa: E501\n :rtype: Color\n '
return self._background_color
|
@property
def background_color(self):
'Gets the background_color of this AnimationOutputSettings. # noqa: E501\n\n\n :return: The background_color of this AnimationOutputSettings. # noqa: E501\n :rtype: Color\n '
return self._background_color<|docstring|>Gets the background_color of this AnimationOutputSettings. # noqa: E501
:return: The background_color of this AnimationOutputSettings. # noqa: E501
:rtype: Color<|endoftext|>
|
64d9e99eb1250e34a5f593a85fd5dc9b79b0e0b444c7b6456e4102d4ac2c8112
|
@background_color.setter
def background_color(self, background_color):
    """Sets the background_color of this AnimationOutputSettings.

    :param background_color: The background_color of this AnimationOutputSettings.  # noqa: E501
    :type: Color
    """
    # Optional field: no client-side validation is performed here.
    self._background_color = background_color
|
Sets the background_color of this AnimationOutputSettings.
:param background_color: The background_color of this AnimationOutputSettings. # noqa: E501
:type: Color
|
simscale_sdk/models/animation_output_settings.py
|
background_color
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
@background_color.setter
def background_color(self, background_color):
'Sets the background_color of this AnimationOutputSettings.\n\n\n :param background_color: The background_color of this AnimationOutputSettings. # noqa: E501\n :type: Color\n '
self._background_color = background_color
|
@background_color.setter
def background_color(self, background_color):
'Sets the background_color of this AnimationOutputSettings.\n\n\n :param background_color: The background_color of this AnimationOutputSettings. # noqa: E501\n :type: Color\n '
self._background_color = background_color<|docstring|>Sets the background_color of this AnimationOutputSettings.
:param background_color: The background_color of this AnimationOutputSettings. # noqa: E501
:type: Color<|endoftext|>
|
5a4e41bb6a0def746593298cb605df98f1366e957c4ca89b12010ea7db707963
|
def to_dict(self):
    """Return the model properties as a dict.

    Any nested value exposing a ``to_dict`` method is serialized
    recursively; this also applies element-wise to lists and to the
    values of dict attributes.
    """
    result = {}
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [v.to_dict() if hasattr(v, 'to_dict') else v
                            for v in value]
        elif hasattr(value, 'to_dict'):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {k: v.to_dict() if hasattr(v, 'to_dict') else v
                            for k, v in value.items()}
        else:
            result[attr] = value
    return result
|
Returns the model properties as a dict
|
simscale_sdk/models/animation_output_settings.py
|
to_dict
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
    """Return the pretty-printed string form of the model's dict."""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)
|
Returns the string representation of the model
|
simscale_sdk/models/animation_output_settings.py
|
to_str
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
    """Return the model's string form; used by `print` and `pprint`."""
    return self.to_str()
|
For `print` and `pprint`
|
simscale_sdk/models/animation_output_settings.py
|
__repr__
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
9d88de8e4b93693624fec12c7a927bed2d1875c8c856cb529ba4c03a87f295a4
|
def __eq__(self, other):
    """Returns true if both objects are equal"""
    return (
        isinstance(other, AnimationOutputSettings)
        and self.to_dict() == other.to_dict()
    )
|
Returns true if both objects are equal
|
simscale_sdk/models/animation_output_settings.py
|
__eq__
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
def __eq__(self, other):
if (not isinstance(other, AnimationOutputSettings)):
return False
return (self.to_dict() == other.to_dict())
|
def __eq__(self, other):
if (not isinstance(other, AnimationOutputSettings)):
return False
return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|>
|
93625ef6cbd1b0a64739a96b1ec915d8bc4d951a3686efad78fbe1033bb7257a
|
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    return (
        not isinstance(other, AnimationOutputSettings)
        or self.to_dict() != other.to_dict()
    )
|
Returns true if both objects are not equal
|
simscale_sdk/models/animation_output_settings.py
|
__ne__
|
SimScaleGmbH/simscale-python-sdk
| 8 |
python
|
def __ne__(self, other):
if (not isinstance(other, AnimationOutputSettings)):
return True
return (self.to_dict() != other.to_dict())
|
def __ne__(self, other):
if (not isinstance(other, AnimationOutputSettings)):
return True
return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
13fcfedc2d9aa181f8aaebcb8d73a5be0efb4fc0733913ee2c908c87befd2796
|
def sam_simulation(meteo_data, verbose=False, **kwargs):
    """SAM performance simulation.

    Perform a PVWATTS simulation using the SAM python implementation.

    Args:
        meteo_data (pd.DataFrame): Solar radiation dataframe with a
            DatetimeIndex and columns 'DNI', 'DHI', 'Wind Speed',
            'Temperature'.
        verbose (bool): If True, print the type of each simulation parameter.
        kwargs (dict): Simulation parameters: lat, lng, timezone, elevation,
            system_capacity, dc_ac_ratio, inv_eff, losses, configuration,
            tilt.

    Returns:
        dict: when ``kwargs['configuration']`` is a dict, a mapping of each
            configuration key to its capacity factor plus ``gen_<key>``
            entries holding the annual generation.
        tuple: otherwise ``(meteo_data, (CP, generation))`` where
            ``meteo_data`` gains 'generation' and 'cf' columns.
    """
    params = {'lat': kwargs['lat'], 'lon': kwargs['lng'], 'timezone': kwargs['timezone'], 'elevation': kwargs['elevation'], 'sys_capacity': kwargs['system_capacity'], 'dc_ac_ratio': kwargs['dc_ac_ratio'], 'inv_eff': kwargs['inv_eff'], 'losses': kwargs['losses'], 'configuration': kwargs['configuration'], 'tilt': kwargs['tilt']}
    if verbose:
        print({key: type(value) for (key, value) in params.items()})
    # Build the SSC weather-data table from the meteo dataframe.
    ssc = sscapi.PySSC()
    wfd = ssc.data_create()
    ssc.data_set_number(wfd, 'lat', params['lat'])
    ssc.data_set_number(wfd, 'lon', params['lon'])
    ssc.data_set_number(wfd, 'tz', params['timezone'])
    ssc.data_set_number(wfd, 'elev', params['elevation'])
    ssc.data_set_array(wfd, 'year', meteo_data.index.year)
    ssc.data_set_array(wfd, 'month', meteo_data.index.month)
    ssc.data_set_array(wfd, 'day', meteo_data.index.day)
    ssc.data_set_array(wfd, 'hour', meteo_data.index.hour)
    ssc.data_set_array(wfd, 'minute', meteo_data.index.minute)
    ssc.data_set_array(wfd, 'dn', meteo_data['DNI'])
    ssc.data_set_array(wfd, 'df', meteo_data['DHI'])
    ssc.data_set_array(wfd, 'wspd', meteo_data['Wind Speed'])
    ssc.data_set_array(wfd, 'tdry', meteo_data['Temperature'])
    # System parameters for the pvwattsv5 module.
    dat = ssc.data_create()
    ssc.data_set_table(dat, 'solar_resource_data', wfd)
    ssc.data_free(wfd)
    ssc.data_set_number(dat, 'system_capacity', params['sys_capacity'])
    ssc.data_set_number(dat, 'dc_ac_ratio', params['dc_ac_ratio'])
    ssc.data_set_number(dat, 'tilt', params['tilt'])
    ssc.data_set_number(dat, 'azimuth', 180)
    ssc.data_set_number(dat, 'inv_eff', params['inv_eff'])
    ssc.data_set_number(dat, 'losses', params['losses'])
    ssc.data_set_number(dat, 'gcr', 0.4)
    ssc.data_set_number(dat, 'adjust:constant', 0)
    system_capacity = params['sys_capacity']
    value = params['configuration']
    # 525600 minutes per year / 60 -> 8760.0 hours; kept as float division
    # to preserve the original arithmetic (was `525600 / int('60')`).
    hours_per_year = 525600 / 60
    if isinstance(params['configuration'], dict):
        # Sweep every array-type configuration and collect capacity factors.
        # Fixed: `value.iteritems()` was Python-2-only (AttributeError on
        # Python 3, which this function requires for its f-strings below).
        d = {}
        for (key, val) in value.items():
            ssc.data_set_number(dat, 'array_type', val)
            mod = ssc.module_create('pvwattsv5')
            ssc.module_exec(mod, dat)
            meteo_data['generation'] = np.array(ssc.data_get_array(dat, 'gen'))
            CP = (meteo_data['generation'].sum() / (hours_per_year * system_capacity))
            generation = meteo_data['generation'].sum()
            d[key] = CP
            d[('gen_' + key)] = generation
            # Free each SSC module before the next iteration (the original
            # freed only the last one, leaking a module per configuration).
            ssc.module_free(mod)
        ssc.data_free(dat)
        return d
    else:
        ssc.data_set_number(dat, 'array_type', value)
        mod = ssc.module_create('pvwattsv5')
        ssc.module_exec(mod, dat)
        meteo_data['generation'] = np.array(ssc.data_get_array(dat, 'gen'))
        meteo_data['cf'] = (meteo_data['generation'] / system_capacity)
        CP = (meteo_data['generation'].sum() / (hours_per_year * system_capacity))
        generation = meteo_data['generation'].sum()
        click.secho('Output data from SAM.', fg='green')
        click.secho(f'Capacity Factor: {CP}', fg='blue')
        click.secho(f'Annual Generation: {generation}', fg='blue')
        ssc.data_free(dat)
        ssc.module_free(mod)
        # NOTE(review): `output_path` is a module-level name defined elsewhere
        # in this file -- presumably a pathlib.Path; confirm before relying
        # on the `/` operator here.
        meteo_data.to_csv((output_path / 'test.csv'))
        return (meteo_data, (CP, generation))
|
SAM performance simulation
Perform a PVWATTS simulation using the SAM python implementation.
Args:
meteo_data (pd.DataFrame): Solar radiation dataframe
kwargs (dictionary): Dictionary containing simulation parameters
Returns:
CP (float): Capacity factor
Generation (float): Generation over the year of simulation
meto_data (pd.DataFrame): Dataframe with hourly generation
|
swc/sam_simulation_cli.py
|
sam_simulation
|
pesap/swc
| 1 |
python
|
def sam_simulation(meteo_data, verbose=False, **kwargs):
' SAM performance simulation\n\n Perform a PVWATTS simulation using the SAM python implementation.\n Args:\n meteo_data (pd.DataFrame): Solar radiation dataframe\n kwargs (dictionary): Dictionary containing simulation parameters\n\n Returns:\n CP (float): Capacity factor\n Generation (float): Generation over the year of simulation\n meto_data (pd.DataFrame): Dataframe with hourly generation\n '
params = {'lat': kwargs['lat'], 'lon': kwargs['lng'], 'timezone': kwargs['timezone'], 'elevation': kwargs['elevation'], 'sys_capacity': kwargs['system_capacity'], 'dc_ac_ratio': kwargs['dc_ac_ratio'], 'inv_eff': kwargs['inv_eff'], 'losses': kwargs['losses'], 'configuration': kwargs['configuration'], 'tilt': kwargs['tilt']}
if verbose:
print({key: type(value) for (key, value) in params.items()})
ssc = sscapi.PySSC()
wfd = ssc.data_create()
ssc.data_set_number(wfd, 'lat', params['lat'])
ssc.data_set_number(wfd, 'lon', params['lon'])
ssc.data_set_number(wfd, 'tz', params['timezone'])
ssc.data_set_number(wfd, 'elev', params['elevation'])
ssc.data_set_array(wfd, 'year', meteo_data.index.year)
ssc.data_set_array(wfd, 'month', meteo_data.index.month)
ssc.data_set_array(wfd, 'day', meteo_data.index.day)
ssc.data_set_array(wfd, 'hour', meteo_data.index.hour)
ssc.data_set_array(wfd, 'minute', meteo_data.index.minute)
ssc.data_set_array(wfd, 'dn', meteo_data['DNI'])
ssc.data_set_array(wfd, 'df', meteo_data['DHI'])
ssc.data_set_array(wfd, 'wspd', meteo_data['Wind Speed'])
ssc.data_set_array(wfd, 'tdry', meteo_data['Temperature'])
dat = ssc.data_create()
ssc.data_set_table(dat, 'solar_resource_data', wfd)
ssc.data_free(wfd)
ssc.data_set_number(dat, 'system_capacity', params['sys_capacity'])
ssc.data_set_number(dat, 'dc_ac_ratio', params['dc_ac_ratio'])
ssc.data_set_number(dat, 'tilt', params['tilt'])
ssc.data_set_number(dat, 'azimuth', 180)
ssc.data_set_number(dat, 'inv_eff', params['inv_eff'])
ssc.data_set_number(dat, 'losses', params['losses'])
ssc.data_set_number(dat, 'gcr', 0.4)
ssc.data_set_number(dat, 'adjust:constant', 0)
system_capacity = params['sys_capacity']
value = params['configuration']
if isinstance(params['configuration'], dict):
d = {}
for (key, val) in value.iteritems():
ssc.data_set_number(dat, 'array_type', val)
mod = ssc.module_create('pvwattsv5')
ssc.module_exec(mod, dat)
meteo_data['generation'] = np.array(ssc.data_get_array(dat, 'gen'))
CP = (meteo_data['generation'].sum() / ((525600 / int('60')) * system_capacity))
generation = meteo_data['generation'].sum()
d[key] = CP
d[('gen_' + key)] = generation
ssc.data_free(dat)
ssc.module_free(mod)
return d
else:
ssc.data_set_number(dat, 'array_type', value)
mod = ssc.module_create('pvwattsv5')
ssc.module_exec(mod, dat)
meteo_data['generation'] = np.array(ssc.data_get_array(dat, 'gen'))
meteo_data['cf'] = (meteo_data['generation'] / system_capacity)
CP = (meteo_data['generation'].sum() / ((525600 / int('60')) * system_capacity))
generation = meteo_data['generation'].sum()
click.secho('Output data from SAM.', fg='green')
click.secho(f'Capacity Factor: {CP}', fg='blue')
click.secho(f'Annual Generation: {generation}', fg='blue')
ssc.data_free(dat)
ssc.module_free(mod)
meteo_data.to_csv((output_path / 'test.csv'))
return (meteo_data, (CP, generation))
return True
|
def sam_simulation(meteo_data, verbose=False, **kwargs):
' SAM performance simulation\n\n Perform a PVWATTS simulation using the SAM python implementation.\n Args:\n meteo_data (pd.DataFrame): Solar radiation dataframe\n kwargs (dictionary): Dictionary containing simulation parameters\n\n Returns:\n CP (float): Capacity factor\n Generation (float): Generation over the year of simulation\n meto_data (pd.DataFrame): Dataframe with hourly generation\n '
params = {'lat': kwargs['lat'], 'lon': kwargs['lng'], 'timezone': kwargs['timezone'], 'elevation': kwargs['elevation'], 'sys_capacity': kwargs['system_capacity'], 'dc_ac_ratio': kwargs['dc_ac_ratio'], 'inv_eff': kwargs['inv_eff'], 'losses': kwargs['losses'], 'configuration': kwargs['configuration'], 'tilt': kwargs['tilt']}
if verbose:
print({key: type(value) for (key, value) in params.items()})
ssc = sscapi.PySSC()
wfd = ssc.data_create()
ssc.data_set_number(wfd, 'lat', params['lat'])
ssc.data_set_number(wfd, 'lon', params['lon'])
ssc.data_set_number(wfd, 'tz', params['timezone'])
ssc.data_set_number(wfd, 'elev', params['elevation'])
ssc.data_set_array(wfd, 'year', meteo_data.index.year)
ssc.data_set_array(wfd, 'month', meteo_data.index.month)
ssc.data_set_array(wfd, 'day', meteo_data.index.day)
ssc.data_set_array(wfd, 'hour', meteo_data.index.hour)
ssc.data_set_array(wfd, 'minute', meteo_data.index.minute)
ssc.data_set_array(wfd, 'dn', meteo_data['DNI'])
ssc.data_set_array(wfd, 'df', meteo_data['DHI'])
ssc.data_set_array(wfd, 'wspd', meteo_data['Wind Speed'])
ssc.data_set_array(wfd, 'tdry', meteo_data['Temperature'])
dat = ssc.data_create()
ssc.data_set_table(dat, 'solar_resource_data', wfd)
ssc.data_free(wfd)
ssc.data_set_number(dat, 'system_capacity', params['sys_capacity'])
ssc.data_set_number(dat, 'dc_ac_ratio', params['dc_ac_ratio'])
ssc.data_set_number(dat, 'tilt', params['tilt'])
ssc.data_set_number(dat, 'azimuth', 180)
ssc.data_set_number(dat, 'inv_eff', params['inv_eff'])
ssc.data_set_number(dat, 'losses', params['losses'])
ssc.data_set_number(dat, 'gcr', 0.4)
ssc.data_set_number(dat, 'adjust:constant', 0)
system_capacity = params['sys_capacity']
value = params['configuration']
if isinstance(params['configuration'], dict):
d = {}
for (key, val) in value.iteritems():
ssc.data_set_number(dat, 'array_type', val)
mod = ssc.module_create('pvwattsv5')
ssc.module_exec(mod, dat)
meteo_data['generation'] = np.array(ssc.data_get_array(dat, 'gen'))
CP = (meteo_data['generation'].sum() / ((525600 / int('60')) * system_capacity))
generation = meteo_data['generation'].sum()
d[key] = CP
d[('gen_' + key)] = generation
ssc.data_free(dat)
ssc.module_free(mod)
return d
else:
ssc.data_set_number(dat, 'array_type', value)
mod = ssc.module_create('pvwattsv5')
ssc.module_exec(mod, dat)
meteo_data['generation'] = np.array(ssc.data_get_array(dat, 'gen'))
meteo_data['cf'] = (meteo_data['generation'] / system_capacity)
CP = (meteo_data['generation'].sum() / ((525600 / int('60')) * system_capacity))
generation = meteo_data['generation'].sum()
click.secho('Output data from SAM.', fg='green')
click.secho(f'Capacity Factor: {CP}', fg='blue')
click.secho(f'Annual Generation: {generation}', fg='blue')
ssc.data_free(dat)
ssc.module_free(mod)
meteo_data.to_csv((output_path / 'test.csv'))
return (meteo_data, (CP, generation))
return True<|docstring|>SAM performance simulation
Perform a PVWATTS simulation using the SAM python implementation.
Args:
meteo_data (pd.DataFrame): Solar radiation dataframe
kwargs (dictionary): Dictionary containing simulation parameters
Returns:
CP (float): Capacity factor
Generation (float): Generation over the year of simulation
meto_data (pd.DataFrame): Dataframe with hourly generation<|endoftext|>
|
a2246424dffa900fab5dc3fca1512b765b7d77d0734288dfcc97daa6acdafb05
|
def __init__(self, x, weights=None, preshuffle=True):
    """Initialize tracked random sampling object.

    Parameters
    ----------
    x : Collection as a valid input to `list()`.
        Represents elements, from which samples should be drawn.
    weights : As `weights` argument in `random.choices()`, optional
        Weights of pool indices. `None` is treated as equal weights of
        necessary length.
    preshuffle : bool, optional
        Whether to shuffle the pool once during creation. Useful if input
        is in no particular order and random sampling should start right
        away (the most common situation).
    """
    pool = list(x)
    if preshuffle:
        random.shuffle(pool)
    self._pool = pool
    self._inds = list(range(len(x)))
    self._weights = weights
|
Initialize tracked random sampling object
Parameters
----------
x : Collection as a valid input to `list()`.
Represents elements, from which samples should be drawn.
weights : As `weights` argument in `random.choices()`, optional
Weights of pool indices. `None` is treated as equal weights of
necessary length.
preshuffle : bool, optional
Whether to shuffle a pool once during creation. Useful if input is
in no particular order and there is a need to start random sampling
right away (the most common situation).
|
tracked-sample/tracked_sample.py
|
__init__
|
echasnovski/curiosity-projects
| 0 |
python
|
def __init__(self, x, weights=None, preshuffle=True):
'Initialize tracked random sampling object\n\n Parameters\n ----------\n x : Collection as a valid input to `list()`.\n Represents elements, from which samples should be drawn.\n weights : As `weights` argument in `random.choices()`, optional\n Weights of pool indices. `None` is treated as equal weights of\n necessary length.\n preshuffle : bool, optional\n Whether to shuffle a pool once during creation. Useful if input is\n in no particular order and there is a need to start random sampling\n right away (the most common situation).\n '
self._pool = list(x)
if preshuffle:
random.shuffle(self._pool)
self._inds = list(range(len(x)))
self._weights = weights
|
def __init__(self, x, weights=None, preshuffle=True):
'Initialize tracked random sampling object\n\n Parameters\n ----------\n x : Collection as a valid input to `list()`.\n Represents elements, from which samples should be drawn.\n weights : As `weights` argument in `random.choices()`, optional\n Weights of pool indices. `None` is treated as equal weights of\n necessary length.\n preshuffle : bool, optional\n Whether to shuffle a pool once during creation. Useful if input is\n in no particular order and there is a need to start random sampling\n right away (the most common situation).\n '
self._pool = list(x)
if preshuffle:
random.shuffle(self._pool)
self._inds = list(range(len(x)))
self._weights = weights<|docstring|>Initialize tracked random sampling object
Parameters
----------
x : Collection as a valid input to `list()`.
Represents elements, from which samples should be drawn.
weights : As `weights` argument in `random.choices()`, optional
Weights of pool indices. `None` is treated as equal weights of
necessary length.
preshuffle : bool, optional
Whether to shuffle a pool once during creation. Useful if input is
in no particular order and there is a need to start random sampling
right away (the most common situation).<|endoftext|>
|
d9d4d1e71ddd01ea74f8d583d641ff6712b86c52d4fa3c343a9505c4830301db
|
def _pop(self, index):
    """Extract the pool item at *index*, move it to the front, return it."""
    item = self._pool.pop(index)
    self._pool.insert(0, item)
    return item
|
Return item at index and place it at the beginning
|
tracked-sample/tracked_sample.py
|
_pop
|
echasnovski/curiosity-projects
| 0 |
python
|
def _pop(self, index):
res = self._pool.pop(index)
self._pool.insert(0, res)
return res
|
def _pop(self, index):
res = self._pool.pop(index)
self._pool.insert(0, res)
return res<|docstring|>Return item at index and place it at the beginning<|endoftext|>
|
02cf9073f6aae87b142bb97e775874a2030bb67a398e68e1874485b9ede808db
|
def draw(self, size=1):
    """Draw a sample of elements.

    Each draw samples (with the configured `weights`) a random index into
    the current pool, "pops" that element -- extracting it and moving it
    to the front of the pool -- and collects it.

    Parameters
    ----------
    size : int, optional
        Size of sample to draw, by default 1.

    Returns
    -------
    list
        List of drawn elements.
    """
    picked = random.choices(self._inds, weights=self._weights, k=size)
    drawn = []
    for idx in picked:
        drawn.append(self._pop(idx))
    return drawn
|
Draw sample
Draw sample based on the following algorithm:
- Randomly sample with supplied `weights` an index of the current pool,
at which element will be drawn.
- "Pop" this element from the pool: extract it, put at the beginning of
the pool, and return it.
- Repeat.
Parameters
----------
size : int, optional
Size of sample to draw, by default 1
Returns
-------
draw : list
List of drawn elements
|
tracked-sample/tracked_sample.py
|
draw
|
echasnovski/curiosity-projects
| 0 |
python
|
def draw(self, size=1):
'Draw sample\n\n Draw sample based on the following algorithm:\n - Randomly sample with supplied `weights` an index of the current pool,\n at which element will be drawn.\n - "Pop" this element from the pool: extract it, put at the beginning of\n the pool, and return it.\n - Repeat.\n\n Parameters\n ----------\n size : int, optional\n Size of sample to draw, by default 1\n\n Returns\n -------\n draw : list\n List of drawn elements\n '
inds = random.choices(self._inds, weights=self._weights, k=size)
return [self._pop(i) for i in inds]
|
def draw(self, size=1):
'Draw sample\n\n Draw sample based on the following algorithm:\n - Randomly sample with supplied `weights` an index of the current pool,\n at which element will be drawn.\n - "Pop" this element from the pool: extract it, put at the beginning of\n the pool, and return it.\n - Repeat.\n\n Parameters\n ----------\n size : int, optional\n Size of sample to draw, by default 1\n\n Returns\n -------\n draw : list\n List of drawn elements\n '
inds = random.choices(self._inds, weights=self._weights, k=size)
return [self._pop(i) for i in inds]<|docstring|>Draw sample
Draw sample based on the following algorithm:
- Randomly sample with supplied `weights` an index of the current pool,
at which element will be drawn.
- "Pop" this element from the pool: extract it, put at the beginning of
the pool, and return it.
- Repeat.
Parameters
----------
size : int, optional
Size of sample to draw, by default 1
Returns
-------
draw : list
List of drawn elements<|endoftext|>
|
fd6fe24f09cdab317d6d6e46c3146be69d1a102b7b39f9d688ceba307ab985a6
|
def __len__(self):
'This method is called when you do len(instance)\n for an instance of this class.\n '
return len(self.samples_frame)
|
This method is called when you do len(instance)
for an instance of this class.
|
src/datamodules/datasets/hateful_memes_dataset.py
|
__len__
|
nav13n/multimodal-learning
| 1 |
python
|
def __len__(self):
'This method is called when you do len(instance)\n for an instance of this class.\n '
return len(self.samples_frame)
|
def __len__(self):
'This method is called when you do len(instance)\n for an instance of this class.\n '
return len(self.samples_frame)<|docstring|>This method is called when you do len(instance)
for an instance of this class.<|endoftext|>
|
b5125d009972dd1c306617266f1e03610b711528c95b925080beaaaf93188fdc
|
def __getitem__(self, idx):
'This method is called when you do instance[key]\n for an instance of this class.\n '
if torch.is_tensor(idx):
idx = idx.tolist()
img_id = self.samples_frame.loc[(idx, 'id')]
image = Image.open(self.samples_frame.loc[(idx, 'img')]).convert('RGB')
image = self.image_transform(image)
text = self.transform_text(self.samples_frame.loc[(idx, 'text')])
if ('label' in self.samples_frame.columns):
label = torch.Tensor([self.samples_frame.loc[(idx, 'label')]]).long().squeeze()
sample = {'id': img_id, 'image': image, 'text': text, 'label': label}
else:
sample = {'id': img_id, 'image': image, 'text': text}
return sample
|
This method is called when you do instance[key]
for an instance of this class.
|
src/datamodules/datasets/hateful_memes_dataset.py
|
__getitem__
|
nav13n/multimodal-learning
| 1 |
python
|
def __getitem__(self, idx):
'This method is called when you do instance[key]\n for an instance of this class.\n '
if torch.is_tensor(idx):
idx = idx.tolist()
img_id = self.samples_frame.loc[(idx, 'id')]
image = Image.open(self.samples_frame.loc[(idx, 'img')]).convert('RGB')
image = self.image_transform(image)
text = self.transform_text(self.samples_frame.loc[(idx, 'text')])
if ('label' in self.samples_frame.columns):
label = torch.Tensor([self.samples_frame.loc[(idx, 'label')]]).long().squeeze()
sample = {'id': img_id, 'image': image, 'text': text, 'label': label}
else:
sample = {'id': img_id, 'image': image, 'text': text}
return sample
|
def __getitem__(self, idx):
'This method is called when you do instance[key]\n for an instance of this class.\n '
if torch.is_tensor(idx):
idx = idx.tolist()
img_id = self.samples_frame.loc[(idx, 'id')]
image = Image.open(self.samples_frame.loc[(idx, 'img')]).convert('RGB')
image = self.image_transform(image)
text = self.transform_text(self.samples_frame.loc[(idx, 'text')])
if ('label' in self.samples_frame.columns):
label = torch.Tensor([self.samples_frame.loc[(idx, 'label')]]).long().squeeze()
sample = {'id': img_id, 'image': image, 'text': text, 'label': label}
else:
sample = {'id': img_id, 'image': image, 'text': text}
return sample<|docstring|>This method is called when you do instance[key]
for an instance of this class.<|endoftext|>
|
ad06a2a5db4b8a925a41efd721b968fbcbff81e0e8e02e612d0f63ed62e8b497
|
def get_all_virtual_machines(self):
'\n Function to get all virtual machines and related configurations information\n '
virtual_machines = get_all_objs(self.content, [vim.VirtualMachine])
_virtual_machines = {}
for vm in virtual_machines:
_ip_address = ''
summary = vm.summary
if (summary.guest is not None):
_ip_address = summary.guest.ipAddress
if (_ip_address is None):
_ip_address = ''
_mac_address = []
all_devices = _get_vm_prop(vm, ('config', 'hardware', 'device'))
if all_devices:
for dev in all_devices:
if isinstance(dev, vim.vm.device.VirtualEthernetCard):
_mac_address.append(dev.macAddress)
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = dict()
net_dict[device.macAddress]['ipv4'] = []
net_dict[device.macAddress]['ipv6'] = []
for ip_addr in device.ipAddress:
if ('::' in ip_addr):
net_dict[device.macAddress]['ipv6'].append(ip_addr)
else:
net_dict[device.macAddress]['ipv4'].append(ip_addr)
esxi_hostname = None
esxi_parent = None
if summary.runtime.host:
esxi_hostname = summary.runtime.host.summary.config.name
esxi_parent = summary.runtime.host.parent
cluster_name = None
if (esxi_parent and isinstance(esxi_parent, vim.ClusterComputeResource)):
cluster_name = summary.runtime.host.parent.name
virtual_machine = {summary.config.name: {'guest_fullname': summary.config.guestFullName, 'power_state': summary.runtime.powerState, 'ip_address': _ip_address, 'mac_address': _mac_address, 'uuid': summary.config.uuid, 'vm_network': net_dict, 'esxi_hostname': esxi_hostname, 'cluster': cluster_name}}
vm_type = self.module.params.get('vm_type')
is_template = _get_vm_prop(vm, ('config', 'template'))
if ((vm_type == 'vm') and (not is_template)):
_virtual_machines.update(virtual_machine)
elif ((vm_type == 'template') and is_template):
_virtual_machines.update(virtual_machine)
elif (vm_type == 'all'):
_virtual_machines.update(virtual_machine)
return _virtual_machines
|
Function to get all virtual machines and related configurations information
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_vm_facts.py
|
get_all_virtual_machines
|
otus-devops-2019-02/yyashkin_infra
| 1 |
python
|
def get_all_virtual_machines(self):
'\n \n '
virtual_machines = get_all_objs(self.content, [vim.VirtualMachine])
_virtual_machines = {}
for vm in virtual_machines:
_ip_address =
summary = vm.summary
if (summary.guest is not None):
_ip_address = summary.guest.ipAddress
if (_ip_address is None):
_ip_address =
_mac_address = []
all_devices = _get_vm_prop(vm, ('config', 'hardware', 'device'))
if all_devices:
for dev in all_devices:
if isinstance(dev, vim.vm.device.VirtualEthernetCard):
_mac_address.append(dev.macAddress)
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = dict()
net_dict[device.macAddress]['ipv4'] = []
net_dict[device.macAddress]['ipv6'] = []
for ip_addr in device.ipAddress:
if ('::' in ip_addr):
net_dict[device.macAddress]['ipv6'].append(ip_addr)
else:
net_dict[device.macAddress]['ipv4'].append(ip_addr)
esxi_hostname = None
esxi_parent = None
if summary.runtime.host:
esxi_hostname = summary.runtime.host.summary.config.name
esxi_parent = summary.runtime.host.parent
cluster_name = None
if (esxi_parent and isinstance(esxi_parent, vim.ClusterComputeResource)):
cluster_name = summary.runtime.host.parent.name
virtual_machine = {summary.config.name: {'guest_fullname': summary.config.guestFullName, 'power_state': summary.runtime.powerState, 'ip_address': _ip_address, 'mac_address': _mac_address, 'uuid': summary.config.uuid, 'vm_network': net_dict, 'esxi_hostname': esxi_hostname, 'cluster': cluster_name}}
vm_type = self.module.params.get('vm_type')
is_template = _get_vm_prop(vm, ('config', 'template'))
if ((vm_type == 'vm') and (not is_template)):
_virtual_machines.update(virtual_machine)
elif ((vm_type == 'template') and is_template):
_virtual_machines.update(virtual_machine)
elif (vm_type == 'all'):
_virtual_machines.update(virtual_machine)
return _virtual_machines
|
def get_all_virtual_machines(self):
'\n \n '
virtual_machines = get_all_objs(self.content, [vim.VirtualMachine])
_virtual_machines = {}
for vm in virtual_machines:
_ip_address =
summary = vm.summary
if (summary.guest is not None):
_ip_address = summary.guest.ipAddress
if (_ip_address is None):
_ip_address =
_mac_address = []
all_devices = _get_vm_prop(vm, ('config', 'hardware', 'device'))
if all_devices:
for dev in all_devices:
if isinstance(dev, vim.vm.device.VirtualEthernetCard):
_mac_address.append(dev.macAddress)
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = dict()
net_dict[device.macAddress]['ipv4'] = []
net_dict[device.macAddress]['ipv6'] = []
for ip_addr in device.ipAddress:
if ('::' in ip_addr):
net_dict[device.macAddress]['ipv6'].append(ip_addr)
else:
net_dict[device.macAddress]['ipv4'].append(ip_addr)
esxi_hostname = None
esxi_parent = None
if summary.runtime.host:
esxi_hostname = summary.runtime.host.summary.config.name
esxi_parent = summary.runtime.host.parent
cluster_name = None
if (esxi_parent and isinstance(esxi_parent, vim.ClusterComputeResource)):
cluster_name = summary.runtime.host.parent.name
virtual_machine = {summary.config.name: {'guest_fullname': summary.config.guestFullName, 'power_state': summary.runtime.powerState, 'ip_address': _ip_address, 'mac_address': _mac_address, 'uuid': summary.config.uuid, 'vm_network': net_dict, 'esxi_hostname': esxi_hostname, 'cluster': cluster_name}}
vm_type = self.module.params.get('vm_type')
is_template = _get_vm_prop(vm, ('config', 'template'))
if ((vm_type == 'vm') and (not is_template)):
_virtual_machines.update(virtual_machine)
elif ((vm_type == 'template') and is_template):
_virtual_machines.update(virtual_machine)
elif (vm_type == 'all'):
_virtual_machines.update(virtual_machine)
return _virtual_machines<|docstring|>Function to get all virtual machines and related configurations information<|endoftext|>
|
d117dba1626348355fa0ea7455cecd48140fce774cc5399b0e46102786489bf4
|
def question(bot, msg):
'Provide information about a Stack Exchange question.'
(domain, question_id) = msg.match.groups()
site = _sites().get(domain)
if (site is not None):
question = _question_info(site, question_id)
msg.respond(_format_question(question, site), ping=False)
|
Provide information about a Stack Exchange question.
|
ircbot/plugin/stack_exchange.py
|
question
|
singingtelegram/ircbot
| 10 |
python
|
def question(bot, msg):
(domain, question_id) = msg.match.groups()
site = _sites().get(domain)
if (site is not None):
question = _question_info(site, question_id)
msg.respond(_format_question(question, site), ping=False)
|
def question(bot, msg):
(domain, question_id) = msg.match.groups()
site = _sites().get(domain)
if (site is not None):
question = _question_info(site, question_id)
msg.respond(_format_question(question, site), ping=False)<|docstring|>Provide information about a Stack Exchange question.<|endoftext|>
|
897f403ff618da9a123e6d1613f5a6cfabca903597ff80df692bbd1f3a4f82e6
|
def answer(bot, msg):
'Provide information about a Stack Exchange answer (or really just the question).'
(domain, answer_id) = msg.match.groups()
site = _sites().get(domain)
if (site is not None):
answer = _answer_info(site, answer_id)
question = _question_info(site, answer.question_id)
msg.respond(_format_question(question, site), ping=False)
|
Provide information about a Stack Exchange answer (or really just the question).
|
ircbot/plugin/stack_exchange.py
|
answer
|
singingtelegram/ircbot
| 10 |
python
|
def answer(bot, msg):
(domain, answer_id) = msg.match.groups()
site = _sites().get(domain)
if (site is not None):
answer = _answer_info(site, answer_id)
question = _question_info(site, answer.question_id)
msg.respond(_format_question(question, site), ping=False)
|
def answer(bot, msg):
(domain, answer_id) = msg.match.groups()
site = _sites().get(domain)
if (site is not None):
answer = _answer_info(site, answer_id)
question = _question_info(site, answer.question_id)
msg.respond(_format_question(question, site), ping=False)<|docstring|>Provide information about a Stack Exchange answer (or really just the question).<|endoftext|>
|
d5cea7063376903d8a03cc0ee3c960ef3f660c8876cfacd0c0b10c1bdc64adfe
|
@unittest.skipIf((platform.system() == 'Windows'), 'Platform currently not supported.')
def test__collectExtendedAttributes_on_file_with_ext_attribs(self):
'\n Should store the extended attributes in data\n :return:\n '
from artefact.localhost.file import FileMetadata
expected_data = 'Extended Attributes: com.apple.FinderInfo\n'
expected_filepath = '/opt/local/var/macports'
collector: FileMetadata = FileMetadata(parameters={}, parent=None)
collector.source_path = expected_filepath
collector._collect_extended_attributes()
actual_data = collector.data[(- 1)].collecteddata
self.assertEqual(expected_data, actual_data)
|
Should store the extended attributes in data
:return:
|
tests/integration_test.py
|
test__collectExtendedAttributes_on_file_with_ext_attribs
|
3Peso/mosk
| 3 |
python
|
@unittest.skipIf((platform.system() == 'Windows'), 'Platform currently not supported.')
def test__collectExtendedAttributes_on_file_with_ext_attribs(self):
'\n Should store the extended attributes in data\n :return:\n '
from artefact.localhost.file import FileMetadata
expected_data = 'Extended Attributes: com.apple.FinderInfo\n'
expected_filepath = '/opt/local/var/macports'
collector: FileMetadata = FileMetadata(parameters={}, parent=None)
collector.source_path = expected_filepath
collector._collect_extended_attributes()
actual_data = collector.data[(- 1)].collecteddata
self.assertEqual(expected_data, actual_data)
|
@unittest.skipIf((platform.system() == 'Windows'), 'Platform currently not supported.')
def test__collectExtendedAttributes_on_file_with_ext_attribs(self):
'\n Should store the extended attributes in data\n :return:\n '
from artefact.localhost.file import FileMetadata
expected_data = 'Extended Attributes: com.apple.FinderInfo\n'
expected_filepath = '/opt/local/var/macports'
collector: FileMetadata = FileMetadata(parameters={}, parent=None)
collector.source_path = expected_filepath
collector._collect_extended_attributes()
actual_data = collector.data[(- 1)].collecteddata
self.assertEqual(expected_data, actual_data)<|docstring|>Should store the extended attributes in data
:return:<|endoftext|>
|
8765a773c58f4e253ff26b2f8b9074e831b1b54876cbcf14a4d713da213177ea
|
def test__collect(self):
'\n Data should be initialized with NTPTime object.\n :return:\n '
actual_collector = TimeFromNTPServer(parameters={'timeserver': '0.de.pool.ntp.org'}, parent=None)
actual_collector._collect()
self.assertIsInstance(actual_collector.data[0].collecteddata, str)
|
Data should be initialized with NTPTime object.
:return:
|
tests/integration_test.py
|
test__collect
|
3Peso/mosk
| 3 |
python
|
def test__collect(self):
'\n Data should be initialized with NTPTime object.\n :return:\n '
actual_collector = TimeFromNTPServer(parameters={'timeserver': '0.de.pool.ntp.org'}, parent=None)
actual_collector._collect()
self.assertIsInstance(actual_collector.data[0].collecteddata, str)
|
def test__collect(self):
'\n Data should be initialized with NTPTime object.\n :return:\n '
actual_collector = TimeFromNTPServer(parameters={'timeserver': '0.de.pool.ntp.org'}, parent=None)
actual_collector._collect()
self.assertIsInstance(actual_collector.data[0].collecteddata, str)<|docstring|>Data should be initialized with NTPTime object.
:return:<|endoftext|>
|
078db38f72da51a1c10b74b1dca3c6d8a841c8b45a474e410c408f46a3b35d71
|
def test__collect_no_ntp_server(self):
'\n Should use default ntp server.\n :return:\n '
expected_ntp_server = '0.de.pool.ntp.org'
actual_collector = TimeFromNTPServer(parameters={}, parent=None)
actual_collector._collect()
self.assertEqual(actual_collector.timeserver, expected_ntp_server)
|
Should use default ntp server.
:return:
|
tests/integration_test.py
|
test__collect_no_ntp_server
|
3Peso/mosk
| 3 |
python
|
def test__collect_no_ntp_server(self):
'\n Should use default ntp server.\n :return:\n '
expected_ntp_server = '0.de.pool.ntp.org'
actual_collector = TimeFromNTPServer(parameters={}, parent=None)
actual_collector._collect()
self.assertEqual(actual_collector.timeserver, expected_ntp_server)
|
def test__collect_no_ntp_server(self):
'\n Should use default ntp server.\n :return:\n '
expected_ntp_server = '0.de.pool.ntp.org'
actual_collector = TimeFromNTPServer(parameters={}, parent=None)
actual_collector._collect()
self.assertEqual(actual_collector.timeserver, expected_ntp_server)<|docstring|>Should use default ntp server.
:return:<|endoftext|>
|
ddb6336a31904d34a464852821ff5845e72228ca137e048c402231d6c0da77e7
|
def glyphList():
'Return a list of glyphs saved already.'
ls = sorted(os.listdir('glyphs'))
ret = []
for l in ls:
if ('.json' != l[(- 5):]):
wrn(('%s is not named correctly.' % l))
else:
ret.append(l[:(- 5)])
return ret
|
Return a list of glyphs saved already.
|
glyph.py
|
glyphList
|
sangh/LaserShow
| 0 |
python
|
def glyphList():
ls = sorted(os.listdir('glyphs'))
ret = []
for l in ls:
if ('.json' != l[(- 5):]):
wrn(('%s is not named correctly.' % l))
else:
ret.append(l[:(- 5)])
return ret
|
def glyphList():
ls = sorted(os.listdir('glyphs'))
ret = []
for l in ls:
if ('.json' != l[(- 5):]):
wrn(('%s is not named correctly.' % l))
else:
ret.append(l[:(- 5)])
return ret<|docstring|>Return a list of glyphs saved already.<|endoftext|>
|
1435c9949e4c1c9d6855dc8f0b851b87af298fc7e06c11fc76cd45c6057ac05b
|
def interpolateEvenSpacedPtsOnALine(nPts, pt1, pt2):
'Return a list of nPts between pt1 and pt2 not inc. pt1 but inc. pt2.'
'So pt2 is always the last pt in the list, and the list is nPts long.'
expath = []
xOffset = (float((pt2[0] - pt1[0])) / nPts)
yOffset = (float((pt2[1] - pt1[1])) / nPts)
for i in range(1, nPts):
newX = int((((i * xOffset) + pt1[0]) // 1))
newY = int((((i * yOffset) + pt1[1]) // 1))
expath.append((newX, newY))
expath.append(pt2)
return expath
|
Return a list of nPts between pt1 and pt2 not inc. pt1 but inc. pt2.
|
glyph.py
|
interpolateEvenSpacedPtsOnALine
|
sangh/LaserShow
| 0 |
python
|
def interpolateEvenSpacedPtsOnALine(nPts, pt1, pt2):
'So pt2 is always the last pt in the list, and the list is nPts long.'
expath = []
xOffset = (float((pt2[0] - pt1[0])) / nPts)
yOffset = (float((pt2[1] - pt1[1])) / nPts)
for i in range(1, nPts):
newX = int((((i * xOffset) + pt1[0]) // 1))
newY = int((((i * yOffset) + pt1[1]) // 1))
expath.append((newX, newY))
expath.append(pt2)
return expath
|
def interpolateEvenSpacedPtsOnALine(nPts, pt1, pt2):
'So pt2 is always the last pt in the list, and the list is nPts long.'
expath = []
xOffset = (float((pt2[0] - pt1[0])) / nPts)
yOffset = (float((pt2[1] - pt1[1])) / nPts)
for i in range(1, nPts):
newX = int((((i * xOffset) + pt1[0]) // 1))
newY = int((((i * yOffset) + pt1[1]) // 1))
expath.append((newX, newY))
expath.append(pt2)
return expath<|docstring|>Return a list of nPts between pt1 and pt2 not inc. pt1 but inc. pt2.<|endoftext|>
|
3412e9c45d723ed02aae84a8c5853d2b510b250e362ca8a11ab512b8cfaa32c9
|
def glyphExpandToPts(nPoints, glyph):
'Return the glyph expanded to nPoints triplets.'
lenTot = 0.0
lit = True
lpt = None
dummyPts = 0
for pt in glyph['path']:
if ('on' == pt):
lit = True
elif ('off' == pt):
lit = False
else:
if lit:
d = distanceEuclidean(lpt, pt)
if (0.0 == d):
dummyPts = (dummyPts + 1)
lenTot = (lenTot + d)
else:
dummyPts = (dummyPts + 1)
lpt = pt
expandToPts = (nPoints - dummyPts)
if (len(filter((lambda p: (not isinstance(p, str))), glyph['path'])) >= expandToPts):
raise SyntaxError('nPoints bigger than point-points in path?!?')
def ptToTriplet(lit, pt):
if lit:
blanked = 0
else:
blanked = 1
return (pt[0], pt[1], blanked)
expath = []
lit = True
lpt = None
for pt in glyph['path']:
if ('on' == pt):
lit = True
elif ('off' == pt):
lit = False
else:
if ((lpt is None) or (not lit)):
expath.append(ptToTriplet(lit, pt))
else:
dist = distanceEuclidean(lpt, pt)
nPtsToAdd = int((((expandToPts * dist) / lenTot) // 1))
if (0 < nPtsToAdd):
interPts = interpolateEvenSpacedPtsOnALine(nPtsToAdd, lpt, pt)
expath = (expath + map((lambda p: ptToTriplet(lit, p)), interPts))
else:
expath.append(ptToTriplet(lit, pt))
lpt = pt
le = len(expath)
if (le > nPoints):
wrn(('Truncated %d from glyph, the glyphExpandToPts fn is broken.' % (le - nPoints)))
return expath[0:nPoints]
elif (le < nPoints):
return (expath + ((nPoints - le) * [expath[(- 1)]]))
else:
return expath
|
Return the glyph expanded to nPoints triplets.
|
glyph.py
|
glyphExpandToPts
|
sangh/LaserShow
| 0 |
python
|
def glyphExpandToPts(nPoints, glyph):
lenTot = 0.0
lit = True
lpt = None
dummyPts = 0
for pt in glyph['path']:
if ('on' == pt):
lit = True
elif ('off' == pt):
lit = False
else:
if lit:
d = distanceEuclidean(lpt, pt)
if (0.0 == d):
dummyPts = (dummyPts + 1)
lenTot = (lenTot + d)
else:
dummyPts = (dummyPts + 1)
lpt = pt
expandToPts = (nPoints - dummyPts)
if (len(filter((lambda p: (not isinstance(p, str))), glyph['path'])) >= expandToPts):
raise SyntaxError('nPoints bigger than point-points in path?!?')
def ptToTriplet(lit, pt):
if lit:
blanked = 0
else:
blanked = 1
return (pt[0], pt[1], blanked)
expath = []
lit = True
lpt = None
for pt in glyph['path']:
if ('on' == pt):
lit = True
elif ('off' == pt):
lit = False
else:
if ((lpt is None) or (not lit)):
expath.append(ptToTriplet(lit, pt))
else:
dist = distanceEuclidean(lpt, pt)
nPtsToAdd = int((((expandToPts * dist) / lenTot) // 1))
if (0 < nPtsToAdd):
interPts = interpolateEvenSpacedPtsOnALine(nPtsToAdd, lpt, pt)
expath = (expath + map((lambda p: ptToTriplet(lit, p)), interPts))
else:
expath.append(ptToTriplet(lit, pt))
lpt = pt
le = len(expath)
if (le > nPoints):
wrn(('Truncated %d from glyph, the glyphExpandToPts fn is broken.' % (le - nPoints)))
return expath[0:nPoints]
elif (le < nPoints):
return (expath + ((nPoints - le) * [expath[(- 1)]]))
else:
return expath
|
def glyphExpandToPts(nPoints, glyph):
lenTot = 0.0
lit = True
lpt = None
dummyPts = 0
for pt in glyph['path']:
if ('on' == pt):
lit = True
elif ('off' == pt):
lit = False
else:
if lit:
d = distanceEuclidean(lpt, pt)
if (0.0 == d):
dummyPts = (dummyPts + 1)
lenTot = (lenTot + d)
else:
dummyPts = (dummyPts + 1)
lpt = pt
expandToPts = (nPoints - dummyPts)
if (len(filter((lambda p: (not isinstance(p, str))), glyph['path'])) >= expandToPts):
raise SyntaxError('nPoints bigger than point-points in path?!?')
def ptToTriplet(lit, pt):
if lit:
blanked = 0
else:
blanked = 1
return (pt[0], pt[1], blanked)
expath = []
lit = True
lpt = None
for pt in glyph['path']:
if ('on' == pt):
lit = True
elif ('off' == pt):
lit = False
else:
if ((lpt is None) or (not lit)):
expath.append(ptToTriplet(lit, pt))
else:
dist = distanceEuclidean(lpt, pt)
nPtsToAdd = int((((expandToPts * dist) / lenTot) // 1))
if (0 < nPtsToAdd):
interPts = interpolateEvenSpacedPtsOnALine(nPtsToAdd, lpt, pt)
expath = (expath + map((lambda p: ptToTriplet(lit, p)), interPts))
else:
expath.append(ptToTriplet(lit, pt))
lpt = pt
le = len(expath)
if (le > nPoints):
wrn(('Truncated %d from glyph, the glyphExpandToPts fn is broken.' % (le - nPoints)))
return expath[0:nPoints]
elif (le < nPoints):
return (expath + ((nPoints - le) * [expath[(- 1)]]))
else:
return expath<|docstring|>Return the glyph expanded to nPoints triplets.<|endoftext|>
|
9174be77f5fa09792b6f207905e2a70a4f22d682593edc95d755070eefdb3694
|
def available_friendly_bases(self, playerId):
'\n Return list of available bases regarding a given player\n '
res = [base for (base, basePlayerId) in self.bases.items() if ((basePlayerId == playerId) and (self.board[base]['coinId'] is None))]
return res
|
Return list of available bases regarding a given player
|
warchest/board.py
|
available_friendly_bases
|
pprablanc/warchest
| 0 |
python
|
def available_friendly_bases(self, playerId):
'\n \n '
res = [base for (base, basePlayerId) in self.bases.items() if ((basePlayerId == playerId) and (self.board[base]['coinId'] is None))]
return res
|
def available_friendly_bases(self, playerId):
'\n \n '
res = [base for (base, basePlayerId) in self.bases.items() if ((basePlayerId == playerId) and (self.board[base]['coinId'] is None))]
return res<|docstring|>Return list of available bases regarding a given player<|endoftext|>
|
f7038c38c2978c15ba48a76cbec8b928c0e86bba3a12583fddc910f2408b4daf
|
def check_unit_deployed(self, coinId):
'\n Return 1 if unit is already deployed on the board\n '
for unit in self.units_deployed.keys():
if (unit == coinId):
return 1
return 0
|
Return 1 if unit is already deployed on the board
|
warchest/board.py
|
check_unit_deployed
|
pprablanc/warchest
| 0 |
python
|
def check_unit_deployed(self, coinId):
'\n \n '
for unit in self.units_deployed.keys():
if (unit == coinId):
return 1
return 0
|
def check_unit_deployed(self, coinId):
'\n \n '
for unit in self.units_deployed.keys():
if (unit == coinId):
return 1
return 0<|docstring|>Return 1 if unit is already deployed on the board<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.