code | docstring
---|---|
def split_next_and_previous_event_columns(self, requested_columns):
def next_or_previous(c):
if c in self.next_value_columns:
return 'next'
elif c in self.previous_value_columns:
return 'previous'
raise ValueError(
"{c} not found in next_value_columns "
"or previous_value_columns".format(c=c)
)
groups = groupby(next_or_previous, requested_columns)
return groups.get('next', ()), groups.get('previous', ())
|
Split requested columns into columns that should load the next known
value and columns that should load the previous known value.
Parameters
----------
requested_columns : iterable[BoundColumn]
Returns
-------
next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
``requested_columns``, partitioned into sub-sequences based on
whether the column should produce values from the next event or the
previous event
|
def on_tape(*files):
for path in files:
try:
if os.stat(path).st_blocks == 0:
return True
except AttributeError:
return False
return False
|
Determine whether any of the given files are on tape
Parameters
----------
*files : `str`
one or more paths to GWF files
Returns
-------
True/False : `bool`
`True` if any of the files are determined to be on tape,
otherwise `False`
|
def _add_default_entries(input_dict, defaults_dict):
for key, value in defaults_dict.iteritems():
if key == 'patients':
print('Cannot default `patients`.')
continue
if isinstance(value, dict):
if key not in input_dict or input_dict[key] is None:
input_dict[key] = value
else:
r = _add_default_entries(input_dict.get(key, {}), value)
input_dict[key] = r
else:
if key not in input_dict or input_dict[key] is None:
input_dict[key] = value
return input_dict
|
Add the entries in defaults dict into input_dict if they don't exist in input_dict
This is based on the accepted answer at
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
:param dict input_dict: The dict to be updated
:param dict defaults_dict: Dict containing the defaults for entries in input_dict
:return: updated dict
:rtype: dict
|
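A brief usage sketch of the defaults merge above (hypothetical keys; note the function as written targets Python 2 via `iteritems`):
defaults = {'cores': 4, 'tools': {'aligner': 'bwa', 'threads': 2}}
config = {'tools': {'aligner': 'star'}}
merged = _add_default_entries(config, defaults)
# merged == {'cores': 4, 'tools': {'aligner': 'star', 'threads': 2}}
# existing values win; missing keys are filled in from the defaults, recursively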
def scan(cls, filter_builder=None, **scan_filter):
scan_kwargs = {'scan_filter': build_condition(scan_filter)}
if filter_builder:
cls._build_filter_expression(filter_builder, scan_kwargs)
return ResultSet(cls, 'scan', scan_kwargs)
|
High level scan API.
:param filter_builder: filter expression builder.
:type filter_builder: :class:`~bynamodb.filterexps.Operator`
|
def update_dict(d, u):
for key, val in u.items():
if isinstance(val, collections.Mapping):
d[key] = update_dict(d.get(key, {}), val)
else:
d[key] = u[key]
return d
|
Recursively updates nested dict d from nested dict u
|
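A small sketch of the recursive update, assuming `collections.Mapping` resolves (it moved to `collections.abc` in newer Python versions):
base = {'db': {'host': 'localhost', 'port': 5432}}
override = {'db': {'port': 6432}}
update_dict(base, override)
# base == {'db': {'host': 'localhost', 'port': 6432}}  -- nested keys merge rather than replace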
def handle_exception (self):
etype, evalue = sys.exc_info()[:2]
log.debug(LOG_CHECK, "Error in %s: %s %s", self.url, etype, evalue, exception=True)
if (etype in ExcNoCacheList) or \
(etype == socket.error and evalue.args[0]==errno.EBADF) or \
not evalue:
self.caching = False
errmsg = unicode(etype.__name__)
uvalue = strformat.unicode_safe(evalue)
if uvalue:
errmsg += u": %s" % uvalue
return strformat.limit(errmsg, length=240)
|
An exception occurred. Log it and set the cache flag.
|
def poll_results_check(self):
if not self.consumers:
LOGGER.debug('Skipping poll results check, no consumers')
return
LOGGER.debug('Checking for poll results')
while True:
try:
stats = self.stats_queue.get(False)
except queue.Empty:
break
try:
self.poll_data['processes'].remove(stats['name'])
except ValueError:
pass
self.collect_results(stats)
if self.poll_data['processes']:
LOGGER.warning('Did not receive results from %r',
self.poll_data['processes'])
|
Check for poll results by checking whether the stats queue is
empty. If it is not, try to collect stats. If it is, set a timer to
call ourselves again in _POLL_RESULTS_INTERVAL.
|
def find_line_beginning(strings: Sequence[str],
linestart: Optional[str]) -> int:
if linestart is None:
for i in range(len(strings)):
if is_empty_string(strings[i]):
return i
return -1
for i in range(len(strings)):
if strings[i].find(linestart) == 0:
return i
return -1
|
Finds the index of the line in ``strings`` that begins with ``linestart``,
or ``-1`` if none is found.
If ``linestart is None``, match an empty line.
|
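Illustrative calls, assuming `is_empty_string` treats an empty string as empty:
lines = ["Name: Alice", "Age: 30", "", "Notes: n/a"]
find_line_beginning(lines, "Age")   # -> 1  (line starts with "Age")
find_line_beginning(lines, None)    # -> 2  (first empty line)
find_line_beginning(lines, "Zip")   # -> -1 (no match)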
def turn_right(self, angle_degrees, rate=RATE):
flight_time = angle_degrees / rate
self.start_turn_right(rate)
time.sleep(flight_time)
self.stop()
|
Turn to the right, staying on the spot
:param angle_degrees: How far to turn (degrees)
:param rate: The turning speed (degrees/second)
:return:
|
def find_files_cmd(data_path, minutes, start_time, end_time):
if minutes:
return FIND_MINUTES_COMMAND.format(
data_path=data_path,
minutes=minutes,
)
if start_time:
if end_time:
return FIND_RANGE_COMMAND.format(
data_path=data_path,
start_time=start_time,
end_time=end_time,
)
else:
return FIND_START_COMMAND.format(
data_path=data_path,
start_time=start_time,
)
|
Find the log files depending on their modification time.
:param data_path: the path to the Kafka data directory
:type data_path: str
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
:returns: the find command
:rtype: str
|
def queries():
query = request.form['query']
name = request.form.get('name')
app.db.add_gemini_query(name, query)
return redirect(request.referrer)
|
Store a new GEMINI query.
|
def _pfp__set_value(self, value):
if self._pfp__frozen:
raise errors.UnmodifiableConst()
if len(value) != len(self._pfp__children):
raise errors.PfpError("struct initialization has wrong number of members")
for x in six.moves.range(len(self._pfp__children)):
self._pfp__children[x]._pfp__set_value(value[x])
|
Initialize the struct. Value should be an array of
fields, one for each struct member.
:value: An array of fields to initialize the struct with
:returns: None
|
def apply_log(a: tuple, func: Callable[[Any], Tuple[Any, Log]]) -> Tuple[Any, Log]:
value, log = a
new, entry = func(value)
return new, log + entry
|
Apply a function to a value with a log.
Helper function to apply a function to a value with a log tuple.
|
def _request(self, url, params=None, timeout=10):
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
|
Send a request with parameters.
|
def parse_soap_enveloped_saml_thingy(text, expected_tags):
envelope = defusedxml.ElementTree.fromstring(text)
assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
assert len(envelope) >= 1
body = None
for part in envelope:
if part.tag == '{%s}Body' % soapenv.NAMESPACE:
assert len(part) == 1
body = part
break
if body is None:
return ""
saml_part = body[0]
if saml_part.tag in expected_tags:
return ElementTree.tostring(saml_part, encoding="UTF-8")
else:
raise WrongMessageType("Was '%s' expected one of %s" % (saml_part.tag,
expected_tags))
|
Parses a SOAP enveloped SAML thing and returns the thing as
a string.
:param text: The SOAP object as XML string
:param expected_tags: What the tag of the SAML thingy is expected to be.
:return: SAML thingy as a string
|
def __stopOpenThread(self):
print 'call stopOpenThread'
try:
if self.__sendCommand('thread stop')[0] == 'Done':
return self.__sendCommand('ifconfig down')[0] == 'Done'
else:
return False
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("stopOpenThread() Error: " + str(e))
|
stop OpenThread stack
Returns:
True: successfully stopped the OpenThread stack and brought the thread interface down
False: failed to stop the OpenThread stack
|
def wndifd(a, b):
assert isinstance(a, stypes.SpiceCell)
assert isinstance(b, stypes.SpiceCell)
assert a.dtype == 1
assert b.dtype == 1
c = stypes.SpiceCell.double(a.size + b.size)
libspice.wndifd_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
return c
|
Place the difference of two double precision windows into
a third window.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wndifd_c.html
:param a: Input window A.
:type a: spiceypy.utils.support_types.SpiceCell
:param b: Input window B.
:type b: spiceypy.utils.support_types.SpiceCell
:return: Difference of a and b.
:rtype: spiceypy.utils.support_types.SpiceCell
|
def import_path(path):
sys.path.insert(0, ".")
parts = path.split(".")
module = None
for i in range(1, len(parts)+1):
if module is not None and hasattr(module, parts[i-1]):
try:
return _import_attributes(module, parts[i-1:])
except AttributeError:
pass
module_path = ".".join(parts[0:i])
module = importlib.import_module(module_path)
return module
|
Imports any valid python module or attribute path as though it were a
module
:Example:
>>> from yamlconf import import_path
>>> from my_package.my_module.my_submodule import attribute
>>> attribute.sub_attribute == \
... import_path("y_package.my_module.my_submodule.attribute.sub_attribute")
True
>>>
:Parameters:
path : `str`
A valid python path that crosses modules and/or attributes
|
def get_entity_mm():
type_builtins = {
'integer': SimpleType(None, 'integer'),
'string': SimpleType(None, 'string')
}
entity_mm = metamodel_from_file(join(this_folder, 'entity.tx'),
classes=[SimpleType],
builtins=type_builtins)
return entity_mm
|
Builds and returns a meta-model for Entity language.
|
def matrix_product(mat1, mat2):
return np.dot(mat2.T, mat1.T).T
|
Compute the product of two Fortran contiguous matrices.
This is to avoid the overhead of NumPy converting to C-contiguous
before computing a matrix product.
Does so via ``A B = (B^T A^T)^T`` since ``B^T`` and ``A^T`` will be
C-contiguous without a copy, then the product ``P = B^T A^T`` will
be C-contiguous and we can return the view ``P^T`` without a copy.
Args:
mat1 (numpy.ndarray): The left-hand side matrix.
mat2 (numpy.ndarray): The right-hand side matrix.
Returns:
numpy.ndarray: The product of the two matrices.
|
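A quick check of the transpose trick, using hypothetical Fortran-contiguous inputs:
import numpy as np

a = np.asfortranarray(np.arange(6.0).reshape(2, 3))
b = np.asfortranarray(np.arange(12.0).reshape(3, 4))
p = matrix_product(a, b)
assert np.allclose(p, a.dot(b))  # same values as the plain product
assert p.flags.f_contiguous      # returned as a transposed view, no extra copy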
def extract_patch_images(f, which_set):
if which_set not in ('train', 'valid', 'test'):
raise ValueError('which_set must be one of train, valid, or test')
which_set = 'val' if which_set == 'valid' else which_set
patch_images = {}
with tar_open(f) as tar:
for info_obj in tar:
if not info_obj.name.endswith('.JPEG'):
continue
tokens = info_obj.name.split('/')
file_which_set = tokens[-2]
if file_which_set != which_set:
continue
filename = tokens[-1]
patch_images[filename] = tar.extractfile(info_obj.name).read()
return patch_images
|
Extracts a dict of the "patch images" for ILSVRC2010.
Parameters
----------
f : str or file-like object
The filename or file-handle to the patch images TAR file.
which_set : str
Which set of images to extract. One of 'train', 'valid', 'test'.
Returns
-------
dict
A dictionary containing a mapping of filenames (without path) to a
bytes object containing the replacement image.
Notes
-----
Certain images in the distributed archives are blank, or display
an "image not available" banner. A separate TAR file of
"patch images" is distributed with the corrected versions of
these. It is this archive that this function is intended to read.
|
def to_dict(self):
return dict({'title': self.title,
'album': self.album,
'year': self.year,
'lyrics': self.lyrics,
'image': self.song_art_image_url})
|
Create a dictionary from the song object
Used in save_lyrics to create json object
:return: Dictionary
|
def wishart_pfaffian(self):
return np.array(
[Pfaffian(self, val).value for i, val in np.ndenumerate(self._chisq)]
).reshape(self._chisq.shape)
|
ndarray of wishart pfaffian CDF, before normalization
|
def get_instance_id(self, instance):
" Returns instance pk even if multiple instances were passed to RichTextField. "
if type(instance) in [list, tuple]:
core_signals.request_finished.connect(receiver=RichTextField.reset_instance_counter_listener)
if RichTextField.__inst_counter >= len(instance):
return None
else:
obj_id = self.instance[ RichTextField.__inst_counter ].pk
RichTextField.__inst_counter += 1
else:
obj_id = instance.pk
return obj_id
|
Returns instance pk even if multiple instances were passed to RichTextField.
|
def connection(self):
if self._connections:
if not self._connections.acquire(self._blocking):
raise TooManyConnections
try:
con = self._cache.get(0)
except Empty:
con = self.steady_connection()
return PooledPgConnection(self, con)
|
Get a steady, cached PostgreSQL connection from the pool.
|
def run_coroutine_threadsafe(coro, loop):
if not asyncio.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future()
def callback():
try:
_chain_future(asyncio.ensure_future(coro, loop=loop), future)
except Exception as exc:
if future.set_running_or_notify_cancel():
future.set_exception(exc)
raise
loop.call_soon_threadsafe(callback)
return future
|
Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
|
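A usage sketch, submitting a coroutine to a loop running in another thread (assumes the surrounding module imports `asyncio`, `concurrent.futures`, and `_chain_future`):
import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def add(a, b):
    return a + b

future = run_coroutine_threadsafe(add(2, 3), loop)
print(future.result(timeout=5))  # -> 5, resolved from the worker thread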
def get_template(cls, message, messenger):
template = message.context.get('tpl', None)
if template:
return template
if cls.template is None:
cls.template = 'sitemessage/messages/%s__%s.%s' % (
cls.get_alias(), messenger.get_alias(), cls.template_ext
)
return cls.template
|
Get a template path to compile a message.
1. `tpl` field of message context;
2. `template` field of message class;
3. deduced from message, messenger data and `template_ext` message type field
(e.g. `sitemessage/messages/plain__smtp.txt` for `plain` message type).
:param Message message: Message model
:param MessengerBase messenger: a MessengerBase heir
:return: str
:rtype: str
|
def _return(self, ary):
if isinstance(ary, Array):
return ary
return Array(ary, copy=False)
|
Wrap the ary to return an Array type
|
def addToService(self, service, namespace=None, seperator='.'):
if namespace is None:
namespace = []
if isinstance(namespace, basestring):
namespace = [namespace]
for n, m in inspect.getmembers(self, inspect.ismethod):
if hasattr(m, 'export_rpc'):
try:
name = seperator.join(namespace + m.export_rpc)
except TypeError:
name = seperator.join(namespace + [m.export_rpc])
service.add(m, name)
|
Add this Handler's exported methods to an RPC Service instance.
|
def save(self):
e = self.editor
self._style = e.current_style
self._show_line_numbers = e.show_line_numbers
self._highlight_search = e.highlight_search
self._show_ruler = e.show_ruler
self._relative_number = e.relative_number
self._cursorcolumn = e.cursorcolumn
self._cursorline = e.cursorline
self._colorcolumn = e.colorcolumn
|
Back up current editor state.
|
def run_on(*, event: str):
def decorator(callback):
@functools.wraps(callback)
def decorator_wrapper():
RTMClient.on(event=event, callback=callback)
return decorator_wrapper()
return decorator
|
A decorator to store and link a callback to an event.
|
def get_xml_string_with_self_contained_assertion_within_encrypted_assertion(
self, assertion_tag):
prefix_map = self.get_prefix_map(
[self.encrypted_assertion._to_element_tree().find(assertion_tag)])
tree = self._to_element_tree()
self.set_prefixes(
tree.find(
self.encrypted_assertion._to_element_tree().tag).find(
assertion_tag), prefix_map)
return ElementTree.tostring(tree, encoding="UTF-8").decode('utf-8')
|
Makes an encrypted assertion only containing self contained
namespaces.
:param assertion_tag: Tag for the assertion to be transformed.
:return: A new samlp.Response in string representation.
|
def plot_degbandshalffill():
ulim = [3.45, 5.15, 6.85, 8.55]
bands = range(1, 5)
for band, u_int in zip(bands, ulim):
name = 'Z_half_'+str(band)+'band'
dop = [0.5]
data = ssplt.calc_z(band, dop, np.arange(0, u_int, 0.1),0., name)
plt.plot(data['u_int'], data['zeta'][0, :, 0], label='$N={}$'.format(str(band)))
ssplt.label_saves('Z_half_multiorb.png')
|
Plot of Quasiparticle weight for degenerate
half-filled bands, showing the Mott transition
|
def deactivate(self):
if lib.EnvDeactivateRouter(self._env, self._name.encode()) == 0:
raise RuntimeError("Unable to deactivate router %s" % self._name)
|
Deactivate the Router.
|
def print_subprocess_output(subp):
if subp:
if subp.errorcode != 0:
print('<error errorcode="%s">' % str(subp.errorcode))
print(subp.stderr)
print("</error>")
print_tag('stdout', '\n%s\n' % subp.stdout)
else:
print_tag('success', '\n%s\n' % subp.stdout)
print_tag('warnings', '\n%s\n' % subp.stderr)
|
Prints the stdout and stderr output.
|
def parse_qs(self, qs):
qs_state = urllib2.urlparse.parse_qs(qs)
ret = {}
for qs_var, qs_value_list in qs_state.items():
if len(qs_value_list) > 1:
return None
ret[qs_var] = qs_value_list[0]
return ret
|
Parse query string, but enforce one instance of each variable.
Return a dict with the variables on success
Return None on parse error
|
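Illustrative behaviour, with `parser` standing in for an instance of the class this method belongs to (the original relies on Python 2's `urllib2`):
parser.parse_qs("name=alice&age=30")    # {'name': 'alice', 'age': '30'}
parser.parse_qs("name=alice&name=bob")  # None -- duplicate variable rejected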
def generate(env):
fortran.generate(env)
env['FORTRAN'] = 'f90'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['OBJSUFFIX'] = '.obj'
env['FORTRANMODDIR'] = '${TARGET.dir}'
env['FORTRANMODDIRPREFIX'] = '/module:'
env['FORTRANMODDIRSUFFIX'] = ''
|
Add Builders and construction variables for Compaq Visual Fortran to an Environment.
|
def _get_instance_changes(current, state):
current_keys = set(current.keys())
state_keys = set(state.keys())
changed = salt.utils.data.compare_dicts(current, state)
for change in salt.utils.data.compare_dicts(current, state):
if change in changed and changed[change]['old'] == "":
del changed[change]
if change in changed and changed[change]['new'] == "":
del changed[change]
return changed
|
get modified properties
|
def find(self, oid):
if not isinstance(oid, Oid):
raise TypeError("Need crytypescrypto.oid.Oid as argument")
found = []
index = -1
end = len(self)
while True:
index = libcrypto.X509_get_ext_by_NID(self.cert.cert, oid.nid,
index)
if index >= end or index < 0:
break
found.append(self[index])
return found
|
Return list of extensions with given Oid
|
def set_working_directory(self, dirname):
if dirname:
self.main.workingdirectory.chdir(dirname, refresh_explorer=True,
refresh_console=False)
|
Set current working directory.
In the workingdirectory and explorer plugins.
|
def off_policy_train_batch(self, batch_info: BatchInfo):
self.model.train()
rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device)
batch_result = self.algo.optimizer_step(
batch_info=batch_info,
device=self.device,
model=self.model,
rollout=rollout
)
batch_info['sub_batch_data'].append(batch_result)
|
Perform an 'off-policy' training step of sampling the replay buffer and gradient descent
|
def mock_server_receive_request(client, server):
header = mock_server_receive(client, 16)
length = _UNPACK_INT(header[:4])[0]
request_id = _UNPACK_INT(header[4:8])[0]
opcode = _UNPACK_INT(header[12:])[0]
msg_bytes = mock_server_receive(client, length - 16)
if opcode not in OPCODES:
raise NotImplementedError("Don't know how to unpack opcode %d yet"
% opcode)
return OPCODES[opcode].unpack(msg_bytes, client, server, request_id)
|
Take a client socket and return a Request.
|
def get_factor_nodes(self):
self.check_model()
variable_nodes = self.get_variable_nodes()
factor_nodes = set(self.nodes()) - set(variable_nodes)
return list(factor_nodes)
|
Returns factor nodes present in the graph.
Before calling this method make sure that all the factors are added
properly.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> G = FactorGraph()
>>> G.add_nodes_from(['a', 'b', 'c'])
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> phi2 = DiscreteFactor(['b', 'c'], [2, 2], np.random.rand(4))
>>> G.add_nodes_from([phi1, phi2])
>>> G.add_factors(phi1, phi2)
>>> G.add_edges_from([('a', phi1), ('b', phi1),
... ('b', phi2), ('c', phi2)])
>>> G.get_factor_nodes()
[<DiscreteFactor representing phi(b:2, c:2) at 0x4b8c7f0>,
<DiscreteFactor representing phi(a:2, b:2) at 0x4b8c5b0>]
|
def salience(self, salience):
lib.EnvSetActivationSalience(self._env, self._act, salience)
|
Activation salience value.
|
def define_from_header(cls, image_header):
self = CsuConfiguration()
self._csu_bar_left = []
self._csu_bar_right = []
self._csu_bar_slit_center = []
self._csu_bar_slit_width = []
for i in range(EMIR_NBARS):
ibar = i + 1
keyword = 'CSUP{}'.format(ibar)
if keyword in image_header:
self._csu_bar_left.append(image_header[keyword])
else:
raise ValueError("Expected keyword " + keyword + " not found!")
keyword = 'CSUP{}'.format(ibar + EMIR_NBARS)
if keyword in image_header:
self._csu_bar_right.append(341.5 - image_header[keyword])
else:
raise ValueError("Expected keyword " + keyword + " not found!")
self._csu_bar_slit_center.append(
(self._csu_bar_left[i] + self._csu_bar_right[i]) / 2
)
self._csu_bar_slit_width.append(
self._csu_bar_right[i] - self._csu_bar_left[i]
)
return self
|
Define class members directly from FITS header.
Parameters
----------
image_header : instance of hdulist.header
Header content from a FITS file.
|
def write(self, config_dir=None, config_name=None, codec=None):
if not config_dir:
config_dir = self._meta_config_dir
if not config_dir:
raise IOError("config_dir not set")
if not config_name:
config_name = self._defaults.get('config_name', None)
if not config_name:
raise KeyError('config_name not set')
if codec:
codec = munge.get_codec(codec)()
else:
codec = munge.get_codec(self._defaults['codec'])()
config_dir = os.path.expanduser(config_dir)
if not os.path.exists(config_dir):
os.mkdir(config_dir)
codec.dumpu(self.data, os.path.join(config_dir, 'config.' + codec.extension))
|
writes config to config_dir using config_name
|
def fnames(self, names):
names = list(names[:len(self._fnames)])
self._fnames = names + self._fnames[len(names):]
|
Ensure constant size of fnames
|
def waveform_image(mediafile, xy_size, outdir=None, center_color=None, outer_color=None, bg_color=None):
try:
import waveform
except ImportError, exc:
raise ImportError("%s [get it at https://github.com/superjoe30/PyWaveform]" % exc)
outdir = outdir or os.path.dirname(mediafile)
outfile = os.path.join(outdir, os.path.splitext(os.path.basename(mediafile))[0] + ".png")
with transcode.to_wav(mediafile) as wavfile:
waveform.draw(wavfile, outfile, xy_size,
bgColor=bg_color or WAVE_BG_COLOR,
fgGradientCenter=center_color or WAVE_CENTER_COLOR,
fgGradientOuter=outer_color or WAVE_OUTER_COLOR)
return outfile
|
Create waveform image from audio data.
Return path to created image file.
|
def _listen_inbox_messages(self):
inbox_queue = Queue(maxsize=self._n_jobs * 4)
threads = []
try:
for i in range(self._n_jobs):
t = BotQueueWorker(name='InboxThread-t-{}'.format(i),
jobs=inbox_queue,
target=self._process_inbox_message)
t.start()
self._threads.append(t)
for message in self._reddit.inbox.stream():
if self._stop:
self._do_stop(inbox_queue, threads)
break
inbox_queue.put(message)
self.log.debug('Listen inbox stopped')
except Exception as e:
self._do_stop(inbox_queue, threads)
self.log.error('Exception while listening to inbox:')
self.log.error(str(e))
self.log.error('Waiting for 10 minutes and trying again.')
time.sleep(10 * 60)
self._listen_inbox_messages()
|
Start listening to messages, using a separate thread.
|
def serialize_dict(self, msg_dict):
serialized = json.dumps(msg_dict, namedtuple_as_object=False)
if PY2:
serialized = serialized.decode("utf-8")
serialized = "{}\nend\n".format(serialized)
return serialized
|
Serialize to JSON a message dictionary.
|
def path(self, filename):
if not self.backend.root:
raise OperationNotSupported(
'Direct file access is not supported by ' +
self.backend.__class__.__name__
)
return os.path.join(self.backend.root, filename)
|
This returns the absolute path of a file uploaded to this set. It
doesn't actually check whether said file exists.
:param filename: The filename to return the path for.
:param folder: The subfolder within the upload set previously used
to save to.
:raises OperationNotSupported: when the backend doesn't support direct file access
|
def set_if_empty(self, param, default):
if not self.has(param):
self.set(param, default)
|
Set the parameter to the default if it doesn't exist
|
def store(self, database, validate=True, role=None):
if validate:
self.validate()
self._id, self._rev = database.save(self.to_primitive(role=role))
return self
|
Store the document in the given database.
:param database: the `Database` object source for storing the document.
:return: an updated instance of `Document` / self.
|
def is_valid_package_name(name, raise_error=False):
is_valid = PACKAGE_NAME_REGEX.match(name)
if raise_error and not is_valid:
raise PackageRequestError("Not a valid package name: %r" % name)
return is_valid
|
Test the validity of a package name string.
Args:
name (str): Name to test.
raise_error (bool): If True, raise an exception on failure
Returns:
bool.
|
def num(value):
if re_hex_num.match(value):
return int(value, base=16)
else:
return int(value)
|
Convert a value from one of several bases to an int.
|
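Example conversions, assuming `re_hex_num` matches `0x`-prefixed strings:
num("0x1a")  # -> 26, parsed as base 16
num("42")    # -> 42, parsed as base 10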
def metadata_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1)
credentials = dict(
AccessKeyId="test-key",
SecretAccessKey="test-secret-key",
Token="test-session-token",
Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ")
)
path = parsed_url.path
meta_data_prefix = "/latest/meta-data/"
if path.startswith(meta_data_prefix):
path = path[len(meta_data_prefix):]
if path == '':
result = 'iam'
elif path == 'iam':
result = json.dumps({
'security-credentials': {
'default-role': credentials
}
})
elif path == 'iam/security-credentials/':
result = 'default-role'
elif path == 'iam/security-credentials/default-role':
result = json.dumps(credentials)
else:
raise NotImplementedError(
"The {0} metadata path has not been implemented".format(path))
return 200, headers, result
|
Mock response for localhost metadata
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
|
def prettify_json(json_string):
try:
data = json.loads(json_string)
html = '<pre>' + json.dumps(data, sort_keys=True, indent=4) + '</pre>'
except:
html = json_string
return mark_safe(html)
|
Given a JSON string, return it as
safe, formatted HTML
|
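Expected behaviour in a template context, with hypothetical inputs:
prettify_json('{"b": 1, "a": [2, 3]}')  # keys sorted, indented, wrapped in <pre>...</pre>
prettify_json('not json')               # returned unchanged (still marked safe)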
def get_short_help_str(self, limit=45):
return self.short_help or self.help and make_default_short_help(self.help, limit) or ''
|
Gets short help for the command or makes it by shortening the long help string.
|
def list_controls(self, limit=500, offset=0):
return self.__list(R_CONTROL, limit=limit, offset=offset)['controls']
|
List `all` the controls on this Thing.
Returns QAPI list function payload
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`limit` (optional) (integer) Return this many Point details
`offset` (optional) (integer) Return Point details starting at this offset
|
def remove_empty_files(root_directory, dry_run=False, ignore_errors=True,
enable_scandir=False):
file_list = []
for root, directories, files in _walk(root_directory,
enable_scandir=enable_scandir):
for file_name in files:
file_path = join_paths(root, file_name, strict=True)
if os.path.isfile(file_path) and not os.path.getsize(file_path):
if file_hash(file_path) == variables.hashes.empty_file.md5:
file_list.append(file_path)
file_list = sorted(set(file_list))
if not dry_run:
for afile in file_list:
try:
os.unlink(afile)
except OSError as err:
if ignore_errors:
logger.info("File {0} could not be deleted".format(afile))
else:
raise err
return file_list
|
Remove all empty files from a path. Returns list of the empty files removed.
:param root_directory: base directory to start at
:param dry_run: just return a list of what would be removed
:param ignore_errors: Permissions are a pain, just ignore if you blocked
:param enable_scandir: on python < 3.5 enable external scandir package
:return: list of removed files
|
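A cautious usage sketch with a hypothetical directory, doing the dry run first:
would_remove = remove_empty_files("/tmp/data", dry_run=True)
for path in would_remove:
    print(path)                  # inspect before deleting anything
remove_empty_files("/tmp/data")  # actually unlink the empty files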
def _is_second_run():
tracker_path = _get_not_configured_usage_tracker_path()
if not tracker_path.exists():
return False
current_pid = _get_shell_pid()
with tracker_path.open('r') as tracker:
try:
info = json.load(tracker)
except ValueError:
return False
if not (isinstance(info, dict) and info.get('pid') == current_pid):
return False
return (_get_previous_command() == 'fuck' or
time.time() - info.get('time', 0) < const.CONFIGURATION_TIMEOUT)
|
Returns `True` when we know that `fuck` was called a second time.
|
def get_results(self, *, block=False, timeout=None):
deadline = None
if timeout:
deadline = time.monotonic() + timeout / 1000
for child in self.children:
if deadline:
timeout = max(0, int((deadline - time.monotonic()) * 1000))
if isinstance(child, group):
yield list(child.get_results(block=block, timeout=timeout))
else:
yield child.get_result(block=block, timeout=timeout)
|
Get the results of each job in the group.
Parameters:
block(bool): Whether or not to block until the results are stored.
timeout(int): The maximum amount of time, in milliseconds,
to wait for results when block is True. Defaults to 10
seconds.
Raises:
ResultMissing: When block is False and the results aren't set.
ResultTimeout: When waiting for results times out.
Returns:
A result generator.
|
def list(self, toa=None, show_history=False):
if not toa:
toa = time.mktime(datetime.datetime.now().timetuple())
query = {
"$query": {
"master_id": self.master_id,
"processed": show_history,
"toa" : {"$lte" : toa}
},
"$orderby": {
"toa": 1
}
}
revisions = yield self.revisions.find(query)
raise Return(revisions)
|
Return all revisions for this stack
:param int toa: The time of action as a UTC timestamp
:param bool show_history: Whether to show historical revisions
|
def set_field(self, field_name, field_val=None):
field_name = self._normalize_field_name(field_name)
self.fields_set.append(field_name, [field_name, field_val])
return self
|
set a field into .fields attribute
In insert/update queries, these are the fields that will be inserted/updated into the db
|
def get_formset(self, request, obj=None, **kwargs):
def _placeholder_initial(p):
return {
'slot': p.slot,
'title': p.title,
'role': p.role,
}
initial = []
if request.method == "GET":
placeholder_admin = self._get_parent_modeladmin()
data = placeholder_admin.get_placeholder_data(request, obj)
initial = [_placeholder_initial(d) for d in data]
FormSetClass = super(PlaceholderEditorInline, self).get_formset(request, obj, **kwargs)
FormSetClass.__init__ = curry(FormSetClass.__init__, initial=initial)
return FormSetClass
|
Pre-populate formset with the initial placeholders to display.
|
def generic_combine(method, arrays, masks=None, dtype=None,
out=None, zeros=None, scales=None, weights=None):
arrays = [numpy.asarray(arr, dtype=dtype) for arr in arrays]
if masks is not None:
masks = [numpy.asarray(msk) for msk in masks]
if out is None:
try:
outshape = (3,) + tuple(arrays[0].shape)
out = numpy.zeros(outshape, dtype)
except AttributeError:
raise TypeError('First element in arrays does '
'not have .shape attribute')
else:
out = numpy.asanyarray(out)
intl_combine.generic_combine(
method, arrays,
out[0], out[1], out[2],
masks, zeros, scales, weights
)
return out
|
Stack arrays using different methods.
:param method: the combination method
:type method: PyCObject
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param zeros:
:param scales:
:param weights:
:return: median, variance of the median and number of points stored
|
def _init_regs_random(self):
values = set()
while len(values) != len(self._arch_regs_parent):
values.add(random.randint(0, 2**self._arch_info.operand_size - 1))
values = list(values)
regs = {}
for idx, reg in enumerate(self._arch_regs_parent):
regs[reg] = values[idx] & (2**self._arch_regs_size[reg] - 1)
return regs
|
Initialize registers with random values.
|
def set_knowledge_category(self, grade_id):
if self.get_knowledge_category_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(grade_id):
raise errors.InvalidArgument()
self._my_map['knowledgeCategoryId'] = str(grade_id)
|
Sets the knowledge category.
arg: grade_id (osid.id.Id): the new knowledge category
raise: InvalidArgument - ``grade_id`` is invalid
raise: NoAccess - ``grade_id`` cannot be modified
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
def get_report_raw(year, report_type):
if not is_valid_report_type(report_type):
msg = '%s is not a valid report type.' % report_type
raise ValueError(msg)
url = get_url(year, report_type)
raw_contents = get_zipped_file(url)
return csv.DictReader(cStringIO.StringIO(raw_contents))
|
Download and extract a CO-TRACER report.
Generate a URL for the given report, download the corresponding archive,
extract the CSV report, and interpret it using the standard CSV library.
@param year: The year for which data should be downloaded.
@type year: int
@param report_type: The type of report that should be downloaded. Should be
one of the strings in constants.REPORT_TYPES.
@type report_type: str
@return: A DictReader with the loaded data. Note that this data has not
been interpreted so data fields like floating point values, dates, and
boolean values are still strings.
@rtype: csv.DictReader
|
def fetch_mood_station(self, station_id, terr=KKBOXTerritory.TAIWAN):
url = 'https://api.kkbox.com/v1.1/mood-stations/%s' % station_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
|
Fetches a mood station by given ID.
:param station_id: the station ID
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#moodstations-station_id`.
|
def main():
listname = sys.argv[2]
hostname = sys.argv[1]
mlist = MailList.MailList(listname, lock=False)
f = StringIO(sys.stdin.read())
msg = email.message_from_file(f, Message.Message)
h = HyperArch.HyperArchive(mlist)
sequence = h.sequence
h.processUnixMailbox(f)
f.close()
archive = h.archive
msgno = '%06d' % sequence
filename = msgno + '.html'
filepath = os.path.join(h.basedir, archive, filename)
h.close()
url = '%s%s/%s' % (mlist.GetBaseArchiveURL(), archive, filename)
ext_process(listname, hostname, url, filepath, msg)
|
This is the mainline.
It first invokes the pipermail archiver to add the message to the archive,
then calls the function above to do whatever with the archived message
after its URL and path are known.
|
def first_order_score(y, mean, scale, shape, skewness):
return (y-mean)/np.power(scale,2)
|
GAS Normal Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Normal distribution
scale : float
scale parameter for the Normal distribution
shape : float
tail thickness parameter for the Normal distribution
skewness : float
skewness parameter for the Normal distribution
Returns
----------
- Score of the Normal family
|
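A worked value of the score term (assuming `numpy` is imported as `np` in the module):
first_order_score(y=2.0, mean=1.0, scale=2.0, shape=None, skewness=None)
# (2.0 - 1.0) / 2.0**2 = 0.25 -- a point above the mean pulls the location upward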
def _preprocess_data(self, data):
original_type = data.dtype
if len(data.shape) == 1:
data = data[:, np.newaxis, np.newaxis]
elif len(data.shape) == 2:
data = data[:, :, np.newaxis]
elif len(data.shape) == 0 or len(data.shape) > 3:
raise ValueError(
'Illegal data array passed to image. Must be 1, 2, or 3 dimensional numpy array')
return data.astype(original_type)
|
Converts a data array to the preferred 3D structure.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to process.
Returns
-------
:obj:`numpy.ndarray`
The data re-formatted (if needed) as a 3D matrix
Raises
------
ValueError
If the data is not 1, 2, or 3D to begin with.
|
def DeregisterBlockchain():
Blockchain.SECONDS_PER_BLOCK = 15
Blockchain.DECREMENT_INTERVAL = 2000000
Blockchain.GENERATION_AMOUNT = [8, 7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
Blockchain._blockchain = None
Blockchain._validators = []
Blockchain._genesis_block = None
Blockchain._instance = None
Blockchain._blockrequests = set()
Blockchain._paused = False
Blockchain.BlockSearchTries = 0
Blockchain.CACHELIM = 4000
Blockchain.CMISSLIM = 5
Blockchain.LOOPTIME = .1
Blockchain.PersistCompleted = Events()
Blockchain.Notify = Events()
Blockchain._instance = None
|
Remove the default blockchain instance.
|
def remove_device(self, request):
devices = self.__get_u2f_devices()
for i in range(len(devices)):
if devices[i]['keyHandle'] == request['id']:
del devices[i]
self.__save_u2f_devices(devices)
return {
'status' : 'ok',
'message' : 'Successfully deleted your device!'
}
return {
'status' : 'failed',
'error' : 'No device with such an id been found!'
}
|
Removes device specified by id
|
def Error(self, backtrace, client_id=None):
logging.error("Hunt Error: %s", backtrace)
self.hunt_obj.LogClientError(client_id, backtrace=backtrace)
|
Logs an error for a client but does not terminate the hunt.
|
def git_version(short: 'Get short hash' = True, show: 'Print version to stdout' = False):
result = local(
['git', 'rev-parse', '--is-inside-work-tree'],
stdout='hide', stderr='hide', echo=False, raise_on_error=False)
if not result:
return None
result = local(
['git', 'describe', '--exact-match'],
stdout='capture', stderr='hide', echo=False, raise_on_error=False)
if result:
return result.stdout
result = local(
['git', 'rev-parse', '--short' if short else None, 'HEAD'],
stdout='capture', stderr='hide', echo=False, raise_on_error=False)
if result:
version = result.stdout.strip()
if show:
print(version)
return version
return None
|
Get tag associated with HEAD; fall back to SHA1.
If HEAD is tagged, return the tag name; otherwise fall back to
HEAD's short SHA1 hash.
.. note:: Only annotated tags are considered.
.. note:: The output isn't shown by default. To show it, pass the
``--show`` flag.
|
def to_array(self):
dt = np.dtype(list(zip(self.labels, (c.dtype for c in self.columns))))
arr = np.empty_like(self.columns[0], dt)
for label in self.labels:
arr[label] = self[label]
return arr
|
Convert the table to a structured NumPy array.
|
def _load_id_or_insert(self, session):
if self.id is None:
stable_id = self.get_stable_id()
id = session.execute(
select([Context.id]).where(Context.stable_id == stable_id)
).first()
if id is None:
self.id = session.execute(
Context.__table__.insert(),
{"type": self._get_table().__tablename__, "stable_id": stable_id},
).inserted_primary_key[0]
insert_args = self._get_insert_args()
insert_args["id"] = self.id
return insert_args
else:
self.id = id[0]
|
Load the id of the temporary context if it exists or return insert args.
As a side effect, this also inserts the Context object for the stableid.
:return: The record of the temporary context to insert.
:rtype: dict
|
def macro2micro(self, macro_indices):
def from_partition(partition, macro_indices):
micro_indices = itertools.chain.from_iterable(
partition[i] for i in macro_indices)
return tuple(sorted(micro_indices))
if self.blackbox and self.coarse_grain:
cg_micro_indices = from_partition(self.coarse_grain.partition,
macro_indices)
return from_partition(self.blackbox.partition,
reindex(cg_micro_indices))
elif self.blackbox:
return from_partition(self.blackbox.partition, macro_indices)
elif self.coarse_grain:
return from_partition(self.coarse_grain.partition, macro_indices)
return macro_indices
|
Return all micro indices which compose the elements specified by
``macro_indices``.
|
def as_json(data, **kwargs):
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = False
if 'ensure_ascii' not in kwargs:
kwargs['ensure_ascii'] = False
data = json.dumps(data, **kwargs)
return data
|
Writes data as json.
:param dict data: data to convert to json
:param kwargs kwargs: kwargs for json dumps
:return: json string
:rtype: str
|
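Behaviour of the keyword defaults, with hypothetical data:
as_json({'name': 'café'})                  # non-ASCII kept as-is (ensure_ascii defaults to False)
as_json({'b': 1, 'a': 2}, sort_keys=True)  # caller-supplied kwargs still pass through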
def list_subscribers(self, list_id):
return [User(user._json) for user in self._client.list_subscribers(list_id=list_id)]
|
List subscribers of a list
:param list_id: list ID number
:return: list of :class:`~responsebot.models.User` objects
|
def copy_script(self, filename, id_=-1):
self._copy(filename, id_=id_, file_type=SCRIPT_FILE_TYPE)
|
Copy a script to the distribution server.
Args:
filename: Full path to file to upload.
id_: ID of Script object to associate with, or -1 for new
Script (default).
|
def seal(mock):
_frankeinstainize(mock)
for attr in dir(mock):
try:
m = getattr(mock, attr)
except AttributeError:
continue
if not isinstance(m, NonCallableMock):
continue
if m._mock_new_parent is mock:
seal(m)
|
Disable the automatic generation of "submocks"
Given an input Mock, seals it to ensure no further mocks will be generated
when accessing an attribute that was not already defined.
Submocks are defined as all mocks which were created DIRECTLY from the
parent. If a mock is assigned to an attribute of an existing mock,
it is not considered a submock.
|
def _get_observer_fun(self, prop_name):
def _observer_fun(self, model, old, new):
if self._itsme:
return
self._on_prop_changed()
_observer_fun.__name__ = "property_%s_value_change" % prop_name
return _observer_fun
|
This is the code for a value change observer
|
def _replace_token_range(tokens, start, end, replacement):
tokens = tokens[:start] + replacement + tokens[end:]
return tokens
|
For a range indicated from start to end, replace with replacement.
|
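A minimal sketch of the splice, with made-up tokens:
tokens = ['a', '+', 'b', '*', 'c']
_replace_token_range(tokens, 1, 4, ['-', 'd'])
# -> ['a', '-', 'd', 'c']  (indices 1..3 replaced; the original list is not mutated)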
def raises(cls, sender, attrname, error, args=ANYTHING, kwargs=ANYTHING):
"An alternative constructor which raises the given error"
def raise_error():
raise error
return cls(sender, attrname, returns=Invoke(raise_error), args=ANYTHING, kwargs=ANYTHING)
|
An alternative constructor which raises the given error
|
def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None):
nowait = nowait and self.allow_nowait() and not cb
if nowait and consumer_tag == '':
consumer_tag = self._generate_consumer_tag()
args = Writer()
args.write_short(ticket or self.default_ticket).\
write_shortstr(queue).\
write_shortstr(consumer_tag).\
write_bits(no_local, no_ack, exclusive, nowait).\
write_table({})
self.send_frame(MethodFrame(self.channel_id, 60, 20, args))
if not nowait:
self._pending_consumers.append((consumer, cb))
self.channel.add_synchronous_cb(self._recv_consume_ok)
else:
self._consumer_cb[consumer_tag] = consumer
|
Start a queue consumer. If `cb` is supplied, will be called when
broker confirms that consumer is registered.
|
def terminal(self, out=None, border=None):
for qrcode in self:
qrcode.terminal(out=out, border=border)
|
Serializes the sequence of QR Codes as ANSI escape code.
See :py:meth:`QRCode.terminal()` for details.
|
def init_repo(path):
sh("git clone %s %s"%(pages_repo, path))
here = os.getcwd()
cd(path)
sh('git checkout gh-pages')
cd(here)
|
clone the gh-pages repo if we haven't already.
|
def read_data_types(self):
return {
'Binary': self.read_binary,
'BinaryArray': self.read_binary_array,
'KeyValue': self.read_key_value,
'KeyValueArray': self.read_key_value_array,
'String': self.read_string,
'StringArray': self.read_string_array,
'TCEntity': self.read_tc_entity,
'TCEntityArray': self.read_tc_entity_array,
}
|
Map of standard playbook variable types to read method.
|
def declareAsOntology(self, graph):
model = Model(graph)
ontology_file_id = 'MonarchData:' + self.name + ".ttl"
model.addOntologyDeclaration(ontology_file_id)
cur_time = datetime.now()
t_string = cur_time.strftime("%Y-%m-%d")
ontology_version = t_string
archive_url = 'MonarchArchive:' + 'ttl/' + self.name + '.ttl'
model.addOWLVersionIRI(ontology_file_id, archive_url)
model.addOWLVersionInfo(ontology_file_id, ontology_version)
|
The file we output needs to be declared as an ontology,
including its version information.
TEC: I am not convinced dipper reformatting external data as RDF triples
makes an OWL ontology (nor that it should be considered a goal).
Proper ontologies are built by ontologists. Dipper reformats data
and annotates/decorates it with a minimal set of carefully arranged
terms drawn from multiple proper ontologies.
Which allows the whole (dipper's RDF triples and parent ontologies)
to function as a single ontology we can reason over when combined
in a store such as SciGraph.
Including more than the minimal ontological terms in dipper's RDF
output constitutes a liability as it allows greater divergence
between dipper artifacts and the proper ontologies.
Further information will be augmented in the dataset object.
:param version:
:return:
|
def set_hex_color(self, color, *, index=0, transition_time=None):
values = {
ATTR_LIGHT_COLOR_HEX: color,
}
if transition_time is not None:
values[ATTR_TRANSITION_TIME] = transition_time
return self.set_values(values, index=index)
|
Set hex color of the light.
|
def by_month(self, chamber, year=None, month=None):
check_chamber(chamber)
now = datetime.datetime.now()
year = year or now.year
month = month or now.month
path = "{chamber}/votes/{year}/{month}.json".format(
chamber=chamber, year=year, month=month)
return self.fetch(path, parse=lambda r: r['results'])
|
Return votes for a single month, defaulting to the current month.
|
def centroids(self, instrument, min_abundance=1e-4, points_per_fwhm=25):
assert self.ptr != ffi.NULL
centroids = ims.spectrum_envelope_centroids(self.ptr, instrument.ptr,
min_abundance, points_per_fwhm)
return _new_spectrum(CentroidedSpectrum, centroids)
|
Estimates centroided peaks for a given instrument model.
:param instrument: instrument model
:param min_abundance: minimum abundance for including a peak
:param points_per_fwhm: grid density used for envelope calculation
:returns: peaks visible with the instrument used
:rtype: TheoreticalSpectrum
|
def _parse_quoted_key(self):
quote_style = self._current
key_type = None
dotted = False
for t in KeyType:
if t.value == quote_style:
key_type = t
break
if key_type is None:
raise RuntimeError("Should not have entered _parse_quoted_key()")
self.inc()
self.mark()
while self._current != quote_style and self.inc():
pass
key = self.extract()
if self._current == ".":
self.inc()
dotted = True
key += "." + self._parse_key().as_string()
key_type = KeyType.Bare
else:
self.inc()
return Key(key, key_type, "", dotted)
|
Parses a key enclosed in either single or double quotes.
|
def manage(self):
hookenv._run_atstart()
try:
hook_name = hookenv.hook_name()
if hook_name == 'stop':
self.stop_services()
else:
self.reconfigure_services()
self.provide_data()
except SystemExit as x:
if x.code is None or x.code == 0:
hookenv._run_atexit()
hookenv._run_atexit()
|
Handle the current hook by doing The Right Thing with the registered services.
|
def get_profile_ports(self, **kwargs):
uri = self._helper.build_uri_with_query_string(kwargs, '/profile-ports')
return self._helper.do_get(uri)
|
Retrieves the port model associated with a server or server hardware type and enclosure group.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
serverHardwareUri (str):
The URI of the server hardware associated with the resource.
Returns:
dict: Profile port.
|
def start(ctx, alias, description, f):
today = datetime.date.today()
try:
timesheet_collection = get_timesheet_collection_for_context(ctx, f)
except ParseError as e:
ctx.obj['view'].err(e)
return
t = timesheet_collection.latest()
today_entries = t.entries.filter(date=today)
if(today in today_entries and today_entries[today]
and isinstance(today_entries[today][-1].duration, tuple)
and today_entries[today][-1].duration[1] is not None):
new_entry_start_time = today_entries[today][-1].duration[1]
else:
new_entry_start_time = datetime.datetime.now()
description = ' '.join(description) if description else '?'
duration = (new_entry_start_time, None)
e = Entry(alias, duration, description)
t.entries[today].append(e)
t.save()
|
Use it when you start working on the given activity. This will add the
activity and the current time to your entries file. When you're finished,
use the stop command.
|
def update_assessment_taken(self, assessment_taken_form):
collection = JSONClientValidated('assessment',
collection='AssessmentTaken',
runtime=self._runtime)
if not isinstance(assessment_taken_form, ABCAssessmentTakenForm):
raise errors.InvalidArgument('argument type is not an AssessmentTakenForm')
if not assessment_taken_form.is_for_update():
raise errors.InvalidArgument('the AssessmentTakenForm is for update only, not create')
try:
if self._forms[assessment_taken_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('assessment_taken_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('assessment_taken_form did not originate from this session')
if not assessment_taken_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(assessment_taken_form._my_map)
self._forms[assessment_taken_form.get_id().get_identifier()] = UPDATED
return objects.AssessmentTaken(
osid_object_map=assessment_taken_form._my_map,
runtime=self._runtime,
proxy=self._proxy)
|
Updates an existing assessment taken.
arg: assessment_taken_form
(osid.assessment.AssessmentTakenForm): the form
containing the elements to be updated
raise: IllegalState - ``assessment_taken_form`` already used in
an update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``assessment_taken_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_offered_form`` did not
originate from
``get_assessment_taken_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
|