code | docs
---|---|
def wash_for_js(text):
from invenio_utils.html import escape_javascript_string
if isinstance(text, six.string_types):
return '"%s"' % escape_javascript_string(
text,
escape_for_html=False,
escape_CDATA=False,
escape_script_tag_with_quote=None)
else:
return text | DEPRECATED: use htmlutils.escape_javascript_string() instead,
and note that the returned value is no longer enclosed in quotes. |
def _post_init(self):
try:
return self.postinit()
except Exception as exc:
return self._onerror(Result.from_exception(exc, uuid=self.uuid)) | A post init trigger |
def check_required_params(self):
for param in self.REQUIRED_FIELDS:
if param not in self.params:
raise ValidationError("Missing parameter: {}".format(param)) | Check if all required parameters are set |
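A brief standalone usage sketch of the pattern above (this is not the real kser Entrypoint base class; the ValidationError stand-in and the field names are assumptions):

class ValidationError(Exception):
    pass

class CreateUser:
    REQUIRED_FIELDS = ("username", "email")   # assumed field names

    def __init__(self, **params):
        self.params = params

    def check_required_params(self):
        # Same loop as in the method above
        for param in self.REQUIRED_FIELDS:
            if param not in self.params:
                raise ValidationError("Missing parameter: {}".format(param))

try:
    CreateUser(username="alice").check_required_params()
except ValidationError as exc:
    print(exc)   # Missing parameter: email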
def _onsuccess(self, result):
if KSER_METRICS_ENABLED == "yes":
KSER_TASKS_STATUS.labels(
__hostname__, self.__class__.path, 'SUCCESS'
).inc()
if result:
result = self.result + result
else:
result = self.result
logger.info(
"{}.Success: {}[{}]: {}".format(
self.__class__.__name__, self.__class__.path, self.uuid, result
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
)
return self.onsuccess(result) | To execute on execution success
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result |
def _onerror(self, result):
if KSER_METRICS_ENABLED == "yes":
KSER_TASKS_STATUS.labels(
__hostname__, self.__class__.path, 'FAILED'
).inc()
if result:
result = self.result + result
else:
result = self.result
logger.error(
"{}.Failed: {}[{}]: {}".format(
self.__class__.__name__, self.__class__.path, self.uuid, result
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
)
return self.onerror(result) | To execute on execution failure
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result |
def _postrun(self, result):
logger.debug(
"{}.PostRun: {}[{}]".format(
self.__class__.__name__, self.__class__.path, self.uuid
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump()
)
)
return self.postrun(result) | To execute after execution
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result |
def _run(self):
if KSER_METRICS_ENABLED == "yes":
KSER_TASK_COUNT.inc()
logger.debug(
"{}.Run: {}[{}]".format(
self.__class__.__name__, self.__class__.path, self.uuid
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump()
)
)
return self.run() | Execution body
:return: Execution result
:rtype: kser.result.Result |
def unsafe_execute(self, result=None):
if result:
self.result += result
self._prerun()
return self._onsuccess(self._postrun(self._run())) | Unwrapped execution; can raise exceptions
:return: Execution result
:rtype: kser.result.Result |
def execute(self, result=None):
try:
return self.unsafe_execute(result=result)
except Exception as exc:
return self._onerror(Result.from_exception(exc, uuid=self.uuid)) | Execution wrapper that makes sure a result is returned
:return: Execution result
:rtype: kser.result.Result |
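To make the call order of the hooks above concrete, here is a compressed, self-contained sketch of the execute() path (prerun, run, postrun, onsuccess, with onerror catching any exception); the hook bodies and the Result stand-in are simplified assumptions, not kser's real implementations:

class SketchResult:
    """Simplified stand-in for kser.result.Result."""
    def __init__(self, uuid, message=""):
        self.uuid, self.message = uuid, message
    def __repr__(self):
        return "Result({!r}, {!r})".format(self.uuid, self.message)

class SketchEntrypoint:
    def __init__(self, uuid):
        self.uuid = uuid

    # Hooks; the real class wraps these with logging and Prometheus metrics.
    def _prerun(self): print("prerun")
    def _run(self): print("run"); return SketchResult(self.uuid, "ok")
    def _postrun(self, result): print("postrun"); return result
    def _onsuccess(self, result): print("onsuccess"); return result
    def _onerror(self, result): print("onerror"); return result

    def unsafe_execute(self, result=None):
        # Same ordering as above: prerun -> run -> postrun -> onsuccess
        self._prerun()
        return self._onsuccess(self._postrun(self._run()))

    def execute(self, result=None):
        # Wrapper guaranteeing that a Result comes back even on failure
        try:
            return self.unsafe_execute(result=result)
        except Exception as exc:
            return self._onerror(SketchResult(self.uuid, str(exc)))

print(SketchEntrypoint("42").execute())
# prints prerun, run, postrun, onsuccess, then: Result('42', 'ok')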
def to_Message(self, result=None):
return Message(
uuid=self.uuid, entrypoint=self.__class__.path, params=self.params,
result=result if result else self.result, metadata=self.metadata
) | Entrypoint -> Message
:param kser.result.Result result: Execution result
:return: Kafka message
:rtype: kser.schemas.Message |
def from_Message(cls, kmsg):
return cls(
uuid=kmsg.uuid, params=kmsg.params, result=kmsg.result,
metadata=kmsg.metadata
) | Message -> Entrypoint
:param kser.schemas.Message kmsg: Kafka message
:return: an entrypoint
:rtype: kser.entry.Entrypoint |
def save_as(self, filename=None):
if filename is None:
filename = self.filename
if filename is None:
filename = self.default_filename
if filename is None:
raise RuntimeError("Class '{}' has no default filename".format(self.__class__.__name__))
self._do_save_as(filename)
self.filename = filename | Dumps object contents into file on disk.
Args:
filename (optional): defaults to self.filename. If passed, self.filename
will be updated to filename. |
def load(self, filename=None):
assert not self.__flag_loaded, "File can be loaded only once"
if filename is None:
filename = self.default_filename
assert filename is not None, \
"{0!s} class has no default filename".format(self.__class__.__name__)
# Convention: trying to open empty file is an error,
# because it could be of (almost) any type.
size = os.path.getsize(filename)
if size == 0:
raise RuntimeError("Empty file: '{0!s}'".format(filename))
self._test_magic(filename)
self._do_load(filename)
self.filename = filename
self.__flag_loaded = True | Loads file and registers filename as attribute. |
def init_default(self):
import f311
if self.default_filename is None:
raise RuntimeError("Class '{}' has no default filename".format(self.__class__.__name__))
fullpath = f311.get_default_data_path(self.default_filename, class_=self.__class__)
self.load(fullpath)
self.filename = None | Initializes object with its default values
Tries to load self.default_filename from default
data directory. For safety, filename is reset to None so that it doesn't point to the
original file. |
def availability(self, availability):
allowed_values = ["available", "comingSoon", "retired"]
if availability is not None and availability not in allowed_values:
raise ValueError(
"Invalid value for `availability` ({0}), must be one of {1}"
.format(availability, allowed_values)
)
self._availability = availability | Sets the availability of this Product.
:param availability: The availability of this Product.
:type: str |
def stock_status(self, stock_status):
allowed_values = ["available", "alert", "unavailable"]
if stock_status is not None and stock_status not in allowed_values:
raise ValueError(
"Invalid value for `stock_status` ({0}), must be one of {1}"
.format(stock_status, allowed_values)
)
self._stock_status = stock_status | Sets the stock_status of this Product.
:param stock_status: The stock_status of this Product.
:type: str |
def create_product(cls, product, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_product_with_http_info(product, **kwargs)
else:
(data) = cls._create_product_with_http_info(product, **kwargs)
return data | Create Product
Create a new Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_product(product, async=True)
>>> result = thread.get()
:param async bool
:param Product product: Attributes of product to create (required)
:return: Product
If the method is called asynchronously,
returns the request thread. |
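For illustration only, assuming the generated methods above are exposed as classmethods on the Product model (as the cls parameter suggests) and that an API client has already been configured; note that `async` became a reserved word in Python 3.7, so on newer interpreters the flag has to be passed via dict expansion:

# Hypothetical usage; the Product fields shown are assumptions.
product = Product(name="Widget", availability="available")

# Synchronous call: returns the created Product directly.
created = Product.create_product(product)

# Asynchronous call: returns the request thread; .get() blocks for the result.
thread = Product.create_product(product, **{'async': True})
created = thread.get()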
def delete_product_by_id(cls, product_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_product_by_id_with_http_info(product_id, **kwargs)
else:
(data) = cls._delete_product_by_id_with_http_info(product_id, **kwargs)
return data | Delete Product
Delete an instance of Product by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_product_by_id(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_product_by_id(cls, product_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_product_by_id_with_http_info(product_id, **kwargs)
else:
(data) = cls._get_product_by_id_with_http_info(product_id, **kwargs)
return data | Find Product
Return single instance of Product by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_product_by_id(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to return (required)
:return: Product
If the method is called asynchronously,
returns the request thread. |
def list_all_products(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_products_with_http_info(**kwargs)
else:
(data) = cls._list_all_products_with_http_info(**kwargs)
return data | List Products
Return a list of Products
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_products(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Product]
If the method is called asynchronously,
returns the request thread. |
def replace_product_by_id(cls, product_id, product, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
else:
(data) = cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
return data | Replace Product
Replace all attributes of Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_product_by_id(product_id, product, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to replace (required)
:param Product product: Attributes of product to replace (required)
:return: Product
If the method is called asynchronously,
returns the request thread. |
def update_product_by_id(cls, product_id, product, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_product_by_id_with_http_info(product_id, product, **kwargs)
else:
(data) = cls._update_product_by_id_with_http_info(product_id, product, **kwargs)
return data | Update Product
Update attributes of Product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_product_by_id(product_id, product, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: ID of product to update. (required)
:param Product product: Attributes of product to update. (required)
:return: Product
If the method is called asynchronously,
returns the request thread. |
def asserts(input_value, rule, message=''):
assert callable(rule) or type(rule)==bool, 'asserts needs rule to be a callable function or a test boolean'
assert isinstance(message, str), 'asserts needs message to be a string'
# if the message is empty and rule is callable, fill message with rule's source code
if len(message)==0 and callable(rule):
try:
s = getsource(rule).splitlines()[0].strip()
except:
s = repr(rule).strip()
message = 'illegal input of {} breaks - {}'.format(input_value, s)
if callable(rule):
# if rule is a function, run the function and assign it to rule
rule = rule(input_value)
# now, assert the rule and return the input value
assert rule, message
return input_value | This function lets you write asserts inside generators, for the
moments where you actually want the program to halt when certain values
are seen. |
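A small usage sketch: asserts() returns its input unchanged when the rule holds, which is what makes it usable inside generator pipelines; the sample data below is made up.

readings = [3, 7, 2, 9]
# Each value is passed through; the pipeline halts on the first out-of-range item.
checked = (asserts(r, lambda v: 0 <= v <= 10, "reading out of range") for r in readings)
print(list(checked))   # [3, 7, 2, 9]

try:
    asserts(42, lambda v: v < 10)
except AssertionError as exc:
    print(exc)   # message includes the offending value and the rule's source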
def print(*a):
try:
_print(*a)
return a[0] if len(a) == 1 else a
except:
_print(*a) | A print() that returns what you give it instead of None |
def _to_backend(self, p):
if isinstance(p, self._cmp_base):
return p.path
elif isinstance(p, self._backend):
return p
elif self._backend is unicode and isinstance(p, bytes):
return p.decode(self._encoding)
elif self._backend is bytes and isinstance(p, unicode):
return p.encode(self._encoding,
'surrogateescape' if PY3 else 'strict')
else:
raise TypeError("Can't construct a %s from %r" % (
self.__class__.__name__, type(p))) | Converts something to the correct path representation.
If given a Path, this will simply unpack it, if it's the correct type.
If given the correct backend, it will return that.
If given bytes for unicode or unicode for bytes, it will encode/decode
with a reasonable encoding. Note that these operations can raise
UnicodeError! |
def parent(self):
p = self._lib.dirname(self.path)
p = self.__class__(p)
return p | The parent directory of this path. |
def unicodename(self):
n = self._lib.basename(self.path)
if self._backend is unicode:
return n
else:
return n.decode(self._encoding, 'replace') | The name of this path as unicode. |
def split_root(self):
if not PY3 and hasattr(self._lib, 'splitunc'):
root, rest = self._lib.splitunc(self.path)
if root:
if rest.startswith(self._sep):
root += self._sep
rest = rest[1:]
return self.__class__(root), self.__class__(rest)
root, rest = self._lib.splitdrive(self.path)
if root:
if rest.startswith(self._sep):
root += self._sep
rest = rest[1:]
return self.__class__(root), self.__class__(rest)
if self.path.startswith(self._sep):
return self.__class__(self._sep), self.__class__(rest[1:])
return self.__class__(''), self | Splits this path into a pair (drive, location).
Note that, because all paths are normalized, a root of ``'.'`` will be
returned for relative paths. |
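A few illustrative splits, assuming rpaths' PosixPath and WindowsPath classes are what back this method:

from rpaths import PosixPath, WindowsPath

print(PosixPath('/var/log/syslog').split_root())    # root '/', location 'var/log/syslog'
print(WindowsPath('C:\\Temp\\f.txt').split_root())  # root 'C:\', location 'Temp\f.txt'
print(PosixPath('relative/dir').split_root())       # root '.', location unchanged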
def rel_path_to(self, dest):
dest = self.__class__(dest)
orig_list = self.norm_case()._components()
dest_list = dest._components()
i = -1
for i, (orig_part, dest_part) in enumerate(zip(orig_list, dest_list)):
if orig_part != self._normcase(dest_part):
up = ['..'] * (len(orig_list) - i)
return self.__class__(*(up + dest_list[i:]))
if len(orig_list) <= len(dest_list):
if len(dest_list) > i + 1:
return self.__class__(*dest_list[i + 1:])
else:
return self.__class__('')
else:
up = ['..'] * (len(orig_list) - i - 1)
return self.__class__(*up) | Builds a relative path leading from this one to the given `dest`.
Note that these paths might be both relative, in which case they'll be
assumed to start from the same directory. |
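A worked example of the computation, using rpaths' PosixPath (an assumption about the concrete subclass):

from rpaths import PosixPath

base = PosixPath('/var/lib/app')
dest = PosixPath('/var/log/app.log')
# Common prefix is '/var'; two components ('lib', 'app') are popped, then 'log/app.log' appended.
print(base.rel_path_to(dest))   # ../../log/app.log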
def lies_under(self, prefix):
orig_list = self.norm_case()._components()
pref_list = self.__class__(prefix).norm_case()._components()
return (len(orig_list) >= len(pref_list) and
orig_list[:len(pref_list)] == pref_list) | Indicates if the `prefix` is a parent of this path. |
def tempfile(cls, suffix='', prefix=None, dir=None, text=False):
if prefix is None:
prefix = tempfile.template
if dir is not None:
# Note that this is not safe on Python 2
# There is no work around, apart from not using the tempfile module
dir = str(Path(dir))
fd, filename = tempfile.mkstemp(suffix, prefix, dir, text)
return fd, cls(filename).absolute() | Returns a new temporary file.
The return value is a pair (fd, path) where fd is the file descriptor
returned by :func:`os.open`, and path is a :class:`~rpaths.Path` to it.
:param suffix: If specified, the file name will end with that suffix,
otherwise there will be no suffix.
:param prefix: If specified, the file name will begin with that prefix,
otherwise a default prefix is used.
:param dir: If specified, the file will be created in that directory,
otherwise a default directory is used.
:param text: If true, the file is opened in text mode. Else (the
default) the file is opened in binary mode. On some operating
systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
The caller is responsible for deleting the file when done with it. |
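Illustrative use of the classmethod above; as the docstring says, closing the descriptor and deleting the file is the caller's job:

import os
from rpaths import Path

fd, path = Path.tempfile(suffix='.txt')
try:
    os.write(fd, b'scratch data')
finally:
    os.close(fd)
    os.remove(str(path))   # caller is responsible for cleanup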
def tempdir(cls, suffix='', prefix=None, dir=None):
if prefix is None:
prefix = tempfile.template
if dir is not None:
# Note that this is not safe on Python 2
# There is no work around, apart from not using the tempfile module
dir = str(Path(dir))
dirname = tempfile.mkdtemp(suffix, prefix, dir)
return cls(dirname).absolute() | Returns a new temporary directory.
Arguments are as for :meth:`~rpaths.Path.tempfile`, except that the
`text` argument is not accepted.
The directory is readable, writable, and searchable only by the
creating user.
The caller is responsible for deleting the directory when done with it. |
def rel_path_to(self, dest):
return super(Path, self.absolute()).rel_path_to(Path(dest).absolute()) | Builds a relative path leading from this one to another.
Note that these paths might be both relative, in which case they'll be
assumed to start from the same directory.
Contrary to :class:`~rpaths.AbstractPath`'s version, this will also
work if one path is relative and the other absolute. |
def listdir(self, pattern=None):
files = [self / self.__class__(p) for p in os.listdir(self.path)]
if pattern is None:
pass
elif callable(pattern):
files = filter(pattern, files)
else:
if isinstance(pattern, backend_types):
if isinstance(pattern, bytes):
pattern = pattern.decode(self._encoding, 'replace')
start, full_re, _int_re = pattern2re(pattern)
elif isinstance(pattern, Pattern):
start, full_re = pattern.start_dir, pattern.full_regex
else:
raise TypeError("listdir() expects pattern to be a callable, "
"a regular expression or a string pattern, "
"got %r" % type(pattern))
# If pattern contains slashes (other than first and last chars),
# listdir() will never match anything
if start:
return []
files = [f for f in files if full_re.search(f.unicodename)]
return files | Returns a list of all the files in this directory.
The special entries ``'.'`` and ``'..'`` will not be returned.
:param pattern: A pattern to match directory entries against.
:type pattern: NoneType | Callable | Pattern | unicode | bytes |
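Example calls, assuming an rpaths Path pointing at an existing directory; as documented, the pattern may be omitted, a glob-like string, a compiled Pattern, or a callable:

from rpaths import Path

d = Path('.')
print(d.listdir())                                        # every entry except '.' and '..'
print(d.listdir('*.py'))                                  # glob-style string pattern
print(d.listdir(lambda p: p.unicodename.startswith('.'))) # callable filter over Path objects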
def mkdir(self, name=None, parents=False, mode=0o777):
if name is not None:
return (self / name).mkdir(parents=parents, mode=mode)
if self.exists():
return
if parents:
os.makedirs(self.path, mode)
else:
os.mkdir(self.path, mode)
return self | Creates that directory, or a directory under this one.
``path.mkdir(name)`` is a shortcut for ``(path/name).mkdir()``.
:param name: Path component to append to this path before creating the
directory.
:param parents: If True, missing directories leading to the path will
be created too, recursively. If False (the default), the parent of
that path needs to exist already.
:param mode: Permissions associated with the directory on creation,
without race conditions. |
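Example, again assuming rpaths' Path; the name shortcut and the parents flag combine like this:

from rpaths import Path

# Equivalent to (Path('build') / 'artifacts').mkdir(parents=True);
# 'build' is created too because parents=True uses os.makedirs().
out = Path('build').mkdir('artifacts', parents=True)
print(out)   # build/artifacts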
def rmdir(self, parents=False):
if parents:
os.removedirs(self.path)
else:
os.rmdir(self.path) | Removes this directory, provided it is empty.
Use :func:`~rpaths.Path.rmtree` if it might still contain files.
:param parents: If set to True, it will also destroy every empty
directory above it until an error is encountered. |
def rename(self, new, parents=False):
if parents:
os.renames(self.path, self._to_backend(new))
else:
os.rename(self.path, self._to_backend(new)) | Renames this path to the given new location.
:param new: New path where to move this one.
:param parents: If set to True, it will create the parent directories
of the target if they don't exist. |
def copyfile(self, target):
shutil.copyfile(self.path, self._to_backend(target)) | Copies this file to the given `target` location. |
def copymode(self, target):
shutil.copymode(self.path, self._to_backend(target)) | Copies the mode of this file on the `target` file.
The owner is not copied. |
def copystat(self, target):
shutil.copystat(self.path, self._to_backend(target)) | Copies the permissions, times and flags from this to the `target`.
The owner is not copied. |
def copy(self, target):
shutil.copy(self.path, self._to_backend(target)) | Copies this file to the `target`, which might be a directory.
The permissions are copied. |
def copytree(self, target, symlinks=False):
shutil.copytree(self.path, self._to_backend(target), symlinks) | Recursively copies this directory to the `target` location.
The permissions and times are copied (like
:meth:`~rpaths.Path.copystat`).
If the optional `symlinks` flag is true, symbolic links in the source
tree result in symbolic links in the destination tree; if it is false,
the contents of the files pointed to by symbolic links are copied. |
def move(self, target):
shutil.move(self.path, self._to_backend(target)) | Recursively moves a file or directory to the given target location. |
def open(self, mode='r', name=None, **kwargs):
if name is not None:
return io.open((self / name).path, mode=mode, **kwargs)
else:
return io.open(self.path, mode=mode, **kwargs) | Opens this file, or a file under this directory.
``path.open(mode, name)`` is a shortcut for ``(path/name).open(mode)``.
Note that this uses :func:`io.open()` which behaves differently from
:func:`open()` on Python 2; see the appropriate documentation.
:param name: Path component to append to this path before opening the
file. |
def matches(self, path):
path = self._prepare_path(path)
return self.full_regex.search(path) is not None | Tests if the given path matches the pattern.
Note that the unicode translation of the path is matched, so
replacement characters might have been added. |
def may_contain_matches(self, path):
path = self._prepare_path(path)
return self.int_regex.search(path) is not None | Tests whether it's possible for paths under the given one to match.
If this method returns False, no path under the given one will match the
pattern. |
def _fetch(url,):
import logging as log
import socket
from eventlet import Timeout
from eventlet.green import urllib2
import sys
# TRY AND DOWNLOAD X TIMES BEFORE QUITTING
tries = 10
count = 1
downloaded = False
body = None
while count < tries and not downloaded:
try:
log.debug('downloading ' + url.get_full_url())
body = urllib2.urlopen(url).read()
downloaded = True
except socket.timeout as e:
print("timeout on URL, trying again")
count += 1
except Exception as e:
if "[Errno 60]" in str(e):
log.warning('timeout on URL, trying again')
count += 1
elif "Error 502" in str(e):
log.warning('proxy error on URL, trying again')
count += 1
else:
log.warning(
"could not download " + url.get_full_url() + " : " + str(e) + "\n")
url = None
body = None
downloaded = True
return url, body | *Retrieve an HTML document or file from the web at a given URL*
**Key Arguments:**
- ``url`` -- the URL of the document or file
**Return:**
- ``url`` -- the URL of the document or file, or None if an error occurred
- ``body`` -- the text content of the HTML document. |
def tgcanrecruit(self, region=None):
params = {'from': normalize(region)} if region is not None else {}
@api_query('tgcanrecruit', **params)
async def result(_, root):
return bool(int(root.find('TGCANRECRUIT').text))
return result(self) | Whether the nation will receive a recruitment telegram.
Useful in conjunction with the Telegrams API.
Parameters
----------
region : str
Name of the region you are recruiting for.
Returns
-------
an :class:`ApiQuery` of bool |
async def freedom(self, root):
elem = root.find('FREEDOM')
result = OrderedDict()
result['Civil Rights'] = elem.find('CIVILRIGHTS').text
result['Economy'] = elem.find('ECONOMY').text
result['Political Freedom'] = elem.find('POLITICALFREEDOM').text
return result | Nation's `Freedoms`: three basic indicators of the nation's
Civil Rights, Economy, and Political Freedom, as expressive
adjectives.
Returns
-------
an :class:`ApiQuery` of :class:`collections.OrderedDict` with \
keys and values of str
Keys being, in order: ``Civil Rights``, ``Economy``, and
``Political Freedom``. |
async def freedomscores(self, root):
elem = root.find('FREEDOMSCORES')
result = OrderedDict()
result['Civil Rights'] = int(elem.find('CIVILRIGHTS').text)
result['Economy'] = int(elem.find('ECONOMY').text)
result['Political Freedom'] = int(elem.find('POLITICALFREEDOM').text)
return result | Nation's `Freedoms`: three basic indicators of the nation's
Civil Rights, Economy, and Political Freedom, as percentages.
Returns
-------
an :class:`ApiQuery` of :class:`collections.OrderedDict` with \
keys of str and values of int
Keys being, in order: ``Civil Rights``, ``Economy``, and
``Political Freedom``. |
async def govt(self, root):
elem = root.find('GOVT')
result = OrderedDict()
result['Administration'] = float(elem.find('ADMINISTRATION').text)
result['Defense'] = float(elem.find('DEFENCE').text) # match the web UI
result['Education'] = float(elem.find('EDUCATION').text)
result['Environment'] = float(elem.find('ENVIRONMENT').text)
result['Healthcare'] = float(elem.find('HEALTHCARE').text)
result['Industry'] = float(elem.find('COMMERCE').text) # Don't ask
result['International Aid'] = float(elem.find('INTERNATIONALAID').text)
result['Law & Order'] = float(elem.find('LAWANDORDER').text)
result['Public Transport'] = float(elem.find('PUBLICTRANSPORT').text)
result['Social Policy'] = float(elem.find('SOCIALEQUALITY').text) # Shh
result['Spirituality'] = float(elem.find('SPIRITUALITY').text)
result['Welfare'] = float(elem.find('WELFARE').text)
return result | Nation's government expenditure, as percentages.
Returns
-------
an :class:`ApiQuery` of :class:`collections.OrderedDict` with \
keys of str and values of float
Keys being, in order: ``Administration``, ``Defense``,
``Education``, ``Environment``, ``Healthcare``, ``Industry``,
``International Aid``, ``Law & Order``, ``Public Transport``,
``Social Policy``, ``Spirituality``, and ``Welfare``. |
async def sectors(self, root):
elem = root.find('SECTORS')
result = OrderedDict()
result['Black Market (estimated)'] = float(elem.find('BLACKMARKET').text)
result['Government'] = float(elem.find('GOVERNMENT').text)
result['Private Industry'] = float(elem.find('INDUSTRY').text)
result['State-Owned Industry'] = float(elem.find('PUBLIC').text)
return result | Components of the nation's economy, as percentages.
Returns
-------
an :class:`ApiQuery` of :class:`collections.OrderedDict` with \
keys of str and values of float
Keys being, in order: ``Black Market (estimated)``, ``Government``,
``Private Industry``, and ``State-Owned Industry``. |
async def deaths(self, root):
return {
elem.get('type'): float(elem.text)
for elem in root.find('DEATHS')
} | Causes of death in the nation, as percentages.
Returns
-------
an :class:`ApiQuery` of dict with keys of str and values of float |
async def endorsements(self, root):
text = root.find('ENDORSEMENTS').text
return [Nation(name) for name in text.split(',')] if text else [] | Regional neighbours endorsing the nation.
Returns
-------
an :class:`ApiQuery` of a list of :class:`Nation` |
def verify(self, checksum, *, token=None):
params = {'a': 'verify', 'checksum': checksum}
if token:
params['token'] = token
# Needed so that we get output in xml, as opposed to
# plain text. It doesn't actually matter what the
# q param is, it's just important that it's not empty.
@api_query('i_need_the_output_in_xml', **params)
async def result(self, root):
return bool(int(root.find('VERIFY').text))
return result(self) | Interface to the `NationStates Verification API
<https://www.nationstates.net/pages/api.html#verification>`_.
Parameters
----------
checksum : str
The user-supplied verification code. Expires if the nation
logs out, if it performs a significant in-game action such
as moving regions or endorsing another nation, and after it
is successfully verified.
token : str
A token specific to your service and the nation being verified.
Returns
-------
an :class:`ApiQuery` of bool |
async def description(self):
resp = await self._call_web(f'nation={self.id}')
return html.unescape(
re.search(
'<div class="nationsummary">(.+?)<p class="nationranktext">',
resp.text,
flags=re.DOTALL
)
.group(1)
.replace('\n', '')
.replace('</p>', '')
.replace('<p>', '\n\n')
.strip()
) | Nation's full description, as seen on its in-game page.
Returns
-------
an awaitable of str |
def accept(self):
return self._issue._nation._accept_issue(self._issue.id, self._id) | Accept the option.
Returns
-------
an awaitable of :class:`IssueResult` |
def list_all_promotions(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_promotions_with_http_info(**kwargs)
else:
(data) = cls._list_all_promotions_with_http_info(**kwargs)
return data | List Promotions
Return a list of Promotions
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_promotions(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Promotion]
If the method is called asynchronously,
returns the request thread. |
def pip_upgrade_all_user(line):
import pip
for dist in pip.get_installed_distributions(user_only=True):
do_pip(["install", "--upgrade", "--user", dist.project_name]) | Attempt to upgrade all packages installed with --user |
def pip_upgrade_all(line):
from pip import get_installed_distributions
user = set(d.project_name for d in get_installed_distributions(user_only=True))
all = set(d.project_name for d in get_installed_distributions())
for dist in all - user:
do_pip(["install", "--upgrade", dist])
for dist in user:
do_pip(["install", "--upgrade", "--user", dist]) | Attempt to upgrade all packages |
def enc_name_descr(name, descr, color=a99.COLOR_DESCR):
return enc_name(name, color)+"<br>"+descr | Encodes html given name and description. |
def style_checkboxes(widget):
ww = widget.findChildren(QCheckBox)
for w in ww:
w.setStyleSheet("QCheckBox:focus {border: 1px solid #000000;}") | Iterates over widget children to change checkboxes stylesheet.
The default rendering of checkboxes does not make it possible to tell a focused one
from an unfocused one. |
def check_return_space(event, callable_):
if event.type() == QEvent.KeyPress:
if event.key() in [Qt.Key_Return, Qt.Key_Space]:
callable_()
return True
return False | Checks if event corresponds to Return/Space being pressed and calls callable_ if so. |
def are_you_sure(flag_changed, evt, parent=None, title="File has been changed",
msg="Are you sure you want to exit?"):
if flag_changed:
r = QMessageBox.question(parent, title, msg,
QMessageBox.Yes|QMessageBox.No, QMessageBox.Yes)
if r != QMessageBox.Yes:
evt.ignore() | "Are you sure you want to exit" question dialog.
If flag_changed, shows question dialog. If answer is not yes, calls evt.ignore()
Arguments:
flag_changed
evt -- QCloseEvent instance
parent=None -- parent form, used to center the question dialog
title -- title for question dialog
msg -- text of question dialog
Returns True or False. True means: "yes, I want to exit" |
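Typical hookup in a widget's closeEvent, sketched assuming PyQt5 and a main-window subclass that tracks an unsaved-changes flag:

from PyQt5.QtWidgets import QMainWindow

class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self._changed = False   # set to True whenever the document is edited

    def closeEvent(self, evt):
        # If the user answers "No", evt.ignore() is called and the window stays open.
        are_you_sure(self._changed, evt, parent=self,
                     msg="Discard unsaved changes and exit?")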
def reset_table_widget(t, rowCount, colCount):
t.reset()
t.horizontalHeader().reset()
t.clear()
t.sortItems(-1)
t.setRowCount(rowCount)
t.setColumnCount(colCount) | Clears and resizes a table widget. |
def show_edit_form(obj, attrs=None, title="", toolTips=None):
if attrs is None:
if hasattr(obj, "keys"):
attrs = list(obj.keys())
else:
raise RuntimeError("attrs is None and cannot determine it from obj")
specs = []
for i, name in enumerate(attrs):
# Tries as attribute, then as key
try:
value = obj.__getattribute__(name)
except AttributeError:
value = obj[name]
if value is None:
value = "" # None becomes str
dict_ = {"value": value}
if toolTips is not None:
dict_["toolTip"] = toolTips[i]
dict_["tooltip"] = toolTips[i]
specs.append((name, dict_))
form = XParametersEditor(specs=specs, title=title)
r = form.exec_()
return r, form | Shows parameters editor modal form.
Arguments:
obj: object to extract attribute values from, or a dict-like
attrs: list of attribute names
title:
toolTips: |
def place_left_top(window, width=None, height=None):
if width is None:
width = window.width()
if height is None:
height = window.height()
window.setGeometry(_DESKTOP_OFFSET_LEFT, _DESKTOP_OFFSET_TOP, width, height) | Places window in top left corner of screen.
Arguments:
window -- a QWidget
width=None -- window width, in case you want to change it (if not passed, not changed)
height=None -- window height, in case you want to change it (if not passed, not changed) |
def place_center(window, width=None, height=None):
screenGeometry = QApplication.desktop().screenGeometry()
w, h = window.width(), window.height()
if width is not None or height is not None:
w = width if width is not None else w
h = height if height is not None else h
window.setGeometry(0, 0, w, h)
x = (screenGeometry.width() - w) // 2
y = (screenGeometry.height() - h) // 2
window.move(x, y) | Places window in the center of the screen. |
def snap_left(window, width=None):
if not width:
width = window.width()
rect = QApplication.desktop().screenGeometry()
window.setGeometry(_DESKTOP_OFFSET_LEFT, _DESKTOP_OFFSET_TOP, width, rect.height()) | Snaps window to left of desktop.
Arguments:
window -- a QWidget
width=None -- window width, in case you want to change it (if not passed, not changed) |
def snap_right(window, width=None):
if not width:
width = window.width()
rect = QApplication.desktop().screenGeometry()
window.setGeometry(rect.width()-width, _DESKTOP_OFFSET_TOP, width, rect.height()) | Snaps window to right of desktop.
Arguments:
window -- a QWidget
width=None -- window width, in case you want to change it (if not passed, not changed) |
def get_matplotlib_layout(widget, flag_toolbar=True):
fig = plt.figure()
canvas = FigureCanvas(fig)
# self.canvas.mpl_connect('button_press_event', self.on_plot_click)
layout = QVBoxLayout(widget)
if flag_toolbar:
toolbar = NavigationToolbar2QT(canvas, widget)
layout.addWidget(toolbar)
layout.addWidget(canvas)
a99.set_margin(layout, 0)
return fig, canvas, layout | Creates figure, toolbar, layout, sets widget layout
Returns figure, canvas, layout
Reference: http://stackoverflow.com/questions/12459811 |
def get_icon(keyword):
filename = a99.get_path( "icons", keyword + ".png")
if not os.path.isfile(filename):
raise FileNotFoundError("File '{}' does not exist".format(filename))
return QIcon(filename) | Transforms a PNG file into a QIcon
Looks for a file named <keyword>.png in the "icons" directory
If the file does not exist, raises FileNotFoundError |
def get_QApplication(args=[]):
global _qapp
if _qapp is None:
QCoreApplication.setAttribute(Qt.AA_X11InitThreads)
_qapp = QApplication(args)
return _qapp | Returns the QApplication instance, creating it if it does not yet exist. |
def table_info_to_parameters(table_info):
# Example of item in table_info:
# MyDBRow([('cid', 0), ('name', 'id'), ('type', 'integer'), ('notnull', 0), ('dflt_value', None), ('pk', 1)])
opbj = a99.Parameters()
for field_info in table_info.values():
p = a99.Parameter()
if field_info.type == "integer":
p.type = int
elif field_info.type == "real":
p.type = float
else:
p.type = str
p.name = field_info.name
if field_info.dflt_value is not None:
p.value = field_info.dflt_value
opbj.params.append(p)
return opbj | Converts a list of MyDBRow into a parameters.Parameters object
This facilitates transferring data from an SQLite table row to an XParameterEditor window
See also: get_table_info() |
def get_frame():
ret = QFrame()
ret.setLineWidth(1)
ret.setMidLineWidth(0)
ret.setFrameShadow(QFrame.Sunken)
ret.setFrameShape(QFrame.Box)
return ret | Returns a QFrame formatted in a particular way |
def set_checkbox_value(w, value):
save = w.blockSignals(True)
try:
w.setChecked(bool(value))
finally:
w.blockSignals(save) | Sets a checkbox's "checked" property + signal blocking + value tolerance
Args:
w: QCheckBox instance
value: something that can be converted to a bool |
def add_signal(self, signal):
self.__signals.append(signal)
if self.__connected:
# Connects signal if the current state is "connected"
self.__connect_signal(signal) | Adds "input" signal to connected signals.
Internally connects the signal to a control slot. |
def connect_all(self):
if self.__connected:
return # assert not self.__connected, "connect_all() already in \"connected\" state"
with self.__lock:
for signal in self.__signals:
self.__connect_signal(signal)
if self.__slot is not None:
self.__sigDelayed.connect(self.__slot, Qt.QueuedConnection)
self.__connected = True | [Re-]connects all signals and slots.
If already in "connected" state, ignores the call. |
def disconnect_all(self):
if not self.__connected:
return # assert self.__connected, "disconnect_all() already in \"disconnected\" state"
self.__disconnecting = True
try:
for signal in self.__signals:
signal.disconnect(self.__signalReceived)
if self.__slot is not None:
self.__sigDelayed.disconnect(self.__slot)
self.__connected = False
finally:
self.__disconnecting = False | Disconnects all signals and slots.
If already in "disconnected" state, ignores the call. |
def __signalReceived(self, *args):
if self.__disconnecting:
return
with self.__lock:
self.__args = args
if self.__rateLimit == 0:
self.__timer.stop()
self.__timer.start((self.__delay * 1000) + 1)
else:
now = time.time()
if self.__lastFlushTime is None:
leakTime = 0
else:
lastFlush = self.__lastFlushTime
leakTime = max(0, (lastFlush + (1.0 / self.__rateLimit)) - now)
self.__timer.stop()
# Note: original was min() below.
timeout = (max(leakTime, self.__delay) * 1000) + 1
self.__timer.start(timeout) | Received signal. Cancel previous timer and store args to be forwarded later. |
def __flush(self):
if self.__args is None or self.__disconnecting:
return False
#self.emit(self.signal, *self.args)
self.__sigDelayed.emit(self.__args)
self.__args = None
self.__timer.stop()
self.__lastFlushTime = time.time()
return True | If there is a signal queued up, send it now. |
def clean_indicators(indicators):
output = list()
for indicator in indicators:
strip = ['http://', 'https://']
for item in strip:
indicator = indicator.replace(item, '')
indicator = indicator.strip('.').strip()
parts = indicator.split('/')
if len(parts) > 0:
indicator = parts.pop(0)
output.append(indicator)
output = list(set(output))
return output | Remove any extra details from indicators. |
def hash_values(values, alg="md5"):
import hashlib
if alg not in ['md5', 'sha1', 'sha256']:
raise Exception("Invalid hashing algorithm!")
hasher = getattr(hashlib, alg)
if type(values) == str:
output = hasher(values).hexdigest()
elif type(values) == list:
output = list()
for item in values:
output.append(hasher(item).hexdigest())
return output | Hash a list of values. |
def check_whitelist(values):
import os
import tldextract
whitelisted = list()
for name in ['alexa.txt', 'cisco.txt']:
config_path = os.path.expanduser('~/.config/blockade')
file_path = os.path.join(config_path, name)
whitelisted += [x.strip() for x in open(file_path, 'r').readlines()]
output = list()
for item in values:
ext = tldextract.extract(item)
if ext.registered_domain in whitelisted:
continue
output.append(item)
return output | Check the indicators against known whitelists. |
def cache_items(values):
import os
config_path = os.path.expanduser('~/.config/blockade')
file_path = os.path.join(config_path, 'cache.txt')
if not os.path.isfile(file_path):
open(file_path, 'w').close()
written = [x.strip() for x in open(file_path, 'r').readlines()]
handle = open(file_path, 'a')
for item in values:
# Because of the option to submit in clear or hashed, we need to make
# sure we're not re-hashing before adding.
if is_hashed(item):
hashed = item
else:
hashed = hash_values(item)
if hashed in written:
continue
handle.write(hashed + "\n")
handle.close()
return True | Cache indicators that were successfully sent to avoid dups. |
def prune_cached(values):
import os
config_path = os.path.expanduser('~/.config/blockade')
file_path = os.path.join(config_path, 'cache.txt')
if not os.path.isfile(file_path):
return values
cached = [x.strip() for x in open(file_path, 'r').readlines()]
output = list()
for item in values:
hashed = hash_values(item)
if hashed in cached:
continue
output.append(item)
return output | Remove the items that have already been cached. |
def get_logger(name):
import logging
import sys
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
shandler = logging.StreamHandler(sys.stdout)
fmt = ""
fmt += '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s'
fmtr = logging.Formatter(fmt)
shandler.setFormatter(fmtr)
logger.addHandler(shandler)
return logger | Get a logging instance we can use. |
def process_whitelists():
import csv
import grequests
import os
import StringIO
import zipfile
mapping = {
'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip': {
'name': 'alexa.txt'
}, 'http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip': {
'name': 'cisco.txt'
}
}
rs = (grequests.get(u) for u in mapping.keys())
responses = grequests.map(rs)
for r in responses:
data = zipfile.ZipFile(StringIO.StringIO(r.content)).read('top-1m.csv')
stream = StringIO.StringIO(data)
reader = csv.reader(stream, delimiter=',', quoting=csv.QUOTE_MINIMAL)
items = [row[1].strip() for row in reader]
stream.close()
config_path = os.path.expanduser('~/.config/blockade')
file_path = os.path.join(config_path, mapping[r.url]['name'])
handle = open(file_path, 'w')
for item in items:
if item.count('.') == 0:
continue
handle.write(item + "\n")
handle.close()
return True | Download approved top 1M lists. |
def mode(self, mode):
allowed_values = ["test", "live"]
if mode is not None and mode not in allowed_values:
raise ValueError(
"Invalid value for `mode` ({0}), must be one of {1}"
.format(mode, allowed_values)
)
self._mode = mode | Sets the mode of this BraintreeGateway.
:param mode: The mode of this BraintreeGateway.
:type: str |
def create_braintree_gateway(cls, braintree_gateway, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_braintree_gateway_with_http_info(braintree_gateway, **kwargs)
else:
(data) = cls._create_braintree_gateway_with_http_info(braintree_gateway, **kwargs)
return data | Create BraintreeGateway
Create a new BraintreeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_braintree_gateway(braintree_gateway, async=True)
>>> result = thread.get()
:param async bool
:param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to create (required)
:return: BraintreeGateway
If the method is called asynchronously,
returns the request thread. |
def delete_braintree_gateway_by_id(cls, braintree_gateway_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs)
else:
(data) = cls._delete_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs)
return data | Delete BraintreeGateway
Delete an instance of BraintreeGateway by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_braintree_gateway_by_id(braintree_gateway_id, async=True)
>>> result = thread.get()
:param async bool
:param str braintree_gateway_id: ID of braintreeGateway to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_braintree_gateway_by_id(cls, braintree_gateway_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs)
else:
(data) = cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs)
return data | Find BraintreeGateway
Return single instance of BraintreeGateway by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_braintree_gateway_by_id(braintree_gateway_id, async=True)
>>> result = thread.get()
:param async bool
:param str braintree_gateway_id: ID of braintreeGateway to return (required)
:return: BraintreeGateway
If the method is called asynchronously,
returns the request thread. |
def list_all_braintree_gateways(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_braintree_gateways_with_http_info(**kwargs)
else:
(data) = cls._list_all_braintree_gateways_with_http_info(**kwargs)
return data | List BraintreeGateways
Return a list of BraintreeGateways
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_braintree_gateways(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[BraintreeGateway]
If the method is called asynchronously,
returns the request thread. |
def replace_braintree_gateway_by_id(cls, braintree_gateway_id, braintree_gateway, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
else:
(data) = cls._replace_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
return data | Replace BraintreeGateway
Replace all attributes of BraintreeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_braintree_gateway_by_id(braintree_gateway_id, braintree_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str braintree_gateway_id: ID of braintreeGateway to replace (required)
:param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to replace (required)
:return: BraintreeGateway
If the method is called asynchronously,
returns the request thread. |
def update_braintree_gateway_by_id(cls, braintree_gateway_id, braintree_gateway, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
else:
(data) = cls._update_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
return data | Update BraintreeGateway
Update attributes of BraintreeGateway
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_braintree_gateway_by_id(braintree_gateway_id, braintree_gateway, async=True)
>>> result = thread.get()
:param async bool
:param str braintree_gateway_id: ID of braintreeGateway to update. (required)
:param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to update. (required)
:return: BraintreeGateway
If the method is called asynchronously,
returns the request thread. |
def cut_spectrum(sp, l0, lf):
if l0 >= lf:
raise ValueError("l0 must be lower than lf")
idx0 = np.argmin(np.abs(sp.x - l0))
idx1 = np.argmin(np.abs(sp.x - lf))
out = copy.deepcopy(sp)
out.x = out.x[idx0:idx1]
out.y = out.y[idx0:idx1]
return out | Cuts spectrum given a wavelength interval, leaving the original intact
Args:
sp: Spectrum instance
l0: initial wavelength
lf: final wavelength
Returns:
Spectrum: cut spectrum |
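For example, with a minimal stand-in object exposing the x/y arrays cut_spectrum relies on (the real Spectrum class comes from the surrounding library):

import numpy as np

class FakeSpectrum:
    """Bare-bones stand-in with the attributes cut_spectrum uses."""
    def __init__(self, x, y):
        self.x, self.y = np.asarray(x), np.asarray(y)

sp = FakeSpectrum(np.linspace(4000., 7000., 3001), np.ones(3001))
cut = cut_spectrum(sp, 5000., 6000.)
print(cut.x[0], cut.x[-1])   # 5000.0 5999.0 -- the lf index itself is excluded by the slice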
def remove_nongenerating_nonterminals(grammar, inplace=False):
# type: (Grammar, bool) -> Grammar
# copy if required
if inplace is False:
grammar = copy(grammar)
# create working sets
generates = grammar.terminals.copy()
generates.add(EPSILON)
rules = grammar.rules.copy()
# iterate until the set doesn't change
while True:
# create set for the next iteration
additional = generates.copy()
# iterate over unprocessed rules
for rule in rules.copy():
rightPart = rule.right
allIn = True
# check if all symbols on the right part of rule are in generates set
for symbol in rightPart:
if symbol not in generates:
allIn = False
break
# Symbol is missing, so the rule is not processed
if not allIn:
continue
# Rule is processed - remove it from the working rules and mark its left symbol as generating
additional.add(rule.fromSymbol)
rules.remove(rule)
# end of rules iterations
# if the current and previous iterations are the same, end the iteration
if additional == generates:
break
# swap sets from previous and current iterations
generates = additional
# remove nonterms that are not generating
nongenerating = grammar.nonterminals.difference(generates)
grammar.nonterminals.remove(*nongenerating)
# return the grammar
return grammar | Remove nongenerating symbols from the grammar.
Nongenerating symbols are symbols that don't generate any sequence of terminals,
for example because of never-ending recursion.
:param grammar: Grammar where to remove nongenerating symbols.
:param inplace: True if transformation should be performed in place. False by default.
:return: Grammar without nongenerating symbols. |
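The same fixed-point idea, shown standalone on a toy grammar encoded as plain tuples rather than the library's Grammar/Rule classes:

# Toy grammar: S -> a S | b, A -> A a.  A never derives a terminal string.
terminals = {'a', 'b'}
rules = {('S', ('a', 'S')), ('S', ('b',)), ('A', ('A', 'a'))}

generating = set(terminals)
changed = True
while changed:
    changed = False
    for left, right in rules:
        if left not in generating and all(sym in generating for sym in right):
            generating.add(left)
            changed = True

nonterminals = {left for left, _ in rules}
print(nonterminals - generating)   # {'A'} -- A is nongenerating and would be removed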
def skip_first(pipe, items=1):
pipe = iter(pipe)
for i in skip(pipe, items):
yield i | this is an alias for skip to parallel the dedicated skip_last function
to provide a little more readability to the code. the action of actually
skipping does not occur until the first iteration is done |
def find(self, query=None, **kwargs):
url = self.getUrl()
if query is not None:
if isinstance(query, queries.SlickQuery):
url = url + "?" + urlencode(query.to_dict())
elif isinstance(query, dict):
url = url + "?" + urlencode(query)
elif len(kwargs) > 0:
url = url + "?" + urlencode(kwargs)
# hopefully when we discover what problems exist in slick to require this, we can take the loop out
for retry in range(3):
try:
self.logger.debug("Making request to slick at url %s", url)
r = requests.get(url)
self.logger.debug("Request returned status code %d", r.status_code)
if r.status_code == 200:
retval = []
objects = r.json()
for dct in objects:
retval.append(self.model.from_dict(dct))
return retval
else:
self.logger.error("Slick returned an error when trying to access %s: status code %s" % (url, str(r.status_code)))
self.logger.error("Slick response: ", pprint.pformat(r))
except BaseException as error:
self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
raise SlickCommunicationError(
"Tried 3 times to request data from slick at url %s without a successful status code.", url) | You can pass in the appropriate model object from the queries module,
or a dictionary with the keys and values for the query,
or a set of key=value parameters. |
def findOne(self, query=None, mode=FindOneMode.FIRST, **kwargs):
results = self.find(query, **kwargs)
if len(results) == 0:
return None
elif len(results) == 1 or mode == FindOneMode.FIRST:
return results[0]
elif mode == FindOneMode.LAST:
return results[-1] | Perform a find, with the same options present, but only return a maximum of one result. If find returns
an empty array, then None is returned.
If there are multiple results from find, the one returned depends on the mode parameter. If mode is
FindOneMode.FIRST, then the first result is returned. If the mode is FindOneMode.LAST, then the last is
returned. If the mode is FindOneMode.ERROR, then a SlickCommunicationError is raised. |