code | docs |
---|---|
def get_validator(filter_data):
for matcher_type, m in matchers.items():
if hasattr(m, 'can_handle') and m.can_handle(filter_data):
filter_data = m.handle(filter_data)
return filter_data | Ask every matcher whether it can handle the given filter data.
:param filter_data:
:return: |
def run():
# NOTE(kiennt): This example is not finished yet,
# because no driver has been completed.
# Get a network client with openstack driver.
network_client = client.Client(version=_VERSION,
resource=_RESOURCES[0], provider=_PROVIDER)
# net = network_client.create('daikk', '10.0.0.0/24')
# list_subnet = network_client.list()
# network_client.show(list_subnet[0].get("id"))
network_client.delete("4b983028-0f8c-4b63-b10c-6e8420bb7903") | Run the examples |
def sort(self, attr):
self.entries = Sorter(self.entries, self.category, attr).sort_entries()
return self | Sort the ratings based on an attribute |
def get_title(self):
if self.category == 'cable':
strings = get_strings(self.soup, 'strong')
else:
strings = get_strings(self.soup, 'b')
if len(strings) == 0:
strings = get_strings(self.soup, 'strong')
if len(strings) >= 1 and self.category == 'cable':
return strings[0]
elif len(strings) > 0 and 'Fast' in strings[-1]:
return strings[0]
return ''.join(strings) | Title is either the chart header (for a cable ratings page) or the text above
the opening description (for a broadcast ratings page). |
def get_json(self):
ratings_dict = {
'category': self.category,
'date': self.date,
'day': self.weekday,
'next week': self.next_week,
'last week': self.last_week,
'entries': self.entries,
'url': self.url
}
return to_json(ratings_dict) | Serialize ratings object as JSON-formatted string |
def _get_url_params(self, shorten=True):
cable = self.category == 'cable'
url_date = convert_month(self.date, shorten=shorten, cable=cable)
return [
BASE_URL,
self.weekday.lower(),
self.category + '-ratings',
url_date.replace(' ', '-')
] | Returns a list of each parameter to be used for the url format. |
def _match_show(self, show):
if self.show:
return match_list(self.show, show)
else:
return True | Match a query for a specific show/list of shows |
def _match_net(self, net):
if self.network:
return match_list(self.network, net)
else:
return True | Match a query for a specific network/list of networks |
def _verify_page(self):
title_date = self._get_date_in_title().lower()
split_date = self.date.lower().split()
split_date[0] = split_date[0][:3]
return all(term in title_date for term in split_date) | Verify the ratings page matches the correct date |
def _get_ratings_page(self):
# Use current posted date to build url
self._build_url()
soup = get_soup(self.url)
if soup:
return soup
# Try building url again with unshortened month
self._build_url(shorten=False)
soup = get_soup(self.url)
if soup:
return soup
# If no page is found, fall back to search
return SearchDaily(self.category, date=self.date).fetch_result() | Do a limited search for the correct url. |
def _build_url(self, shorten=True):
self.url = URL_FORMAT.format(*self._get_url_params(shorten=shorten)) | Build the url for a cable ratings page |
def fetch_entries(self):
data = []
for row in self.get_rows():
# Stop fetching data if limit has been met
if exceeded_limit(self.limit, len(data)):
break
entry = row.find_all('td')
entry_dict = {}
show = entry[0].string
net = entry[1].string
if not self._match_query(show, net):
continue
entry_dict['show'] = show
entry_dict['net'] = net
entry_dict['time'] = entry[2].string
if ',' in entry[3].string:
entry_dict['viewers'] = entry[3].string.replace(',', '.')
else:
entry_dict['viewers'] = '0.' + entry[3].string
entry_dict['rating'] = entry[4].string
# Add data to create cable entry
data.append(Entry(**entry_dict))
return data | Fetch data and parse it to build a list of cable entries. |
def _build_url(self, shorten=True):
url_order = self._get_url_params(shorten=shorten)
# For fast ratings, switch weekday and category in url
if self.category != 'final':
url_order[1], url_order[2] = url_order[2], url_order[1]
self.url = URL_FORMAT.format(*url_order) | Build the url for a broadcast ratings page |
def get_rows(self):
table = self.soup.find_all('tr')[1:-3]
return [row for row in table if row.contents[3].string] | Get the rows from a broadcast ratings chart |
def fetch_entries(self):
current_time = ''
data = []
for row in self.get_rows():
# Stop fetching data if limit has been met
if exceeded_limit(self.limit, len(data)):
break
entry = row.find_all('td')
entry_dict = {}
show_time = entry[0].string
if show_time and show_time != current_time:
current_time = show_time
if not show_time:
show_time = current_time
entry_dict['time'] = show_time
show_string = entry[1].string.split('(')
show = show_string[0][:-1]
net = self._get_net(show_string)
if not self._match_query(show, net):
continue
entry_dict['show'] = show
entry_dict['net'] = net
entry_dict['viewers'] = entry[3].string.strip('*')
entry_dict['rating'], entry_dict['share'] = self._get_rating(entry)
# Add data to initialize broadcast entry
data.append(Entry(**entry_dict))
return data | Fetch data and parse it to build a list of broadcast entries. |
def get_averages(self):
networks = [unescape_html(n.string) for n in self.soup.find_all('td', width='77')]
table = self.soup.find_all('td', style=re.compile('^font'))
# Each element is a list split as [rating, share]
rateshares = [r.string.split('/') for r in table[:5] if r.string]
viewers = [v.string for v in table[5:] if v.string]
averages = {}
# Load the averages dict
for index, network in enumerate(networks):
viewer = convert_float(unescape_html(viewers[index]))
rating = convert_float(unescape_html(rateshares[index][0]))
share = convert_float(unescape_html(rateshares[index][1]))
averages[network] = {'viewer': viewer, 'rating': rating, 'share': share}
return averages | Get the broadcast network averages for that day.
Returns a dictionary:
key: network name
value: sub-dictionary with 'viewer', 'rating', and 'share' as keys |
def _get_net(self, entry):
try:
net = entry[1]
return net[net.find('(')+1:net.find(')')]
except IndexError:
return None | Get the network for a specific row |
def _get_rating(self, entry):
r_info = ''
for string in entry[2].strings:
r_info += string
rating, share = r_info.split('/')
return (rating, share.strip('*')) | Get the rating and share for a specific row |
def _visit(self, L, marked, tempmarked):
assert not self.is_pseudo
if self in tempmarked:
raise Exception('feature graph is cyclic')
if self not in marked:
tempmarked[self] = True
features = list()
if self.siblings is not None and self.is_toplevel:
features.extend(reversed(self.siblings))
if self.children is not None:
features.extend(reversed(self.children))
if len(features) > 0:
for feature in features:
feature._visit(L, marked, tempmarked)
marked[self] = True
del tempmarked[self]
L.insert(0, self) | Sort features topologically.
This recursive function uses depth-first search to find an ordering of
the features in the feature graph that is sorted both topologically and
with respect to genome coordinates.
Implementation based on Wikipedia's description of the algorithm in
Cormen's *Introduction to Algorithms*.
http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
There are potentially many valid topological sorts of a feature graph,
but only one that is also sorted with respect to genome coordinates
(excluding different orderings of, for example, exons and CDS features
with the same coordinates). Iterating through feature children in
reversed order (in this function's innermost loop) seems to be the key
to sorting with respect to genome coordinates. |
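A minimal driver sketch (an assumption, not part of the original class) showing how _visit() might be called to produce the sorted list; iterating the top-level features in reversed order mirrors the reversed iteration noted above.

def topological_sort(toplevel_features):
    ordering = list()    # sorted output; _visit() prepends each finished feature
    marked = dict()      # permanently visited features
    tempmarked = dict()  # features on the current DFS path, for cycle detection
    for feature in reversed(toplevel_features):
        if feature not in marked:
            feature._visit(ordering, marked, tempmarked)
    return ordering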
def add_child(self, child, rangecheck=False):
assert self.seqid == child.seqid, \
(
'seqid mismatch for feature {} ({} vs {})'.format(
self.fid, self.seqid, child.seqid
)
)
if rangecheck is True:
assert self._strand == child._strand, \
('child of feature {} has a different strand'.format(self.fid))
assert self._range.contains(child._range), \
(
'child of feature {} is not contained within its span '
'({}-{})'.format(self.fid, child.start, child.end)
)
if self.children is None:
self.children = list()
self.children.append(child)
self.children.sort() | Add a child feature to this feature. |
def pseudoify(self):
assert self.is_toplevel
assert self.is_multi
assert len(self.multi_rep.siblings) > 0
rep = self.multi_rep
start = min([s.start for s in rep.siblings + [rep]])
end = max([s.end for s in rep.siblings + [rep]])
parent = Feature(None)
parent._pseudo = True
parent._seqid = self._seqid
parent.set_coord(start, end)
parent._strand = self._strand
for sibling in rep.siblings + [rep]:
parent.add_child(sibling, rangecheck=True)
parent.children = sorted(parent.children)
rep.siblings = sorted(rep.siblings)
return parent | Derive a pseudo-feature parent from the given multi-feature.
The provided multi-feature does not need to be the representative. The
newly created pseudo-feature has the same seqid as the provided multi-
feature, and spans its entire range. Otherwise, the pseudo-feature is
empty. It is used only for convenience in sorting. |
def slug(self):
return '{:s}@{:s}[{:d}, {:d}]'.format(self.type, self.seqid,
self.start + 1, self.end) | A concise slug for this feature.
Unlike the internal representation, which is 0-based half-open, the
slug is a 1-based closed interval (a la GFF3). |
def add_sibling(self, sibling):
assert self.is_pseudo is False
if self.siblings is None:
self.siblings = list()
self.multi_rep = self
sibling.multi_rep = self
self.siblings.append(sibling) | Designate this a multi-feature representative and add a co-feature.
Some features exist discontinuously on the sequence, and therefore
cannot be declared with a single GFF3 entry (which can encode only a
single interval). The canonical encoding for these types of features is
called a multi-feature, in which a single feature is declared on
multiple lines with multiple entries all sharing the same feature type
and ID attribute. This is commonly done with coding sequence (CDS)
features.
In this package, each multi-feature has a single "representative"
feature object, and all other objects/entries associated with that
multi-feature are attached to it as "siblings".
Invoking this method will designate the calling feature as the
multi-feature representative and add the argument as a sibling. |
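A hedged sketch of the multi-feature pattern described above; the CDS objects are hypothetical Feature instances sharing the same ID attribute, and only add_sibling(), multi_rep, and siblings come from the code above.

cds_parts = [cds_part1, cds_part2, cds_part3]  # discontinuous segments of one CDS
rep = cds_parts[0]                             # first entry becomes the representative
for part in cds_parts[1:]:
    rep.add_sibling(part)
# rep.multi_rep is rep, rep.siblings == [cds_part2, cds_part3],
# and each sibling's multi_rep points back to rep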
def source(self, newsource):
oldsource = self.source
for feature in self:
if feature.source == oldsource:
feature._source = newsource | When modifying source, also update children with matching source. |
def type(self, newtype):
self._type = newtype
if self.is_multi:
for sibling in self.multi_rep.siblings:
sibling._type = newtype | If the feature is a multifeature, update all entries. |
def transform(self, offset, newseqid=None):
for feature in self:
feature._range.transform(offset)
if newseqid is not None:
feature.seqid = newseqid | Transform the feature's coordinates by the given offset. |
def add_attribute(self, attrkey, attrvalue, append=False, oldvalue=None):
# Handle ID/Parent relationships
if attrkey == 'ID':
if self.children is not None:
oldid = self.get_attribute('ID')
for child in self.children:
child.add_attribute('Parent', attrvalue,
oldvalue=oldid)
self._attrs[attrkey] = attrvalue
if self.is_multi:
self.multi_rep._attrs[attrkey] = attrvalue
for sibling in self.multi_rep.siblings:
sibling._attrs[attrkey] = attrvalue
return
# Handle all other attribute types
if oldvalue is not None:
if attrkey in self._attrs:
assert oldvalue in self._attrs[attrkey]
del self._attrs[attrkey][oldvalue]
if attrkey not in self._attrs or append is False:
self._attrs[attrkey] = dict()
self._attrs[attrkey][attrvalue] = True | Add an attribute to this feature.
Feature attributes are stored as nested dictionaries.
Each feature can only have one ID, so ID attribute mapping is 'string'
to 'string'. All other attributes can have multiple values, so mapping
is 'string' to 'dict of strings'.
By default, adding an attribute that already exists will cause the old
value to be overwritten. If the `append` option is true, the new
attribute value will not overwrite the old value, but will be appended
as a second value. (Note: ID attributes can have only 1 value.)
If the `oldvalue` option is set, the new value will replace the old
value. This is necessary for updating an attribute that has multiple
values without completely overwriting all old values. (Note: The
`append` option is ignored when `oldvalue` is set.) |
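A hedged usage sketch of the attribute semantics described above; `feature` is a hypothetical Feature instance.

feature.add_attribute('Note', 'predicted')                        # set a value
feature.add_attribute('Note', 'curated', append=True)             # keep both values
feature.add_attribute('Note', 'validated', oldvalue='predicted')  # replace a single value
feature.add_attribute('ID', 'gene0001')  # IDs stay single-valued; children's Parent is updated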
def get_attribute(self, attrkey, as_string=False, as_list=False):
assert not as_string or not as_list
if attrkey not in self._attrs:
return None
if attrkey == 'ID':
return self._attrs[attrkey]
attrvalues = list(self._attrs[attrkey])
attrvalues.sort()
if len(attrvalues) == 1 and not as_list:
return attrvalues[0]
elif as_string:
return ','.join(attrvalues)
return attrvalues | Get the value of an attribute.
By default, returns a string for ID and attributes with a single value,
and a list of strings for attributes with multiple values. The
`as_string` and `as_list` options can be used to force the function to
return values as a string (comma-separated in case of multiple values)
or a list. |
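A hedged sketch of the return types described above, assuming `feature` carries a single-valued ID and a multi-valued Dbxref attribute.

feature.get_attribute('ID')                      # 'gene0001'
feature.get_attribute('Dbxref')                  # ['EC:1.1.1.1', 'GeneID:123'] (sorted list)
feature.get_attribute('Dbxref', as_string=True)  # 'EC:1.1.1.1,GeneID:123'
feature.get_attribute('Dbxref', as_list=True)    # always a list, even for one value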
def parse_attributes(self, attrstring):
if attrstring in [None, '', '.']:
return dict()
attributes = dict()
keyvaluepairs = attrstring.split(';')
for kvp in keyvaluepairs:
if kvp == '':
continue
key, value = kvp.split('=')
if key == 'ID':
assert ',' not in value
attributes[key] = value
continue
values = value.split(',')
valdict = dict((val, True) for val in values)
attributes[key] = valdict
return attributes | Parse an attribute string.
Given a string with semicolon-separated key-value pairs, populate a
dictionary with the given attributes. |
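A hedged example of the attribute string format handled above; the attribute values are illustrative.

attrs = feature.parse_attributes('ID=mRNA0001;Parent=gene0001;Dbxref=GeneID:123,EC:1.1.1.1')
# attrs['ID'] == 'mRNA0001'                                   (ID maps to a plain string)
# attrs['Parent'] == {'gene0001': True}                       (single value, still a dict)
# attrs['Dbxref'] == {'GeneID:123': True, 'EC:1.1.1.1': True} (multi-valued)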
def attribute_crawl(self, key):
union = set()
for feature in self:
values = feature.get_attribute(key, as_list=True)
if values is not None:
union.update(set(values))
return union | Grab all attribute values associated with the given feature.
Traverse the given feature (and all of its descendants) to find all
values associated with the given attribute key.
>>> import tag
>>> reader = tag.GFF3Reader(tag.pkgdata('otau-no-seqreg.gff3'))
>>> features = tag.select.features(reader)
>>> for feature in features:
... names = feature.attribute_crawl('Name')
... print(sorted(list(names)))
['Ot01g00060', 'XM_003074019.1', 'XP_003074065.1']
['Ot01g00070', 'XM_003074020.1', 'XP_003074066.1']
['Ot01g00080', 'XM_003074021.1', 'XP_003074067.1']
['Ot01g00090', 'XM_003074022.1', 'XP_003074068.1']
['Ot01g00100', 'XM_003074023.1', 'XP_003074069.1']
['Ot01g00110', 'XM_003074024.1', 'XP_003074070.1'] |
def ncbi_geneid(self):
values = self.get_attribute('Dbxref', as_list=True)
if values is None:
return None
for value in values:
if value.startswith('GeneID:'):
key, geneid = value.split(':')
return geneid
return None | Retrieve this feature's NCBI GeneID if it's present.
NCBI GFF3 files contain gene IDs encoded in **Dbxref** attributes
(example: `Dbxref=GeneID:103504972`). This function locates and returns
the GeneID if present, or returns `None` otherwise. |
def cdslen(self):
if self.type != 'mRNA':
return None
return sum([len(c) for c in self.children if c.type == 'CDS']) | Translated length of this feature.
Undefined for non-mRNA features. |
def parse_querystring(msg):
'parse a querystring into keys and values'
for part in msg.querystring.strip().lstrip('?').split('&'):
key, value = part.split('=')
yield key, value | parse a querystring into keys and values |
def AddClusterTags(r, tags, dry_run=False):
query = {
"dry-run": dry_run,
"tag": tags,
}
return r.request("put", "/2/tags", query=query) | Adds tags to the cluster.
@type tags: list of str
@param tags: tags to add to the cluster
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id |
def DeleteClusterTags(r, tags, dry_run=False):
query = {
"dry-run": dry_run,
"tag": tags,
}
return r.request("delete", "/2/tags", query=query) | Deletes tags from the cluster.
@type tags: list of str
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run |
def GetInstances(r, bulk=False):
if bulk:
return r.request("get", "/2/instances", query={"bulk": 1})
else:
instances = r.request("get", "/2/instances")
return r.applier(itemgetters("id"), instances) | Gets information about instances on the cluster.
@type bulk: bool
@param bulk: whether to return all information about all instances
@rtype: list of dict or list of str
@return: if bulk is True, info about the instances, else a list of instances |
def GetInstanceInfo(r, instance, static=None):
if static is None:
return r.request("get", "/2/instances/%s/info" % instance)
else:
return r.request("get", "/2/instances/%s/info" % instance,
query={"static": static}) | Gets information about an instance.
@type instance: string
@param instance: Instance name
@rtype: string
@return: Job ID |
def CreateInstance(r, mode, name, disk_template, disks, nics,
**kwargs):
if INST_CREATE_REQV1 not in r.features:
raise GanetiApiError("Cannot create Ganeti 2.1-style instances")
query = {}
if kwargs.get("dry_run"):
query["dry-run"] = 1
if kwargs.get("no_install"):
query["no-install"] = 1
# Make a version 1 request.
body = {
_REQ_DATA_VERSION_FIELD: 1,
"mode": mode,
"name": name,
"disk_template": disk_template,
"disks": disks,
"nics": nics,
}
conflicts = set(kwargs.iterkeys()) & set(body.iterkeys())
if conflicts:
raise GanetiApiError("Required fields can not be specified as"
" keywords: %s" % ", ".join(conflicts))
kwargs.pop("dry_run", None)
body.update(kwargs)
return r.request("post", "/2/instances", query=query, content=body) | Creates a new instance.
More details for parameters can be found in the RAPI documentation.
@type mode: string
@param mode: Instance creation mode
@type name: string
@param name: Hostname of the instance to create
@type disk_template: string
@param disk_template: Disk template for instance (e.g. plain, diskless,
file, or drbd)
@type disks: list of dicts
@param disks: List of disk definitions
@type nics: list of dicts
@param nics: List of NIC definitions
@type dry_run: bool
@keyword dry_run: whether to perform a dry run
@type no_install: bool
@keyword no_install: whether to create without installing the OS (true=don't install)
@rtype: int
@return: job id |
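A hedged usage sketch; the connection object `r`, the hostname, and the disk/NIC definitions are illustrative assumptions, not values taken from the original code.

job_id = CreateInstance(
    r, mode='create', name='web1.example.com', disk_template='drbd',
    disks=[{'size': 10240}],   # one 10 GiB disk
    nics=[{}],                 # one NIC with cluster defaults
    dry_run=True)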
def DeleteInstance(r, instance, dry_run=False):
return r.request("delete", "/2/instances/%s" % instance,
query={"dry-run": dry_run}) | Deletes an instance.
@type instance: str
@param instance: the instance to delete
@rtype: int
@return: job id |
def ActivateInstanceDisks(r, instance, ignore_size=False):
return r.request("put", "/2/instances/%s/activate-disks" % instance,
query={"ignore_size": ignore_size}) | Activates an instance's disks.
@type instance: string
@param instance: Instance name
@type ignore_size: bool
@param ignore_size: Whether to ignore recorded size
@return: job id |
def RecreateInstanceDisks(r, instance, disks=None, nodes=None):
body = {}
if disks is not None:
body["disks"] = disks
if nodes is not None:
body["nodes"] = nodes
return r.request("post", "/2/instances/%s/recreate-disks" % instance,
content=body) | Recreate an instance's disks.
@type instance: string
@param instance: Instance name
@type disks: list of int
@param disks: List of disk indexes
@type nodes: list of string
@param nodes: New instance nodes, if relocation is desired
@rtype: string
@return: job id |
def GrowInstanceDisk(r, instance, disk, amount, wait_for_sync=False):
body = {
"amount": amount,
"wait_for_sync": wait_for_sync,
}
return r.request("post", "/2/instances/%s/disk/%s/grow" %
(instance, disk), content=body) | Grows a disk of an instance.
More details for parameters can be found in the RAPI documentation.
@type instance: string
@param instance: Instance name
@type disk: integer
@param disk: Disk index
@type amount: integer
@param amount: Grow disk by this amount (MiB)
@type wait_for_sync: bool
@param wait_for_sync: Wait for disk to synchronize
@rtype: int
@return: job id |
def AddInstanceTags(r, instance, tags, dry_run=False):
query = {
"tag": tags,
"dry-run": dry_run,
}
return r.request("put", "/2/instances/%s/tags" % instance, query=query) | Adds tags to an instance.
@type instance: str
@param instance: instance to add tags to
@type tags: list of str
@param tags: tags to add to the instance
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id |
def DeleteInstanceTags(r, instance, tags, dry_run=False):
query = {
"tag": tags,
"dry-run": dry_run,
}
return r.request("delete", "/2/instances/%s/tags" % instance, query=query) | Deletes tags from an instance.
@type instance: str
@param instance: instance to delete tags from
@type tags: list of str
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run |
def RebootInstance(r, instance, reboot_type=None, ignore_secondaries=False,
dry_run=False):
query = {
"ignore_secondaries": ignore_secondaries,
"dry-run": dry_run,
}
if reboot_type:
if reboot_type not in ("hard", "soft", "full"):
raise GanetiApiError("reboot_type must be one of 'hard',"
" 'soft', or 'full'")
query["type"] = reboot_type
return r.request("post", "/2/instances/%s/reboot" % instance, query=query) | Reboots an instance.
@type instance: str
@param instance: instance to reboot
@type reboot_type: str
@param reboot_type: one of: hard, soft, full
@type ignore_secondaries: bool
@param ignore_secondaries: if True, ignores errors for the secondary node
while re-assembling disks (in hard-reboot mode only)
@type dry_run: bool
@param dry_run: whether to perform a dry run |
def ShutdownInstance(r, instance, dry_run=False, no_remember=False,
timeout=120):
query = {
"dry-run": dry_run,
"no-remember": no_remember,
}
content = {
"timeout": timeout,
}
return r.request("put", "/2/instances/%s/shutdown" % instance,
query=query, content=content) | Shuts down an instance.
@type instance: str
@param instance: the instance to shut down
@type dry_run: bool
@param dry_run: whether to perform a dry run
@type no_remember: bool
@param no_remember: if true, will not record the state change
@rtype: string
@return: job id |
def StartupInstance(r, instance, dry_run=False, no_remember=False):
query = {
"dry-run": dry_run,
"no-remember": no_remember,
}
return r.request("put", "/2/instances/%s/startup" % instance, query=query) | Starts up an instance.
@type instance: str
@param instance: the instance to start up
@type dry_run: bool
@param dry_run: whether to perform a dry run
@type no_remember: bool
@param no_remember: if true, will not record the state change
@rtype: string
@return: job id |
def ReinstallInstance(r, instance, os=None, no_startup=False, osparams=None):
if INST_REINSTALL_REQV1 in r.features:
body = {
"start": not no_startup,
}
if os is not None:
body["os"] = os
if osparams is not None:
body["osparams"] = osparams
return r.request("post", "/2/instances/%s/reinstall" % instance,
content=body)
# Use old request format
if osparams:
raise GanetiApiError("Server does not support specifying OS"
" parameters for instance reinstallation")
query = {
"nostartup": no_startup,
}
if os:
query["os"] = os
return r.request("post", "/2/instances/%s/reinstall" % instance,
query=query) | Reinstalls an instance.
@type instance: str
@param instance: The instance to reinstall
@type os: str or None
@param os: The operating system to reinstall. If None, the instance's
current operating system will be installed again
@type no_startup: bool
@param no_startup: Whether to start the instance automatically |
def ReplaceInstanceDisks(r, instance, disks=None, mode=REPLACE_DISK_AUTO,
remote_node=None, iallocator=None, dry_run=False):
if mode not in REPLACE_DISK:
raise GanetiApiError("Invalid mode %r not one of %r" % (mode,
REPLACE_DISK))
query = {
"mode": mode,
"dry-run": dry_run,
}
if disks:
query["disks"] = ",".join(str(idx) for idx in disks)
if remote_node:
query["remote_node"] = remote_node
if iallocator:
query["iallocator"] = iallocator
return r.request("post", "/2/instances/%s/replace-disks" % instance,
query=query) | Replaces disks on an instance.
@type instance: str
@param instance: instance whose disks to replace
@type disks: list of ints
@param disks: Indexes of disks to replace
@type mode: str
@param mode: replacement mode to use (defaults to replace_auto)
@type remote_node: str or None
@param remote_node: new secondary node to use (for use with
replace_new_secondary mode)
@type iallocator: str or None
@param iallocator: instance allocator plugin to use (for use with
replace_auto mode)
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id |
def ExportInstance(r, instance, mode, destination, shutdown=None,
remove_instance=None, x509_key_name=None,
destination_x509_ca=None):
body = {
"destination": destination,
"mode": mode,
}
if shutdown is not None:
body["shutdown"] = shutdown
if remove_instance is not None:
body["remove_instance"] = remove_instance
if x509_key_name is not None:
body["x509_key_name"] = x509_key_name
if destination_x509_ca is not None:
body["destination_x509_ca"] = destination_x509_ca
return r.request("put", "/2/instances/%s/export" % instance, content=body) | Exports an instance.
@type instance: string
@param instance: Instance name
@type mode: string
@param mode: Export mode
@rtype: string
@return: Job ID |
def MigrateInstance(r, instance, mode=None, cleanup=None):
body = {}
if mode is not None:
body["mode"] = mode
if cleanup is not None:
body["cleanup"] = cleanup
return r.request("put", "/2/instances/%s/migrate" % instance,
content=body) | Migrates an instance.
@type instance: string
@param instance: Instance name
@type mode: string
@param mode: Migration mode
@type cleanup: bool
@param cleanup: Whether to clean up a previously failed migration |
def FailoverInstance(r, instance, iallocator=None, ignore_consistency=False,
target_node=None):
body = {
"ignore_consistency": ignore_consistency,
}
if iallocator is not None:
body["iallocator"] = iallocator
if target_node is not None:
body["target_node"] = target_node
return r.request("put", "/2/instances/%s/failover" % instance,
content=body) | Does a failover of an instance.
@type instance: string
@param instance: Instance name
@type iallocator: string
@param iallocator: Iallocator for deciding the target node for
shared-storage instances
@type ignore_consistency: bool
@param ignore_consistency: Whether to ignore disk consistency
@type target_node: string
@param target_node: Target node for shared-storage instances
@rtype: string
@return: job id |
def RenameInstance(r, instance, new_name, ip_check, name_check=None):
body = {
"ip_check": ip_check,
"new_name": new_name,
}
if name_check is not None:
body["name_check"] = name_check
return r.request("put", "/2/instances/%s/rename" % instance, content=body) | Changes the name of an instance.
@type instance: string
@param instance: Instance name
@type new_name: string
@param new_name: New instance name
@type ip_check: bool
@param ip_check: Whether to ensure instance's IP address is inactive
@type name_check: bool
@param name_check: Whether to ensure instance's name is resolvable |
def WaitForJobChange(r, job_id, fields, prev_job_info, prev_log_serial):
body = {
"fields": fields,
"previous_job_info": prev_job_info,
"previous_log_serial": prev_log_serial,
}
return r.request("get", "/2/jobs/%s/wait" % job_id, content=body) | Waits for job changes.
@type job_id: int
@param job_id: Job ID for which to wait |
def CancelJob(r, job_id, dry_run=False):
return r.request("delete", "/2/jobs/%s" % job_id,
query={"dry-run": dry_run}) | Cancels a job.
@type job_id: int
@param job_id: id of the job to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run |
def GetNodes(r, bulk=False):
if bulk:
return r.request("get", "/2/nodes", query={"bulk": 1})
else:
nodes = r.request("get", "/2/nodes")
return r.applier(itemgetters("id"), nodes) | Gets all nodes in the cluster.
@type bulk: bool
@param bulk: whether to return all information about all nodes
@rtype: list of dict or str
@return: if bulk is true, info about nodes in the cluster,
else list of nodes in the cluster |
def MigrateNode(r, node, mode=None, dry_run=False, iallocator=None,
target_node=None):
query = {
"dry-run": dry_run,
}
if NODE_MIGRATE_REQV1 in r.features:
body = {}
if mode is not None:
body["mode"] = mode
if iallocator is not None:
body["iallocator"] = iallocator
if target_node is not None:
body["target_node"] = target_node
else:
# Use old request format
if target_node is not None:
raise GanetiApiError("Server does not support specifying"
" target node for node migration")
body = None
if mode is not None:
query["mode"] = mode
return r.request("post", "/2/nodes/%s/migrate" % node, query=query,
content=body) | Migrates all primary instances from a node.
@type node: str
@param node: node to migrate
@type mode: string
@param mode: if passed, it will overwrite the live migration type,
otherwise the hypervisor default will be used
@type dry_run: bool
@param dry_run: whether to perform a dry run
@type iallocator: string
@param iallocator: instance allocator to use
@type target_node: string
@param target_node: Target node for shared-storage instances
@rtype: int
@return: job id |
def SetNodeRole(r, node, role, force=False, auto_promote=False):
query = {
"force": force,
"auto_promote": auto_promote,
}
return r.request("put", "/2/nodes/%s/role" % node, query=query,
content=role) | Sets the role for a node.
@type node: str
@param node: the node whose role to set
@type role: str
@param role: the role to set for the node
@type force: bool
@param force: whether to force the role change
@type auto_promote: bool
@param auto_promote: Whether node(s) should be promoted to master
candidate if necessary
@rtype: int
@return: job id |
def PowercycleNode(r, node, force=False):
query = {
"force": force,
}
return r.request("post", "/2/nodes/%s/powercycle" % node, query=query) | Powercycles a node.
@type node: string
@param node: Node name
@type force: bool
@param force: Whether to force the operation
@rtype: string
@return: job id |
def GetNodeStorageUnits(r, node, storage_type, output_fields):
query = {
"storage_type": storage_type,
"output_fields": output_fields,
}
return r.request("get", "/2/nodes/%s/storage" % node, query=query) | Gets the storage units for a node.
@type node: str
@param node: the node whose storage units to return
@type storage_type: str
@param storage_type: storage type whose units to return
@type output_fields: str
@param output_fields: storage type fields to return
@rtype: int
@return: job id where results can be retrieved |
def ModifyNodeStorageUnits(r, node, storage_type, name, allocatable=None):
query = {
"storage_type": storage_type,
"name": name,
}
if allocatable is not None:
query["allocatable"] = allocatable
return r.request("put", "/2/nodes/%s/storage/modify" % node, query=query) | Modifies parameters of storage units on the node.
@type node: str
@param node: node whose storage units to modify
@type storage_type: str
@param storage_type: storage type whose units to modify
@type name: str
@param name: name of the storage unit
@type allocatable: bool or None
@param allocatable: Whether to set the "allocatable" flag on the storage
unit (None=no modification, True=set, False=unset)
@rtype: int
@return: job id |
def RepairNodeStorageUnits(r, node, storage_type, name):
query = {
"storage_type": storage_type,
"name": name,
}
return r.request("put", "/2/nodes/%s/storage/repair" % node, query=query) | Repairs a storage unit on the node.
@type node: str
@param node: node whose storage units to repair
@type storage_type: str
@param storage_type: storage type to repair
@type name: str
@param name: name of the storage unit to repair
@rtype: int
@return: job id |
def AddNodeTags(r, node, tags, dry_run=False):
query = {
"tag": tags,
"dry-run": dry_run,
}
return r.request("put", "/2/nodes/%s/tags" % node, query=query,
content=tags) | Adds tags to a node.
@type node: str
@param node: node to add tags to
@type tags: list of str
@param tags: tags to add to the node
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id |
def DeleteNodeTags(r, node, tags, dry_run=False):
query = {
"tag": tags,
"dry-run": dry_run,
}
return r.request("delete", "/2/nodes/%s/tags" % node, query=query) | Delete tags from a node.
@type node: str
@param node: node to remove tags from
@type tags: list of str
@param tags: tags to remove from the node
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id |
def GetGroups(r, bulk=False):
if bulk:
return r.request("get", "/2/groups", query={"bulk": 1})
else:
groups = r.request("get", "/2/groups")
return r.applier(itemgetters("name"), groups) | Gets all node groups in the cluster.
@type bulk: bool
@param bulk: whether to return all information about the groups
@rtype: list of dict or str
@return: if bulk is true, a list of dictionaries with info about all node
groups in the cluster, else a list of names of those node groups |
def CreateGroup(r, name, alloc_policy=None, dry_run=False):
query = {
"dry-run": dry_run,
}
body = {
"name": name,
"alloc_policy": alloc_policy
}
return r.request("post", "/2/groups", query=query, content=body) | Creates a new node group.
@type name: str
@param name: the name of node group to create
@type alloc_policy: str
@param alloc_policy: the desired allocation policy for the group, if any
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id |
def DeleteGroup(r, group, dry_run=False):
query = {
"dry-run": dry_run,
}
return r.request("delete", "/2/groups/%s" % group, query=query) | Deletes a node group.
@type group: str
@param group: the node group to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id |
def RenameGroup(r, group, new_name):
body = {
"new_name": new_name,
}
return r.request("put", "/2/groups/%s/rename" % group, content=body) | Changes the name of a node group.
@type group: string
@param group: Node group name
@type new_name: string
@param new_name: New node group name
@rtype: int
@return: job id |
def AssignGroupNodes(r, group, nodes, force=False, dry_run=False):
query = {
"force": force,
"dry-run": dry_run,
}
body = {
"nodes": nodes,
}
return r.request("put", "/2/groups/%s/assign-nodes" % group, query=query,
content=body) | Assigns nodes to a group.
@type group: string
@param group: Node group name
@type nodes: list of strings
@param nodes: List of nodes to assign to the group
@rtype: int
@return: job id |
def AddGroupTags(r, group, tags, dry_run=False):
query = {
"dry-run": dry_run,
"tag": tags,
}
return r.request("put", "/2/groups/%s/tags" % group, query=query) | Adds tags to a node group.
@type group: str
@param group: group to add tags to
@type tags: list of string
@param tags: tags to add to the group
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: string
@return: job id |
def DeleteGroupTags(r, group, tags, dry_run=False):
query = {
"dry-run": dry_run,
"tag": tags,
}
return r.request("delete", "/2/groups/%s/tags" % group, query=query) | Deletes tags from a node group.
@type group: str
@param group: group to delete tags from
@type tags: list of string
@param tags: tags to delete
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: string
@return: job id |
def Query(r, what, fields, qfilter=None):
body = {
"fields": fields,
}
if qfilter is not None:
body["qfilter"] = body["filter"] = qfilter
return r.request("put", "/2/query/%s" % what, content=body) | Retrieves information about resources.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
@type qfilter: None or list
@param qfilter: Query filter
@rtype: string
@return: job id |
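A hedged usage sketch; the resource name, fields, and filter are illustrative, with the filter following Ganeti's list-based query syntax.

result = Query(r, 'instance', ['name', 'status'],
               qfilter=['=', 'name', 'web1.example.com'])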
def QueryFields(r, what, fields=None):
query = {}
if fields is not None:
query["fields"] = ",".join(fields)
return r.request("get", "/2/query/%s/fields" % what, query=query) | Retrieves available fields for a resource.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
@rtype: string
@return: job id |
def createalphabet(alphabetinput=None):
if alphabetinput and os.path.isfile(alphabetinput):
return _load_alphabet(alphabetinput)
elif alphabetinput:
alpha = []
setlist = alphabetinput.split(',')
for alphaset in setlist:
a = int(alphaset.split('-')[0])
b = int(alphaset.split('-')[1])
for i in range(a, b):
alpha.append(str(unichr(i)))
return alpha
alpha = []
for i in range(32, 127):
alpha.append(str(unichr(i)))
return alpha | Creates a sample alphabet containing printable ASCII characters |
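A hedged usage sketch; note that each 'a-b' range is half-open (range(a, b)), so the end ordinal is excluded.

digits_and_upper = createalphabet('48-58,65-91')  # '0'-'9' and 'A'-'Z'
full_printable = createalphabet()                 # ASCII 32-126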
def _instant_search(self):
_keys = []
for k,v in self.searchables.iteritems():
if self.string in v:
_keys.append(k)
self.candidates.append(_keys) | Determine possible keys after a push or pop |
def best_guess(self):
best_guess_ever = (0, 0) # (key, string)
points = defaultdict(float)
points[0] = 0
if len(self.string) > 0:
for key in self.candidate_keys:
guess = self.searchables[key]
if guess == self.string:
points[key] += 100
break
# skip, entry longer than guess
if len(self.string) > len(guess):
continue
# begins with
if guess.startswith(self.string):
points[key] += 1
# contained in
if self.string in guess:
points[key] += 1
# percentage of user search string in best guess
if points[key] > 0:
points[key] += float(len(self.string))/len(guess)
for k,v in points.iteritems():
if points[best_guess_ever[0]] < points[k]:
best_guess_ever = (k, self.searchables[k])
return best_guess_ever | Return the gnomekeyring position of the closest matching entry |
def find_html_files(self, destination):
for root, dirs, files in os.walk(destination):
for f in files:
if f.endswith('.html'):
yield os.path.join(root, f) | Finds all html files in the given destination. |
def minify_file(self, target):
html = open(target, 'rb').read()
enc = chardet.detect(html)['encoding']
with codecs.open(target, 'r+', enc) as f:
result = htmlmin.minify(f.read(), **self.options)
f.seek(0)
f.write(result)
f.truncate() | Minifies the target html file. |
def on_after_build_all(self, builder, **extra):
# NOTE(vesuvium): compatibility for lektor 2.X and 3.X
try:
is_enabled = self.is_enabled(builder.build_flags)
except AttributeError:
is_enabled = self.is_enabled(builder.extra_flags)
if not is_enabled:
return
reporter.report_generic('Starting HTML minification')
for htmlfile in self.find_html_files(builder.destination_path):
self.minify_file(htmlfile)
reporter.report_generic('HTML minification finished') | after-build-all lektor event |
def raw(self, channel=1):
self.waitOPC()
self.write('COMM_FORMAT DEF9,WORD,BIN')
self.write('C%u:WAVEFORM?' % channel)
return self.read_raw() | Reads the raw input from the oscilloscope.
Parameters
----------
channel : int
channel number of read
Returns
-------
rawData : bytes
raw binary data read from the oscilloscope |
def features(entrystream, type=None, traverse=False):
for feature in entry_type_filter(entrystream, tag.Feature):
if traverse:
if type is None:
message = 'cannot traverse without a specific feature type'
raise ValueError(message)
if type == feature.type:
yield feature
else:
for subfeature in feature:
if type == subfeature.type:
yield subfeature
else:
if not type or type == feature.type:
yield feature | Pull features out of the specified entry stream.
:param entrystream: a stream of entries
:param type: retrieve only features of the specified type; set to
:code:`None` to retrieve all features
:param traverse: by default, only top-level features are selected; set
to :code:`True` to search each feature graph for the
specified feature type |
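A hedged usage sketch, reusing the GFF3Reader from the attribute_crawl doctest earlier in this document.

reader = tag.GFF3Reader(tag.pkgdata('otau-no-seqreg.gff3'))
for mrna in features(reader, type='mRNA', traverse=True):
    print(mrna.get_attribute('ID'))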
def window(featurestream, seqid, start=None, end=None, strict=True):
region = None
if start and end:
region = tag.Range(start, end)
for feature in featurestream:
if feature.seqid != seqid:
continue
if region:
if strict:
if region.contains(feature._range):
yield feature
else:
if region.overlap(feature._range):
yield feature
else:
yield feature | Pull features out of the designated genomic interval.
This function uses 0-based half-open intervals, not the 1-based closed
intervals used by GFF3.
:param featurestream: a stream of feature entries
:param seqid: ID of the sequence from which to select features
:param start: start of the genomic interval
:param end: end of the genomic interval
:param strict: when set to :code:`True`, only features completely contained
within the interval are selected; when set to :code:`False`,
any feature overlapping the interval is selected |
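A hedged usage sketch; `featurestream`, the seqid, and the coordinates are illustrative. The interval is 0-based half-open, unlike GFF3's 1-based closed coordinates.

for feat in window(featurestream, 'chr1', start=0, end=50000, strict=False):
    print(feat.get_attribute('ID'))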
def directives(entrystream, type=None):
for directive in entry_type_filter(entrystream, tag.Directive):
if not type or type == directive.type:
yield directive | Pull directives out of the specified entry stream.
:param entrystream: a stream of entries
:param type: retrieve only directives of the specified type; set to
:code:`None` to retrieve all directives |
def validate_driver(f):
def check_driver(request):
drivers = get_all_driver()
drivers = filter(drivers, request)
if drivers:
return f(request, drivers)
else:
raise Exception('Driver is not found')
return check_driver | Check that a driver is available for the request before calling the wrapped function |
def cli(ctx, stage, port):
if not ctx.bubble:
ctx.say_yellow('There is no bubble present, will not listen')
raise click.Abort()
gbc = ctx.gbc
WEB = None
if stage in STAGES:
STAGE = ctx.cfg.CFG[stage]
if 'SERVER' in STAGE:
SERVER=STAGE.SERVER
if 'WEB' in SERVER:
WEB=SERVER.WEB
if not WEB:
ctx.say_red('There is no SERVER.WEB in stage:' + stage)
ctx.say_yellow('please check configuration in ' +
ctx.home + '/config/config.yaml')
raise click.Abort()
web_server = get_server(gbc, WEB, ctx.home)
try:
# TODO: bg &
# src_listening = web_server.start_web(ctx=gbc,
web_server.start_web(ctx=gbc,
port=port,
stage=stage)
except Exception as e:
ctx.say_red(
'cannot start web server: ' + str(WEB))
ctx.say_red(str(e))
raise click.Abort('cannot listen') | Web interface(experimental). |
def get_plural_tag_index(number, locale):
locale = Locale.parse(locale)
plural_rule = locale.plural_form
used_tags = plural_rule.tags | set([_fallback_tag])
tag, index = plural_rule(number), 0
for _tag in _plural_tags:
if _tag == tag:
return index
if _tag in used_tags:
index += 1 | Gets the plural tag index of a number on the plural rule of a locale::
>>> get_plural_tag_index(1, 'en_US')
0
>>> get_plural_tag_index(2, 'en_US')
1
>>> get_plural_tag_index(100, 'en_US')
1 |
def strings_to_(strings: Iterable[str], f: Callable) -> Iterable[Any]:
if not all_string_like(strings):
raise TypeError('All have to be strings!')
# ``type(strings)`` is the container type of *strings*.
return type(strings)(map(f, strings)) | Convert a list of strings to a list of certain form, specified by *f*.
:param strings: a list of string
:param f: a function that converts your string
:return: an iterable of the same container type as *strings*, with elements converted by *f*
.. doctest::
>>> strings_to_(['0.333', '0.667', '0.250'], float)
[0.333, 0.667, 0.25] |
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]:
return strings_to_(strings, lambda x: int(float(x))) | Convert a list of strings to a list of integers.
:param strings: a list of string
:return: a list of converted integers
.. doctest::
>>> strings_to_integers(['1', '1.0', '-0.2'])
[1, 1, 0] |
def string_to_double_precision_float(s: str) -> float:
first, second, exponential = re.match(
r"(-?\d*)\.?(-?\d*)d(-?\d+)", s, re.IGNORECASE).groups()
return float(first + '.' + second + 'e' + exponential) | A double precision float in a Fortran file has the form 'x.ydz' or 'x.yDz'; this cannot be converted directly to float
by Python's ``float`` function, so I wrote this function to help with the conversion. For example,
:param s: a string denoting a double precision number
:return: a Python floating point number
.. doctest::
>>> string_to_double_precision_float('1d-82')
1e-82
>>> string_to_double_precision_float('1.0D-82')
1e-82
>>> string_to_double_precision_float('0.8D234')
8e+233
>>> string_to_double_precision_float('.8d234')
8e+233 |
def string_to_general_float(s: str) -> float:
if 'D' in s.upper(): # Possible double precision number
try:
return string_to_double_precision_float(s)
except ValueError:
raise ValueError(
"The string '{0}' does not corresponds to a double precision number!".format(s))
else:
return float(s) | Convert a string to corresponding single or double precision scientific number.
:param s: a string could be '0.1', '1e-5', '1.0D-5', or any other validated number
:return: a float or raise an error
.. doctest::
>>> string_to_general_float('1.0D-5')
1e-05
>>> string_to_general_float('1Dx')
Traceback (most recent call last):
...
ValueError: The string '1Dx' does not corresponds to a double precision number!
>>> string_to_general_float('.8d234')
8e+233
>>> string_to_general_float('0.1')
0.1 |
def match_one_string(pattern: str, s: str, *args):
try:
# `match` is either an empty list or a list of string.
match, = re.findall(pattern, s)
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return wrapper(match)
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
except ValueError:
print("Pattern \"{0}\" not found, or more than one found in string {1}!".format(
pattern, s)) | Make sure you know that either no string or exactly one string will be matched! If you are not sure, use `match_one_pattern` instead.
:param pattern:
:param s:
:param args:
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def"
>>> match_one_string(p, s, int)
123
>>> print(match_one_string(p, "abc"))
Pattern "\d+" not found, or more than one found in string abc!
None
>>> print(match_one_string(p, "abc 123 def 456"))
Pattern "\d+" not found, or more than one found in string abc 123 def 456!
None |
def match_one_pattern(pattern: str, s: str, *args: Optional[Callable], **flags):
match: Optional[List[str]] = re.findall(pattern, s,
**flags) # `match` is either an empty list or a list of strings.
if match:
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return [wrapper(m) for m in match]
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
else: # If no match is found
print("Pattern \"{0}\" not found in string {1}!".format(pattern, s))
return None | Find a pattern in a certain string. If found and a wrapper is given, then return the wrapped matched-string; if no
wrapper is given, return the pure matched string. If no match is found, return None.
:param pattern: a pattern, can be a string or a regular expression
:param s: a string
:param args: at most 1 argument can be given
:param flags: the same flags as ``re.findall``'s
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def 456"
>>> match_one_pattern(p, s)
['123', '456']
>>> match_one_pattern(p, s, int)
[123, 456]
>>> match_one_pattern(p, "abc 123 def")
['123']
>>> print(match_one_pattern('s', 'abc'))
Pattern "s" not found in string abc!
None
>>> match_one_pattern('s', 'Ssa', flags=re.IGNORECASE)
['S', 's'] |
def all_string_like(iterable: Iterable[object]) -> bool:
return all(is_string_like(_) for _ in iterable) | Return `True` if every element of an iterable is a string.
:param iterable: Can be a set, a tuple, a list, etc.
:return: Whether every element of the iterable is a string.
.. doctest::
>>> all_string_like(['a', 'b', 'c', 3])
False
>>> all_string_like(('a', 'b', 'c', 'd'))
True |
def source_filename(self, docname: str, srcdir: str):
docpath = Path(srcdir, docname)
parent = docpath.parent
imgpath = parent.joinpath(self.filename)
# Does this exist?
if not imgpath.exists():
msg = f'Image does not exist at "{imgpath}"'
raise SphinxError(msg)
return imgpath | Get the full filename to referenced image |
def env_updated(self,
kb_app,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
resource
):
docname = resource.docname
srcdir = sphinx_app.env.srcdir
source_imgpath = self.source_filename(docname, srcdir)
# Copy the image to the Sphinx build directory
build_dir = sphinx_app.outdir
docpath = Path(docname)
parent = docpath.parent
target_imgpath = str(Path(build_dir, parent, self.filename))
# Does the target dir exist yet in the build dir? Probably not. If
# not, make it
target_dir = Path(build_dir, parent)
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(source_imgpath, target_imgpath) | Make images and enter them in Sphinx's output writer |
def catalog(self, table='', column=''):
lookup_table = self.lookup_table
if lookup_table is not None:
if table:
if column:
column = column.upper()
return lookup_table[table][column]
return lookup_table[table]
# Show what methods are available.
return self.lookup_methods
return None | Lookup the values available for querying. |
def _resolve_call(self, table, column='', value='', **kwargs):
if not column:
return self.catalog(table)
elif not value:
return self.catalog(table, column)
# We have all the table, column, and value, and now need to
# ensure they're all strings and uppercase.
column = column.upper()
value = str(value).upper()
data = self.call_api(table, column, value, **kwargs)
if isinstance(data, dict):
# Data is actually the first value.
data = data.values()[0]
return data | Internal method to resolve the API wrapper call. |
def call_api(self, table, column, value, **kwargs):
try:
output_format = kwargs.pop('output_format')
except KeyError:
output_format = self.output_format
url_list = [self.base_url, table, column,
quote(value), 'rows']
rows_count = self._number_of_rows(**kwargs)
url_list.append(rows_count)
url_string = '/'.join(url_list)
xml_data = urlopen(url_string).read()
data = self._format_data(output_format, xml_data)
return data | Exposed method to connect and query the EPA's API. |
def _number_of_rows(self, start=0, count=100, **kwargs):
first = str(start)
last = str(start + count)
string_format = ':'.join([first, last])
return string_format | Internal method to format the number of rows the EPA API returns. |
def get_reference(self, rtype: str, label: str):
# We are doing this instead of dictionary access in case we change
# the storage later to a multidict thingy for optimization.
reftype = self.data.get(rtype)
if reftype:
# The reftype might be "python" or "sphinx" or something else
# from an Intersphinx registry, not something internal to
# Kaybee.
return reftype[label] | Return reference filed under rtype/label
The references are organized by field/label, e.g. category/cat1.
This lets us use a shorthand notation to get to the resource, e.g.
ref:category:cat1 instead of folder1/folder2/cat1. |