def jenkins(self):
job_name = self.format['jenkins_job_name'].format(**self.data)
job = {'name': job_name}
return job | Generate jenkins job details. |
def gitlab(self):
main_name = self.format['git_repo'].format(**self.data)
qe_name = self.format['git_repo_qe'].format(**self.data)
config_name = self.format['git_repo_configs'].format(**self.data)
git = {
'config': config_name,
'main': main_name,
'qe': qe_name,
}
return git | Generate gitlab details. |
def get_value_matched_by_regex(field_name, regex_matches, string):
try:
value = regex_matches.group(field_name)
if value is not None:
return value
except IndexError:
pass
raise MissingFieldError(string, field_name) | Ensure value stored in regex group exists. |
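A minimal usage sketch of the helper above; the pattern and strings are hypothetical, and MissingFieldError comes from the same module:

import re

pattern = re.compile(r'(?P<project>\w+)/(?P<repo>\w+)')
matches = pattern.match('gentleman/ganeti')
# A named group that matched is returned directly.
assert get_value_matched_by_regex('repo', matches, 'gentleman/ganeti') == 'ganeti'
# Asking for a group the pattern never defined hits the IndexError path
# above and raises MissingFieldError.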
def positive_int(val):
if isinstance(val, float):
raise ValueError('"{}" must not be a float'.format(val))
val = int(val)
if val >= 0:
return val
raise ValueError('"{}" must be positive'.format(val)) | Parse `val` into a positive integer. |
def strictly_positive_int_or_none(val):
val = positive_int_or_none(val)
if val is None or val > 0:
return val
raise ValueError('"{}" must be strictly positive'.format(val)) | Parse `val` into either `None` or a strictly positive integer. |
def oboTermParser(filepath):
with io.open(filepath) as openfile:
lineIter = iter([i.rstrip() for i in openfile.readlines()])
#Iterate through lines until the first obo "[Term]" is encountered
try:
line = next(lineIter)
while line != '[Term]':
line = next(lineIter)
header = line #Remove
entryLines = list()
except StopIteration:
errorText = 'File does not contain obo "[Term]" entries.'
raise maspy.errors.FileFormatError(errorText)
for line in lineIter:
#Skip empty lines between entries
if not line:
continue
if line == '[Term]':
yield entryLines
header = line #Remove
entryLines = list()
else:
entryLines.append(line)
#Yield last entry
if entryLines:
yield entryLines | Read an obo file and yield '[Term]' entries.
:param filepath: file path of the .obo file
:yields: lists containing all lines from an obo '[Term]' entry. Lines are
stripped of trailing whitespace but otherwise unprocessed. |
def _attributeLinesToDict(attributeLines):
attributes = dict()
for line in attributeLines:
attributeId, attributeValue = line.split(':', 1)
attributes[attributeId.strip()] = attributeValue.strip()
return attributes | Converts a list of obo 'Term' lines to a dictionary.
:param attributeLines: a list of obo 'Term' lines. Each line contains a key
and a value part which are separated by a ':'.
:return: a dictionary containing the attributes of an obo 'Term' entry.
NOTE: Some attributes can occur multiple times in one single term, for
example 'is_a' or 'relationship'. However, currently only the last
occurrence is stored. |
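A usage sketch combining the two helpers above; 'example.obo' is a placeholder path:

for entryLines in oboTermParser('example.obo'):
    oboTerm = _attributeLinesToDict(entryLines)
    print(oboTerm['id'], oboTerm.get('name'))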
def _termIsObsolete(oboTerm):
isObsolete = False
if u'is_obsolete' in oboTerm:
if oboTerm[u'is_obsolete'].lower() == u'true':
isObsolete = True
return isObsolete | Determine whether an obo 'Term' entry is marked as obsolete.
:param oboTerm: a dictionary as returned by
:func:`maspy.ontology._attributeLinesToDict()`
:return: bool |
def load(self, filepath):
for attributeLines in oboTermParser(filepath):
oboTerm = _attributeLinesToDict(attributeLines)
if oboTerm['id'] not in self.oboTerms:
self.oboTerms[oboTerm['id']] = oboTerm
else:
oldOboTerm = self.oboTerms[oboTerm['id']]
oldTermIsObsolete = _termIsObsolete(oldOboTerm)
newTermIsObsolete = _termIsObsolete(oboTerm)
if oldTermIsObsolete and not newTermIsObsolete:
self.oboTerms[oboTerm['id']] = oboTerm
else:
#At least one of two terms with identical id must be obsolete
assert oldTermIsObsolete or newTermIsObsolete | Import '[Term]' entries from an .obo file. |
def discover_handler_classes(handlers_package):
if handlers_package is None:
return
# Add working directory into PYTHONPATH to import developer packages
sys.path.insert(0, os.getcwd())
package = import_module(handlers_package)
# Continue searching for module if package is not a module
if hasattr(package, '__path__'):
for _, modname, _ in pkgutil.iter_modules(package.__path__):
import_module('{package}.{module}'.format(package=package.__name__, module=modname))
return registered_handlers | Looks for handler classes within handler path module.
Currently it does not look into nested modules.
:param handlers_package: module path to handlers
:type handlers_package: string
:return: list of handler classes |
def request(self, method, path, query=None, content=None):
if not path.startswith("/"):
raise ClientError("Implementation error: Called with bad path %s"
% path)
body = None
if content is not None:
data = self._json_encoder.encode(content)
body = StringProducer(data)
url = self._base_url + path
if query:
prepare_query(query)
params = urlencode(query, doseq=True)
url += "?%s" % params
log.msg("Sending request to %s %s %s" % (url, self.headers, body),
system="Gentleman")
d = self._agent.request(method, url, headers=self.headers,
bodyProducer=body)
protocol = JsonResponseProtocol(d)
@d.addErrback
def connectionFailed(failure):
failure.trap(ConnectionRefusedError)
raise GanetiApiError("Connection refused!")
@d.addCallback
def cb(response):
if response.code != 200:
raise NotOkayError(code=response.code)
response.deliverBody(protocol)
return protocol.getData() | Sends an HTTP request.
This constructs a full URL, encodes and decodes HTTP bodies, and
handles invalid responses in a pythonic way.
@type method: string
@param method: HTTP method to use
@type path: string
@param path: HTTP URL path
@type query: list of two-tuples
@param query: query arguments to pass to urllib.urlencode
@type content: str or None
@param content: HTTP body content
@rtype: object
@return: JSON-Decoded response
@raises GanetiApiError: If an invalid response is returned |
def start(self):
version = yield self.request("get", "/version")
if version != 2:
raise GanetiApiError("Can't work with Ganeti RAPI version %d" %
version)
log.msg("Accessing Ganeti RAPI, version %d" % version,
system="Gentleman")
self.version = version
try:
features = yield self.request("get", "/2/features")
except NotOkayError as noe:
if noe.code == 404:
# Okay, let's calm down, this is totally reasonable. Certain
# older Ganeti RAPIs don't have a list of features.
features = []
else:
# No, wait, panic was the correct thing to do.
raise
log.msg("RAPI features: %r" % (features,), system="Gentleman")
self.features = features | Confirm that we may access the target cluster. |
def get_multi_word_keywords(features):
keys = {
'is not': Token(TokenTypes.NOT_EQUAL, 'is not'),
}
return OrderedDict(sorted(list(keys.items()), key=lambda t: len(t[0]), reverse=True)) | This returns an OrderedDict containing the multi word keywords in order of length.
This is so the tokenizer will match the longer matches before the shorter matches |
def start(self):
Server().start(self.options, self.handler_function, self.__class__.component_type) | Start the server and run forever. |
def get_reverse(self):
if self.sort in FLOAT_ATTRIBUTES:
return True
elif self.sort in NONFLOAT_ATTRIBUTES:
return False
else:
raise InvalidSortError(self.sort) | By default, Cable entries are sorted by rating and Broadcast ratings are
sorted by time.
By default, float attributes are sorted from highest to lowest and non-float
attributes are sorted alphabetically (show, net) or chronologically (time). |
def sort_func(self, entry):
key = entry[self.sort]
if self.sort in FLOAT_ATTRIBUTES and not isinstance(key, float):
return 0 # If value is 'n/a' string
elif self.sort == 'time':
return convert_time(key)
elif self.sort == 'date':
return convert_date(key)
return key | Return the key attribute to determine how data is sorted.
Time will need to be converted to 24 hour time.
In instances when float attributes will have an 'n/a' string, return 0. |
def sort_entries(self):
return sorted(self.data, key=self.sort_func, reverse=self.get_reverse()) | Sort the data with `sort_func`, reversing when `get_reverse()` is True. |
def visible_fields(self):
form_visible_fields = self.form.visible_fields()
if self.render_fields:
fields = self.render_fields
else:
fields = [field.name for field in form_visible_fields]
filtered_fields = [field for field in fields if field not in self.exclude_fields]
return [field for field in form_visible_fields if field.name in filtered_fields] | Returns the reduced set of visible fields to output from the form.
This method respects the provided ``fields`` configuration _and_ excludes
all fields from the ``exclude`` configuration.
If no ``fields`` were provided when configuring this fieldset, all visible
fields minus the excluded fields will be returned.
:return: List of bound field instances or empty tuple. |
def get_fieldsets(self, fieldsets=None):
fieldsets = fieldsets or self.fieldsets
if not fieldsets:
return
# Search for primary marker in at least one of the fieldset kwargs.
has_primary = any(fieldset.get('primary') for fieldset in fieldsets)
for fieldset_kwargs in fieldsets:
fieldset_kwargs = copy.deepcopy(fieldset_kwargs)
fieldset_kwargs['form'] = self
if not has_primary:
fieldset_kwargs['primary'] = True
has_primary = True
yield self.get_fieldset(**fieldset_kwargs) | This method returns a generator which yields fieldset instances.
The method uses the optional fieldsets argument to generate fieldsets for.
If no fieldsets argument is passed, the class property ``fieldsets`` is used.
When generating the fieldsets, the method ensures that at least one fieldset
will be the primary fieldset which is responsible for rendering the non field
errors and hidden fields.
:param fieldsets: Alternative set of fieldset kwargs. If passed, this set is
preferred over the ``fieldsets`` property of the form.
:return: generator which yields fieldset instances. |
def generate_binding_credentials(self, binding):
uri = self.clusters.get(binding.instance.get_cluster(), None)
if not uri:
raise ErrClusterConfig(binding.instance.get_cluster())
# partial credentials
creds = {"username" : self.generate_binding_username(binding),
"password" : pwgen(32, symbols=False),
"database" : binding.instance.get_dbname()}
# uri
uri = uri % (
creds["username"],
creds["password"],
creds["database"])
creds["uri"] = uri
# return creds
return creds | Generate binding credentials.
This function defines the configuration used to
connect to the instance.
Those credentials will be stored in a secret and exposed to a Pod.
It should at least return the 'username' and 'password'.
Args:
binding (AtlasServiceBinding.Binding): A binding
Returns:
dict: All credentials and secrets.
Raises:
ErrClusterConfig: Connection string to the cluster is not available. |
def generate_binding_permissions(self, binding, permissions):
permissions.add_roles(binding.instance.get_dbname(),
[RoleSpecs.dbAdmin,
RoleSpecs.readWrite])
return permissions | Generate user permissions on the database.
Defines roles on the database for the users.
Extra information can be passed via the binding parameters if needed (see binding.parameters).
Args:
binding (AtlasServiceBinding.Binding): A binding
permissions (atlasapi.specs.DatabaseUsersPermissionsSpecs): Permissions for Atlas
Returns:
atlasapi.specs.DatabaseUsersPermissionsSpecs: Permissions for the new user |
def get_chunks(Array, Chunksize):
for i in range(0, len(Array), Chunksize):
yield Array[i:i + Chunksize] | Generator that yields chunks of size `Chunksize`. |
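For example:

data = list(range(10))
assert list(get_chunks(data, 4)) == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
# The final chunk may be shorter than Chunksize.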
def read_data_from_bin_file(fileName):
with open(fileName, mode='rb') as file: # b is important -> binary
fileContent = file.read()
(ChannelData, LenOf1Channel,
NumOfChannels, SampleTime) = read_data_from_bytes(fileContent)
return ChannelData, LenOf1Channel, NumOfChannels, SampleTime | Loads the binary data stored in the a binary file and extracts the
data for each channel that was saved, along with the sample rate and length
of the data array.
Parameters
----------
fileName : str
path to a .bin file exported from the saleae data logger.
Returns
-------
ChannelData : list
List containing a list which contains the data from each channel
LenOf1Channel : int
The length of the data in each channel
NumOfChannels : int
The number of channels saved
SampleTime : float
The time between samples (in seconds) |
def read_data_from_bytes(fileContent):
TotalDataLen = struct.unpack('Q', fileContent[:8])[0] # Unsigned long long
NumOfChannels = struct.unpack('I', fileContent[8:12])[0] # unsigned int
SampleTime = struct.unpack('d', fileContent[12:20])[0]
AllChannelData = struct.unpack("f" * ((len(fileContent) -20) // 4), fileContent[20:])
# ignore the 20 header bytes
# The remaining bytes form the body; the number of floats is the body's byte count divided by 4 (since 4 bytes = 32 bits = sizeof(float))
LenOf1Channel = int(TotalDataLen/NumOfChannels)
ChannelData = list(get_chunks(AllChannelData, LenOf1Channel))
return ChannelData, LenOf1Channel, NumOfChannels, SampleTime | Takes the binary data stored in the binary string provided and extracts the
data for each channel that was saved, along with the sample rate and length
of the data array.
Parameters
----------
fileContent : bytes
bytes object containing the data from a .bin file exported from
the saleae data logger.
Returns
-------
ChannelData : list
List containing a list which contains the data from each channel
LenOf1Channel : int
The length of the data in each channel
NumOfChannels : int
The number of channels saved
SampleTime : float
The time between samples (in seconds) |
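A round-trip sketch of the 20-byte header layout (uint64 TotalDataLen, uint32 NumOfChannels, float64 SampleTime, then float32 samples); all values are made up:

import struct

samples = [0.0, 1.0, 2.0, 3.0]                    # 2 channels x 2 samples each
blob = (struct.pack('Q', len(samples)) +
        struct.pack('I', 2) +
        struct.pack('d', 0.001) +
        struct.pack('f' * len(samples), *samples))
channelData, lenOf1Channel, numOfChannels, sampleTime = read_data_from_bytes(blob)
assert numOfChannels == 2 and lenOf1Channel == 2 and sampleTime == 0.001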
def interpret_waveform(fileContent, RelativeChannelNo):
(ChannelData, LenOf1Channel,
NumOfChannels, SampleTime) = read_data_from_bytes(fileContent)
if RelativeChannelNo > NumOfChannels-1:
raise ValueError("There are {} channels saved, you attempted to read relative channel number {}. Pick a relative channel number between {} and {}".format(NumOfChannels, RelativeChannelNo, 0, NumOfChannels-1))
data = ChannelData[RelativeChannelNo]
del(ChannelData)
return (0, SampleTime * LenOf1Channel, SampleTime), data | Extracts the data for just 1 channel and the (start, stop, step)
parameters (in seconds, starting from 0) from which the corresponding time array can be built.
Important Note: RelativeChannelNo is NOT the channel number on the Saleae data logger
it is the relative number of the channel that was saved. E.g. if you
save channels 3, 7 and 10, the corresponding RelativeChannelNos would
be 0, 1 and 2.
Parameters
----------
fileContent : bytes
bytes object containing the data from a .bin file exported from
the saleae data logger.
RelativeChannelNo : int
The relative order/position of the channel number in the saved
binary file. See Important Note above!
Returns
-------
timeParams : tuple
(start, stop, step) in seconds, from which the time array can be
generated (e.g. with numpy.arange)
data : list
The data from the relative channel requested |
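The first return value is a (start, stop, step) triple rather than a materialized array, so the time axis can be rebuilt on demand; continuing the synthetic blob from the sketch above:

import numpy as np

(start, stop, step), data = interpret_waveform(blob, 0)
time = np.arange(start, stop, step)               # one timestamp per sample, from 0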
def getApi():
api = Blueprint('health', __name__, url_prefix='/')
@api.route('health', methods=['GET'])
def health():
'''Health check'''
return jsonify({ "status" : True})
return api | Get the Api for /health.
Returns:
Blueprint: section for health check |
def get_coord_box(centre_x, centre_y, distance):
"""Todo: return coordinates inside a circle, rather than a square"""
return {
'top_left': (centre_x - distance, centre_y + distance),
'top_right': (centre_x + distance, centre_y + distance),
'bottom_left': (centre_x - distance, centre_y - distance),
'bottom_right': (centre_x + distance, centre_y - distance),
} | Get the square boundary coordinates for a given centre and distance |
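For example, a centre of (10, 20) with distance 5:

box = get_coord_box(10, 20, 5)
assert box['top_left'] == (5, 25)
assert box['bottom_right'] == (15, 15)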
def fleet_ttb(unit_type, quantity, factories, is_techno=False, is_dict=False, stasis_enabled=False):
unit_weights = {
UNIT_SCOUT: 1,
UNIT_DESTROYER: 13,
UNIT_BOMBER: 10,
UNIT_CRUISER: 85,
UNIT_STARBASE: 1,
}
govt_weight = 80 if is_dict else 100
prod_weight = 85 if is_techno else 100
weighted_qty = unit_weights[unit_type] * quantity
ttb = (weighted_qty * govt_weight * prod_weight) * (2 * factories)
# TTB is 66% longer with stasis enabled
return ttb + (ttb * 0.66) if stasis_enabled else ttb | Calculate the time taken to construct a given fleet |
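A worked example under technocracy, without dictatorship or stasis; the UNIT_* constants come from the same module:

# weighted_qty = 13 * 10 = 130
# ttb = (130 * 100 * 85) * (2 * 4) = 8,840,000
assert fleet_ttb(UNIT_DESTROYER, 10, 4, is_techno=True) == 8840000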
def parse_fasta(data): # pragma: no cover
name, seq = None, []
for line in data:
line = line.rstrip()
if line.startswith('>'):
if name:
yield Sequence(name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name:
yield Sequence(name, ''.join(seq)) | Load sequences in Fasta format.
This generator function yields a Sequence object for each sequence record
in a GFF3 file. Implementation stolen shamelessly from
http://stackoverflow.com/a/7655072/459780. |
def _resolve_features(self):
for parentid in self.featsbyparent:
parent = self.featsbyid[parentid]
for child in self.featsbyparent[parentid]:
parent.add_child(child, rangecheck=self.strict)
# Replace top-level multi-feature reps with a pseudo-feature
for n, record in enumerate(self.records):
if not isinstance(record, Feature):
continue
if not record.is_multi:
continue
assert record.multi_rep == record
newrep = sorted(record.siblings + [record])[0]
if newrep != record:
for sib in sorted(record.siblings + [record]):
sib.multi_rep = newrep
if sib != newrep:
newrep.add_sibling(sib)
record.siblings = None
parent = newrep.pseudoify()
self.records[n] = parent
if not self.assumesorted:
for seqid in self.inferred_regions:
if seqid not in self.declared_regions:
seqrange = self.inferred_regions[seqid]
srstring = '##sequence-region {:s} {:d} {:d}'.format(
seqid, seqrange.start + 1, seqrange.end
)
seqregion = Directive(srstring)
self.records.append(seqregion)
for record in sorted(self.records):
yield record
self._reset() | Resolve Parent/ID relationships and yield all top-level features. |
def _reset(self):
self.records = list()
self.featsbyid = dict()
self.featsbyparent = dict()
self.countsbytype = dict() | Clear internal data structure. |
def get_by_label(self, label):
return next((x for x in self if x.label == label), None) | Return the first item with a specific label,
or None. |
def getGenericAnswers(self, name, instruction, prompts):
responses = []
for prompt, _echo in prompts:
password = self.getPassword(prompt)
responses.append(password)
return defer.succeed(responses) | Called when the server requests keyboard interactive authentication |
def pairwise(iterable):
iterator = iter(iterable)
try:
first = next(iterator)
except StopIteration:
return
for element in iterator:
yield first, element
first = element | Generate consecutive pairs of elements from the given iterable. |
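For example:

assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
assert list(pairwise([])) == []     # empty and single-element iterables yield nothing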
def pick_cert_for_twisted(netloc, possible):
try:
creds = possible[netloc]
except KeyError:
return (None, ())
key = ssl.KeyPair.load(creds.key.as_bytes(), FILETYPE_PEM)
return (
ssl.PrivateCertificate.load(
creds.chain.certificates[0].as_bytes(), key, FILETYPE_PEM,
),
tuple(
ssl.Certificate.load(cert.as_bytes(), FILETYPE_PEM)
for cert
in creds.chain.certificates[1:]
),
) | Pick the right client key/certificate to use for the given server and
return it in the form Twisted wants.
:param NetLocation netloc: The location of the server to consider.
:param dict[TLSCredentials] possible: The available credentials from which
to choose.
:return: A two-tuple. If no credentials were found, the elements are
``None`` and ``[]``. Otherwise, the first element is a
``twisted.internet.ssl.PrivateCertificate`` instance representing the
client certificate to use and the second element is a ``tuple`` of
``twisted.internet.ssl.Certificate`` instances representing the rest
of the chain necessary to validate the client certificate. |
def pick_trust_for_twisted(netloc, possible):
try:
trust_cert = possible[netloc]
except KeyError:
return None
cert = ssl.Certificate.load(trust_cert.as_bytes(), FILETYPE_PEM)
return ssl.trustRootFromCertificates([cert]) | Pick the right "trust roots" (certificate authority certificates) for the
given server and return it in the form Twisted wants.
Kubernetes certificates are often self-signed or otherwise exist outside
of the typical certificate authority cartel system common for normal
websites. This function tries to find the right authority to use.
:param NetLocation netloc: The location of the server to consider.
:param dict[pem.Certificate] possible: The available certificate authority
certificates from which to choose.
:return: A provider of ``twisted.internet.interfaces.IOpenSSLTrustRoot``
if there is a known certificate authority certificate for the given
server. Otherwise, ``None``. |
def https_policy_from_config(config):
server = config.cluster["server"]
base_url = URL.fromText(native_string_to_unicode(server))
ca_certs = pem.parse(config.cluster["certificate-authority"].bytes())
if not ca_certs:
raise ValueError("No certificate authority certificate found.")
ca_cert = ca_certs[0]
try:
# Validate the certificate so we have early failures for garbage data.
ssl.Certificate.load(ca_cert.as_bytes(), FILETYPE_PEM)
except OpenSSLError as e:
raise ValueError(
"Invalid certificate authority certificate found.",
str(e),
)
netloc = NetLocation(host=base_url.host, port=base_url.port)
policy = ClientCertificatePolicyForHTTPS(
credentials={},
trust_roots={
netloc: ca_cert,
},
)
return policy | Create an ``IPolicyForHTTPS`` which can authenticate a Kubernetes API
server.
:param KubeConfig config: A Kubernetes configuration containing an active
context identifying a cluster. The resulting ``IPolicyForHTTPS`` will
authenticate the API server for that cluster.
:return IPolicyForHTTPS: A TLS context which requires server certificates
signed by the certificate authority certificate associated with the
active context's cluster. |
def authenticate_with_certificate_chain(reactor, base_url, client_chain, client_key, ca_cert):
if base_url.scheme != u"https":
raise ValueError(
"authenticate_with_certificate() makes sense for HTTPS, not {!r}".format(
base_url.scheme
),
)
netloc = NetLocation(host=base_url.host, port=base_url.port)
policy = ClientCertificatePolicyForHTTPS(
credentials={
netloc: TLSCredentials(
chain=Chain(certificates=Certificates(client_chain)),
key=client_key,
),
},
trust_roots={
netloc: ca_cert,
},
)
return Agent(reactor, contextFactory=policy) | Create an ``IAgent`` which can issue authenticated requests to a
particular Kubernetes server using a client certificate.
:param reactor: The reactor with which to configure the resulting agent.
:param twisted.python.url.URL base_url: The base location of the
Kubernetes API.
:param list[pem.Certificate] client_chain: The client certificate (and
chain, if applicable) to use.
:param pem.Key client_key: The private key to use with the client
certificate.
:param pem.Certificate ca_cert: The certificate authority to respect when
verifying the Kubernetes server certificate.
:return IAgent: An agent which will authenticate itself to a particular
Kubernetes server and which will verify that server or refuse to
interact with it. |
def authenticate_with_certificate(reactor, base_url, client_cert, client_key, ca_cert):
return authenticate_with_certificate_chain(
reactor, base_url, [client_cert], client_key, ca_cert,
) | See ``authenticate_with_certificate_chain``.
:param pem.Certificate client_cert: The client certificate to use. |
def authenticate_with_serviceaccount(reactor, **kw):
config = KubeConfig.from_service_account(**kw)
policy = https_policy_from_config(config)
token = config.user["token"]
agent = HeaderInjectingAgent(
_to_inject=Headers({u"authorization": [u"Bearer {}".format(token)]}),
_agent=Agent(reactor, contextFactory=policy),
)
return agent | Create an ``IAgent`` which can issue authenticated requests to a
particular Kubernetes server using a service account token.
:param reactor: The reactor with which to configure the resulting agent.
:param bytes path: The location of the service account directory. The
default should work fine for normal use within a container.
:return IAgent: An agent which will authenticate itself to a particular
Kubernetes server and which will verify that server or refuse to
interact with it. |
def first_time_setup(self):
if not self._auto_unlock_key_position():
pw = password.create_passwords()[0]
attrs = {'application': self.keyring}
gkr.item_create_sync(self.default_keyring
,gkr.ITEM_GENERIC_SECRET
,self.keyring
,attrs
,pw
,True)
found_pos = self._auto_unlock_key_position()
item_info = gkr.item_get_info_sync(self.default_keyring, found_pos)
gkr.create_sync(self.keyring, item_info.get_secret()) | First time running Open Sesame?
Create keyring and an auto-unlock key in default keyring. Make sure
these things don't already exist. |
def _auto_unlock_key_position(self):
found_pos = None
default_keyring_ids = gkr.list_item_ids_sync(self.default_keyring)
for pos in default_keyring_ids:
item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos)
app = 'application'
if item_attrs.has_key(app) and item_attrs[app] == "opensesame":
found_pos = pos
break
return found_pos | Find the open sesame password in the default keyring |
def get_position_searchable(self):
ids = gkr.list_item_ids_sync(self.keyring)
position_searchable = {}
for i in ids:
item_attrs = gkr.item_get_attributes_sync(self.keyring, i)
position_searchable[i] = item_attrs['searchable']
return position_searchable | Return a dict of each position and its corresponding searchable str |
def _match_exists(self, searchable):
position_searchable = self.get_position_searchable()
for pos,val in position_searchable.iteritems():
if val == searchable:
return pos
return False | Return the position of an existing item with this searchable description, or False |
def save_password(self, password, **attrs):
pos_of_match = self._match_exists(attrs['searchable'])
if pos_of_match:
old_password = self.get_password(pos_of_match).get_secret()
gkr.item_delete_sync(self.keyring, pos_of_match)
desc = str(int(time.time())) + "_" + attrs['searchable']
gkr.item_create_sync(self.keyring
,gkr.ITEM_GENERIC_SECRET
,desc
,{}
,old_password
,True)
desc = attrs['searchable']
pos = gkr.item_create_sync(self.keyring
,gkr.ITEM_GENERIC_SECRET
,desc
,attrs
,password
,True)
return pos | Save the new password, save the old password with the date prepended |
def parse_url(self):
url = urlparse(self.url).path
# handle git
url = url.split('.git')[0]
if ':' in url:
url = url.split(':')[1]
# Only capture the last two path segments
try:
project, repo = url.split('/')[-2:]
except ValueError:
raise ParserError('"{}" is not a valid repository URL.'.format(self.url))
return project, repo | Parse a git/ssh/http(s) url. |
def _fetch_dimensions(self, dataset):
yield Dimension(u"school")
yield Dimension(u"year",
datatype="year")
yield Dimension(u"semester",
datatype="academic_term",
dialect="swedish") # HT/VT
yield Dimension(u"municipality",
datatype="year",
domain="sweden/municipalities") | Iterate through semesters, counties and municipalities. |
def _merge_configs(configs):
result = {
u"contexts": [],
u"users": [],
u"clusters": [],
u"current-context": None,
}
for config in configs:
for k in {u"contexts", u"users", u"clusters"}:
try:
values = config.doc[k]
except KeyError:
pass
else:
result[k].extend(values)
if result[u"current-context"] is None:
try:
result[u"current-context"] = config.doc[u"current-context"]
except KeyError:
pass
return KubeConfig(result) | Merge one or more ``KubeConfig`` objects.
:param list[KubeConfig] configs: The configurations to merge.
:return KubeConfig: A single configuration object with the merged
configuration. |
def _merge_configs_from_env(kubeconfigs):
paths = list(
FilePath(p)
for p
in kubeconfigs.split(pathsep)
if p
)
config = _merge_configs(list(
KubeConfig.from_file(p.path)
for p
in paths
))
return config | Merge configuration files from a ``KUBECONFIG`` environment variable.
:param bytes kubeconfigs: A value like the one given to ``KUBECONFIG`` to
specify multiple configuration files.
:return KubeConfig: A configuration object which has merged all of the
configuration from the specified configuration files. Merging is
performed according to
https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/#merging-kubeconfig-files |
def network_kubernetes_from_context(
reactor, context=None, path=None, environ=None,
default_config_path=FilePath(expanduser(u"~/.kube/config")),
):
if path is None:
if environ is None:
from os import environ
try:
kubeconfigs = environ[u"KUBECONFIG"]
except KeyError:
config = KubeConfig.from_file(default_config_path.path)
else:
config = _merge_configs_from_env(kubeconfigs)
else:
config = KubeConfig.from_file(path.path)
if context is None:
context = config.doc[u"current-context"]
context = config.contexts[context]
cluster = config.clusters[context[u"cluster"]]
user = config.users[context[u"user"]]
if isinstance(cluster[u"server"], bytes):
base_url = URL.fromText(cluster[u"server"].decode("ascii"))
else:
base_url = URL.fromText(cluster[u"server"])
[ca_cert] = parse(cluster[u"certificate-authority"].bytes())
client_chain = parse(user[u"client-certificate"].bytes())
[client_key] = parse(user[u"client-key"].bytes())
agent = authenticate_with_certificate_chain(
reactor, base_url, client_chain, client_key, ca_cert,
)
return network_kubernetes(
base_url=base_url,
agent=agent,
) | Create a new ``IKubernetes`` provider based on a kube config file.
:param reactor: A Twisted reactor which will be used for I/O and
scheduling.
:param unicode context: The name of the kube config context from which to
load configuration details. Or, ``None`` to respect the current
context setting from the configuration.
:param FilePath path: The location of the kube config file to use.
:param dict environ: An environment mapping in which to look up
``KUBECONFIG``. If ``None``, the real process environment will be
inspected. This is used only if ``path`` is ``None``.
:return IKubernetes: The Kubernetes service described by the named
context. |
def collection_location(obj):
# TODO kind is not part of IObjectLoader and we should really be loading
# apiVersion off of this object too.
kind = obj.kind
apiVersion = obj.apiVersion
prefix = version_to_segments[apiVersion]
collection = kind.lower() + u"s"
if IObject.providedBy(obj):
# Actual objects *could* have a namespace...
namespace = obj.metadata.namespace
else:
# Types representing a kind couldn't possibly have one.
namespace = None
if namespace is None:
# If there's no namespace, look in the un-namespaced area.
return prefix + (collection,)
# If there is, great, look there.
return prefix + (u"namespaces", namespace, collection) | Get the URL for the collection of objects like ``obj``.
:param obj: Either a type representing a Kubernetes object kind or an
instance of such a type.
:return tuple[unicode]: Some path segments to stick on to a base URL to
construct the location of the collection of objects like the one
given. |
def enter_new_scope(ctx):
ctx = ctx.clone()
ctx.waiting_for = ctx.compiled_story().children_matcher()
return ctx | we are inside a new scope with its own children matcher
:param ctx:
:return: |
def iterate_storyline(ctx):
logger.debug('# start iterate')
compiled_story = ctx.compiled_story()
if not compiled_story:
return
for step in range(ctx.current_step(),
len(compiled_story.story_line)):
ctx = ctx.clone()
tail = ctx.stack_tail()
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack[:-1] + [{
'data': tail['data'],
'step': step,
'topic': tail['topic'],
}])
logger.debug('# [{}] iterate'.format(step))
logger.debug(ctx)
ctx = yield ctx | iterate the last storyline from the last visited story part
:param ctx:
:return: |
def scope_in(ctx):
logger.debug('# scope_in')
logger.debug(ctx)
ctx = ctx.clone()
compiled_story = None
if not ctx.is_empty_stack():
compiled_story = ctx.get_child_story()
logger.debug('# child')
logger.debug(compiled_story)
# we match child story loop once by message
# what should prevent multiple matching by the same message
ctx.matched = True
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack[:-1] + [{
'data': matchers.serialize(callable.WaitForReturn()),
'step': stack[-1]['step'],
'topic': stack[-1]['topic']
}])
try:
if not compiled_story and ctx.is_scope_level_part():
compiled_story = ctx.get_current_story_part()
except story_context.MissedStoryPart:
pass
if not compiled_story:
compiled_story = ctx.compiled_story()
logger.debug('# [>] going deeper')
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack + [
stack_utils.build_empty_stack_item(compiled_story.topic)])
logger.debug(ctx)
return ctx | - build new scope on the top of stack
- and the current scope will wait for its result
:param ctx:
:return: |
def scope_out(ctx):
logger.debug('# scope_out')
logger.debug(ctx)
# we reach the end of story line
# so we could collapse previous scope and related stack item
if ctx.is_tail_of_story() and ctx.could_scope_out():
logger.debug('# [<] return')
ctx = ctx.clone()
ctx.message['session']['stack'] = ctx.message['session']['stack'][:-1]
if not ctx.is_empty_stack() and \
(ctx.is_scope_level_part() or \
ctx.is_breaking_a_loop()):
ctx.message = modify_stack_in_message(ctx.message,
lambda stack: stack[:-1] + [{
'data': stack[-1]['data'],
'step': stack[-1]['step'] + 1,
'topic': stack[-1]['topic'],
}])
if ctx.is_breaking_a_loop() and not ctx.is_scope_level():
ctx.waiting_for = None
logger.debug(ctx)
return ctx | drop last stack item if:
- we have reached the end of the stack
- and aren't waiting for any input
:param ctx:
:return: |
def str2date(self, date_str):
# try default date template
try:
a_datetime = datetime.strptime(
date_str, self._default_date_template)
return a_datetime.date()
except:
pass
# try every date templates
for template in date_template_list:
try:
a_datetime = datetime.strptime(date_str, template)
self._default_date_template = template
return a_datetime.date()
except:
pass
# raise error
raise ValueError("Unable to parse date from: %r!" % date_str) | Parse date from string.
If there's no template matches your string, Please go
https://github.com/MacHu-GWU/rolex-project/issues
submit your datetime string. I 'll update templates ASAP.
This method is faster than :meth:`dateutil.parser.parse`.
:param date_str: a string represent a date
:type date_str: str
:return: a date object
**中文文档**
从string解析date。首先尝试默认模板, 如果失败了, 则尝试所有的模板。
一旦尝试成功, 就将当前成功的模板保存为默认模板。这样做在当你待解析的
字符串非常多, 且模式单一时, 只有第一次尝试耗时较多, 之后就非常快了。
该方法要快过 :meth:`dateutil.parser.parse` 方法。 |
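A sketch of the template caching described above, assuming these methods live on a Parser class and '%Y-%m-%d' is among the templates:

p = Parser()
p.str2date('2014-05-03')    # first call scans the template list
p.str2date('2014-06-01')    # later calls hit the cached default template directly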
def _str2datetime(self, datetime_str):
# try default datetime template
try:
a_datetime = datetime.strptime(
datetime_str, self._default_datetime_template)
return a_datetime
except:
pass
# try every datetime templates
for template in datetime_template_list:
try:
a_datetime = datetime.strptime(datetime_str, template)
self._default_datetime_template = template
return a_datetime
except:
pass
# raise error
a_datetime = parse(datetime_str)
self.str2datetime = parse
return a_datetime | Parse datetime from string.
If no template matches your string, please go to
https://github.com/MacHu-GWU/rolex-project/issues
and submit your datetime string; the templates will be updated ASAP.
This method is faster than :meth:`dateutil.parser.parse`.
:param datetime_str: a string representing a datetime
:type datetime_str: str
:return: a datetime object
The default template is tried first; if it fails, every template is tried,
and the first one that succeeds is saved as the new default. When many
strings share a single pattern, only the first parse is slow and the rest
are very fast.
As a safeguard against template failures, once every template has failed
the parser permanently switches to :meth:`dateutil.parser.parse`; call
:meth:`Parser.reset()` to restore the default behaviour. |
def parse_date(self, value):
if isinstance(value, sixmini.string_types):
return self.str2date(value)
elif value is None:
raise TypeError("Unable to parse date from %r" % value)
elif isinstance(value, sixmini.integer_types):
return date.fromordinal(value)
elif isinstance(value, datetime):
return value.date()
elif isinstance(value, date):
return value
else:
raise ValueError("Unable to parse date from %r" % value) | A lazy method to parse anything to date.
If input data type is:
- string: parse date from it
- integer: use from ordinal
- datetime: use date part
- date: just return it |
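A behavioural sketch, with Parser as the assumed class name as above:

from datetime import date, datetime

p = Parser()
assert p.parse_date('2014-05-03') == date(2014, 5, 3)
assert p.parse_date(735356) == date.fromordinal(735356)
assert p.parse_date(datetime(2014, 5, 3, 12, 0)) == date(2014, 5, 3)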
def parse_datetime(self, value):
if isinstance(value, sixmini.string_types):
return self.str2datetime(value)
elif value is None:
raise TypeError("Unable to parse datetime from %r" % value)
elif isinstance(value, sixmini.integer_types):
return from_utctimestamp(value)
elif isinstance(value, float):
return from_utctimestamp(value)
elif isinstance(value, datetime):
return value
elif isinstance(value, date):
return datetime(value.year, value.month, value.day)
else:
raise ValueError("Unable to parse datetime from %r" % value) | A lazy method to parse anything to datetime.
If input data type is:
- string: parse datetime from it
- integer or float: treat as a UTC timestamp
- date: use the date part and set hour, minute, second to zero
- datetime: just return it |
def define(self):
if len(self.states) == 0:
for char in self.alphabet:
self.add_arc(0, 0, char)
self[0].final = False | If DFA is empty, create a sink state |
def add_state(self):
sid = len(self.states)
self.states.append(DFAState(sid))
return sid | Adds a new state |
def add_arc(self, src, dst, char):
# assert type(src) == type(int()) and type(dst) == type(int()), \
#     "State type should be integer."
# assert char in self.I
for s_idx in [src, dst]:
if s_idx >= len(self.states):
for i in range(len(self.states), s_idx + 1):
self.states.append(DFAState(i))
for arc in self.states[src].arcs:
if arc.ilabel == self.isyms.__getitem__(char) or char == EPSILON:
self.nfa = True
break
self.states[src].arcs.append(
DFAArc(src, dst, self.isyms.__getitem__(char))) | Adds a new Arc
Args:
src (int): The source state identifier
dst (int): The destination state identifier
char (str): The character for the transition
Returns:
None |
def complement(self, alphabet):
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
if state.final:
state.final = False
else:
state.final = True | Computes the complement of the DFA in place by flipping final states
Args:
alphabet (list): The input alphabet
Returns:
None |
def init_from_acceptor(self, acceptor):
self.states = copy.deepcopy(acceptor.states)
self.alphabet = copy.deepcopy(acceptor.alphabet)
self.osyms = copy.deepcopy(acceptor.osyms)
self.isyms = copy.deepcopy(acceptor.isyms) | Initialize this automaton from an acceptor
Args:
acceptor (DFA): The acceptor to copy states, alphabet and symbol tables from
Returns:
None |
def load(self, txt_fst_file_name):
with open(txt_fst_file_name, 'r') as input_filename:
for line in input_filename:
line = line.strip()
split_line = line.split()
if len(split_line) == 1:
self[int(split_line[0])].final = True
else:
self.add_arc(int(split_line[0]), int(split_line[1]),
split_line[2].decode('hex')) | Load the transducer from the text file format of OpenFST.
The format is specified as follows:
arc format: src dest ilabel olabel [weight]
final state format: state [weight]
lines may occur in any order except initial state must be first line
Args:
txt_fst_file_name (str): The input file
Returns:
None |
def intersect(self, other):
operation = bool.__and__
self.cross_product(other, operation)
return self | Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the intersect operation
Returns:
DFA: The resulting DFA |
def symmetric_difference(self, other):
operation = bool.__xor__
self.cross_product(other, operation)
return self | Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA |
def union(self, other):
operation = bool.__or__
self.cross_product(other, operation)
return self | Constructs an unminimized DFA recognizing the union of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the union operation
Returns:
DFA: The resulting DFA |
def _epsilon_closure(self, state):
closure = set([state.stateid])
stack = [state]
while True:
if not stack:
break
s = stack.pop()
for arc in s:
if self.isyms.find(arc.ilabel) != EPSILON or \
arc.nextstate in closure:
continue
closure.add(arc.nextstate)
stack.append(self.states[arc.nextstate])
return closure | Returns the \epsilon-closure for the state given as input. |
def invert(self):
for state in self.states:
if state.final:
state.final = False
else:
state.final = True | Inverts the DFA final states |
def as_list(self):
if hasattr(self, 'cust_list'):
return self.cust_list
if hasattr(self, 'attr_check'):
self.attr_check()
cls_bltns = set(dir(self.__class__))
ret = [a for a in dir(self) if a not in cls_bltns and getattr(self, a)]
return ret | returns a list version of the object, based on its attributes |
def as_dict(self):
if hasattr(self, 'cust_dict'):
return self.cust_dict
if hasattr(self, 'attr_check'):
self.attr_check()
cls_bltns = set(dir(self.__class__))
return {a: getattr(self, a) for a in dir(self) if a not in cls_bltns} | returns a dict version of the object, based on its attributes |
def as_odict(self):
if hasattr(self, 'cust_odict'):
return self.cust_odict
if hasattr(self, 'attr_check'):
self.attr_check()
odc = odict()
for attr in self.attrorder:
odc[attr] = getattr(self, attr)
return odc | returns an odict version of the object, based on its attributes |
def fetch_and_parse(url, bodyLines):
pageHtml = fetch_page(url)
return parse(url, pageHtml, bodyLines) | Takes a url, and returns a dictionary of data with 'bodyLines' lines |
def copy_rec(source, dest):
    if os.path.isdir(source):
        for child in os.listdir(source):
            src_child = os.path.join(source, child)
            if os.path.isdir(src_child):
                # mirror the subdirectory in the destination
                new_dest = os.path.join(dest, child)
                os.makedirs(new_dest, exist_ok=True)
                copy_rec(src_child, new_dest)
            else:
                copy_rec(src_child, dest)
    elif os.path.isfile(source):
        logging.info(' Copy "{}" to "{}"'.format(source, dest))
        shutil.copy(source, dest)
    else:
        logging.info(' Ignoring "{}"'.format(source)) | Copy files between different directories.
Copy one or more files to an existing directory. This function is
recursive; if the source is a directory, all its subdirectories are created
in the destination. Existing files in the destination are overwritten without
any warning.
Args:
source (str): File or directory name.
dest (str): Directory name.
Raises:
FileNotFoundError: Destination directory doesn't exist. |
def build(self):
signed = bool(self.options() & Builder.Options.Signed)
# remove previous build information
buildpath = self.buildPath()
if not buildpath:
raise errors.InvalidBuildPath(buildpath)
# setup the environment
for key, value in self.environment().items():
log.info('SET {0}={1}'.format(key, value))
os.environ[key] = value
if os.path.exists(buildpath):
shutil.rmtree(buildpath)
# generate the build path for the installer
os.makedirs(buildpath)
# create the output path
outpath = self.outputPath()
if not os.path.exists(outpath):
os.makedirs(outpath)
# copy license information
src = self.licenseFile()
if src and os.path.exists(src):
targ = os.path.join(buildpath, 'license.txt')
shutil.copyfile(src, targ)
# generate revision information
if self.options() & Builder.Options.GenerateRevision:
self.generateRevision()
# generate documentation information
if self.options() & Builder.Options.GenerateDocs:
self.generateDocumentation(buildpath)
# generate setup file
if self.options() & Builder.Options.GenerateSetupFile:
setuppath = os.path.join(self.sourcePath(), '..')
egg = (self.options() & Builder.Options.GenerateEgg) != 0
self.generateSetupFile(setuppath, egg=egg)
# generate executable information
if self.options() & Builder.Options.GenerateExecutable:
if not self.generateExecutable(signed=signed):
return
# generate zipfile information
if self.options() & Builder.Options.GenerateZipFile:
self.generateZipFile(self.outputPath())
# generate installer information
if self.options() & Builder.Options.GenerateInstaller:
self.generateInstaller(buildpath, signed=signed) | Builds this object into the desired output information. |
def generateRevision(self):
revpath = self.sourcePath()
if not os.path.exists(revpath):
return
# determine the revision location
revfile = os.path.join(revpath, self.revisionFilename())
mode = ''
# test for svn revision
try:
args = ['svn', 'info', revpath]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
mode = 'svn'
except WindowsError:
try:
args = ['git', 'rev-parse', 'HEAD', revpath]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
mode = 'git'
except WindowsError:
return
# process SVN revision
rev = None
if mode == 'svn':
for line in proc.stdout:
data = re.match(r'^Revision: (\d+)', line)
if data:
rev = int(data.group(1))
break
if rev is not None:
try:
f = open(revfile, 'w')
f.write('__revision__ = {0}\n'.format(rev))
f.close()
except IOError:
pass | Generates the revision file for this builder. |
def generateZipFile(self, outpath='.'):
fname = self.installName() + '.zip'
outfile = os.path.abspath(os.path.join(outpath, fname))
# clears out the existing archive
if os.path.exists(outfile):
try:
os.remove(outfile)
except OSError:
log.warning('Could not remove zipfile: %s', outfile)
return False
# generate the zip file
zfile = zipfile.ZipFile(outfile, 'w')
# zip up all relevant files from the code base
if os.path.isfile(self.sourcePath()):
zfile.write(self.sourcePath(), os.path.basename(self.sourcePath()))
else:
basepath = os.path.abspath(os.path.join(self.sourcePath(), '..'))
baselen = len(basepath) + 1
for root, folders, filenames in os.walk(basepath):
# ignore hidden folders
if '.svn' in root or '.git' in root:
continue
# ignore setuptools build info
part = root[baselen:].split(os.path.sep)[0]
if part in ('build', 'dist') or part.endswith('.egg-info'):
continue
# include files
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext in self.ignoreFileTypes():
continue
arcroot = root[baselen:].replace('\\', '/')
arcname = os.path.join(arcroot, filename)
log.info('Archiving %s...', arcname)
zfile.write(os.path.join(root, filename), arcname)
zfile.close()
return True | Generates the zip file for this builder. |
def installName(self):
opts = {'name': self.name(), 'version': self.version()}
if self.revision():
opts['revision'] = '.{0}'.format(self.revision())
else:
opts['revision'] = ''
if self._installName:
return self._installName.format(**opts)
else:
return '{name}-{version}{revision}'.format(**opts) | Returns the name for the installer this builder will generate.
:return <str> |
def licenseFile(self):
if self._licenseFile:
return self._licenseFile
elif self._license:
f = projex.resources.find('licenses/{0}.txt'.format(self.license()))
return f
else:
return '' | Returns the license file for this builder.
:return <str> |
def sign(self, filename):
sign = self.signcmd()
certificate = self.certificate()
if not sign:
log.error('No signcmd defined.')
return False
elif not certificate and '{cert}' in sign:
log.error('No sign certificate defined.')
return False
log.info('Signing {0}...'.format(filename))
sign = os.path.expandvars(sign)
filename = os.path.expandvars(filename)
cert = os.path.expandvars(certificate)
# let the previous process finish fully, or we might get some file errors
time.sleep(2)
return cmdexec(sign.format(filename=filename, cert=cert)) == 0 | Signs the filename with the certificate associated with this builder.
:param filename | <str>
:return <bool> | success |
def plugin(name, module=''):
if module:
mod = projex.importfile(module)
if mod:
return getattr(mod, nstr(name), None)
return Builder._plugins.get(nstr(name)) | Returns the plugin for the given name. By default, the
base Builder instance will be returned.
:param name | <str> |
def register(plugin, name=None):
if name is None:
name = plugin.__name__
Builder._plugins[nstr(name)] = plugin | Registers the given builder as a plugin to the system.
:param plugin | <subclass of PackageBuilder>
name | <str> || None |
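A hypothetical registration round-trip, assuming plugin and register are exposed as static methods on Builder and MyBuilder is a placeholder subclass:

class MyBuilder(Builder):
    pass

Builder.register(MyBuilder)     # stored under its class name
assert Builder.plugin('MyBuilder') is MyBuilder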
def fromXml(cls, xdata, filepath=''):
builder = cls()
builder.loadXml(xdata, filepath=filepath)
return builder | Generates a new builder from the given xml data and then
loads its information.
:param xdata | <xml.etree.ElementTree.Element>
:return <Builder> || None |
def fromYaml(cls, ydata, filepath=''):
builder = cls()
builder.loadYaml(ydata, filepath=filepath)
return builder | Generates a new builder from the given yaml data and then
loads its information.
:param ydata | <dict>
:return <Builder> || None |
def fromFile(filename):
xdata = None
ydata = None
# try parsing an XML file
try:
xdata = ElementTree.parse(filename).getroot()
except StandardError:
xdata = None
if xdata is None:
# try parsing a yaml file
if yaml:
with open(filename, 'r') as f:
text = f.read()
try:
ydata = yaml.load(text)
except StandardError:
return None
else:
log.warning('Could not process yaml builder!')
# load a yaml definition
if type(ydata) == dict:
typ = ydata.get('type')
module = ydata.get('module')
builder = Builder.plugin(typ, module)
if builder:
return builder.fromYaml(ydata, os.path.dirname(filename))
else:
log.warning('Could not find builder: {0}'.format(typ))
# load an xml definition
elif xdata is not None:
typ = xdata.get('type')
module = xdata.get('module')
builder = Builder.plugin(typ, module)
if builder:
return builder.fromXml(xdata, os.path.dirname(filename))
else:
log.warning('Could not find builder: {0}'.format(typ))
return None | Parses the inputted xml file information and generates a builder
for it.
:param filename | <str>
:return <Builder> || None |
def fromXml(cls, xdata, filepath=''):
module = None
pkg_data = xdata.find('package')
if pkg_data is not None:
path = pkg_data.find('path').text
name = pkg_data.find('name').text
if filepath:
path = os.path.join(filepath, path)
path = os.path.abspath(path)
sys.path.insert(0, path)
sys.modules.pop(name, None)
try:
__import__(name)
module = sys.modules[name]
except (ImportError, KeyError):
return None
else:
return None
# generate the builder
builder = cls(module)
builder.loadXml(xdata, filepath=filepath)
return builder | Generates a new builder from the given xml data and then
loads its information.
:param xdata | <xml.etree.ElementTree.Element>
:return <Builder> || None |
def fromYaml(cls, ydata, filepath=''):
module = None
pkg_data = ydata.get('package')
if pkg_data is not None:
path = pkg_data.get('path', '')
name = pkg_data.get('name', '')
if filepath:
path = os.path.join(filepath, path)
path = os.path.abspath(path)
sys.path.insert(0, path)
sys.modules.pop(name, None)
try:
__import__(name)
module = sys.modules[name]
except (ImportError, KeyError):
return None
else:
return None
# generate the builder
builder = cls(module)
builder.loadYaml(ydata, filepath=filepath)
return builder | Generates a new builder from the given xml data and then
loads its information.
:param ydata | <dict>
:return <Builder> || None |
def to_object(item):
def convert(item):
if isinstance(item, dict):
return IterableObject({k: convert(v) for k, v in item.items()})
if isinstance(item, list):
def yield_convert(item):
for index, value in enumerate(item):
yield convert(value)
return list(yield_convert(item))
else:
return item
return convert(item) | Convert a dictionary to an object (recursive). |
def to_dict(item):
def convert(item):
if isinstance(item, IterableObject):
if isinstance(item.source, dict):
return {k: convert(v.source) if hasattr(v, 'source') else convert(v) for k, v in item}
else:
return convert(item.source)
elif isinstance(item, dict):
return {k: convert(v) for k, v in item.items()}
elif isinstance(item, list):
def yield_convert(item):
for index, value in enumerate(item):
yield convert(value)
return list(yield_convert(item))
else:
return item
return convert(item) | Convert an object to a dictionary (recursive). |
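A round-trip sketch, assuming IterableObject exposes the wrapped keys as attributes and keeps the original mapping under .source:

obj = to_object({'user': {'name': 'alice', 'tags': [1, 2]}})
assert obj.user.name == 'alice'
assert to_dict(obj) == {'user': {'name': 'alice', 'tags': [1, 2]}}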
def step_undefined_step_snippet_should_exist_for(context, step):
undefined_step_snippet = make_undefined_step_snippet(step)
context.execute_steps(u'''\
Then the command output should contain:
"""
{undefined_step_snippet}
"""
'''.format(undefined_step_snippet=text_indent(undefined_step_snippet, 4))) | Checks if an undefined-step snippet is provided for a step
in behave command output (last command).
EXAMPLE:
Then an undefined-step snippet should exist for "Given an undefined step" |
def step_undefined_step_snippet_should_not_exist_for(context, step):
undefined_step_snippet = make_undefined_step_snippet(step)
context.execute_steps(u'''\
Then the command output should not contain:
"""
{undefined_step_snippet}
"""
'''.format(undefined_step_snippet=text_indent(undefined_step_snippet, 4))) | Checks if an undefined-step snippet is provided for a step
in behave command output (last command). |
def step_undefined_step_snippets_should_exist_for_table(context):
assert context.table, "REQUIRES: table"
for row in context.table.rows:
step = row["Step"]
step_undefined_step_snippet_should_exist_for(context, step) | Checks if undefined-step snippets are provided.
EXAMPLE:
Then undefined-step snippets should exist for:
| Step |
| When an undefined step is used |
| Then another undefined step is used | |
def step_undefined_step_snippets_should_not_exist_for_table(context):
assert context.table, "REQUIRES: table"
for row in context.table.rows:
step = row["Step"]
step_undefined_step_snippet_should_not_exist_for(context, step) | Checks if undefined-step snippets are not provided.
EXAMPLE:
Then undefined-step snippets should not exist for:
| Step |
| When an known step is used |
| Then another known step is used | |
async def create_connection(
host,
port,
*,
loop=None,
secure=True,
ssl_context=None,
**kwargs,
):
loop = loop or asyncio.get_event_loop()
secure = True if port == 443 else secure
connection = HTTP2ClientConnection(host, loop=loop, secure=secure)
if not isinstance(ssl_context, SSLContext):
ssl_context = default_ssl_context()
await loop.create_connection(
lambda: connection,
host=host,
port=port,
ssl=ssl_context,
)
return connection | Open an HTTP/2 connection to the specified host/port. |
def mixin (cls):
cls._events = {}
cls.bind = Pyevent.bind.__func__
cls.unbind = Pyevent.unbind.__func__
cls.trigger = Pyevent.trigger.__func__
return cls | A decorator which adds event methods to a class giving it the ability to
bind to and trigger events
:param cls: the class to add the event logic to
:type cls: class
:return: the modified class
:rtype: class |
def bind (self, event, callback):
if self._events.has_key(event):
self._events[event].append(callback)
else:
self._events[event] = [callback] | Bind an event to a call function and ensure that it is called for the
specified event
:param event: the event that should trigger the callback
:type event: str
:param callback: the function that should be called
:type callback: function |
def unbind (self, event, callback):
if self._events.has_key(event) and len(self._events[event]) > 0:
for _callback in self._events[event]:
if _callback == callback:
self._events[event].remove(callback)
if len(self._events[event]) == 0:
del self._events[event] | Unbind the callback from the event and ensure that it is never called
:param event: the event that should be unbound
:type event: str
:param callback: the function that should be unbound
:type callback: function |
def trigger (self, event, *args, **kwargs):
if self._events.has_key(event):
for _callback in self._events[event]:
try:
_callback(*args, **kwargs)
except TypeError:
_callback() | Cause the callbacks associated with the event to be called
:param event: the event that occurred
:type event: str
:param args: optional positional data to pass to the callback
:param kwargs: optional keyword data to pass to the callback |
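A usage sketch of the event mixin (Python 2 style, matching the code above). Note that _events is set on the class, so all instances of a decorated class share one event table:

@mixin
class Channel(object):
    pass

received = []
Channel().bind('message', lambda *args, **kwargs: received.append(args))
Channel().trigger('message', 'hello')   # fires even via a different instance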
def get_wordlist(lang, wl_dir, po_path):
#print("Looking for Wordlist in:\nlang {}\nwl_dir {}\npo_path {}".format(lang, wl_dir, po_path))
po_path = os.path.abspath(po_path)
if wl_dir is not None:
wl_path = os.path.join(wl_dir, lang + '.txt')
if os.path.isfile(wl_path):
return wl_path
"""
If wl_dir is not given, the wordlist should live in a file named
"wordlist.txt" either in the locales_dir for the default language or in
the same directory as the .po-files
"""
if po_path.endswith("po"):
# translated language
po_dir = os.path.dirname(po_path)
for f in os.scandir(po_dir):
if f.name == "wordlist.txt":
#print("found wordlist in", f.path)
return f.path
#print("Checked po-dir, None Found")
"""
If no file was found so far, the po-files seem to lie in
<lang>/LC_MESSAGES, and the wordlist should be in the directory
above.
"""
if os.path.basename(po_dir) == "LC_MESSAGES":
for f in os.scandir(os.path.join(po_dir, "..")):
if f.name == "wordlist.txt":
#print("found wordlist in", f.path)
return f.path
#print("Checked LC_MESSAGES-dir. none found")
#print("Checked lang-specific files")
if os.path.isdir(po_path):
# default language
for f in os.scandir(po_path):
if f.name == "wordlist.txt":
#print("found wordlist in", f.path)
return f.path
#print("If this shows up, no wordlist was found")
return None | If wl_dir is given, there may be a file called "<lang>.txt". If this is
the case, this should be the wordlist we are looking for. |