code | docs
---|---
def is_img_id_valid(img_id):
t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
t = re.sub(r'\.+', '.', t)
if img_id != t or img_id.count(':') != 1:
return False
profile, base_name = img_id.split(':', 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True | Checks if img_id is valid. |
def get_variant_label(v_conf):
if v_conf['MAX_SIZE'][0] is None:
return 'h{}'.format(v_conf['MAX_SIZE'][1])
if v_conf['MAX_SIZE'][1] is None:
return 'w{}'.format(v_conf['MAX_SIZE'][0])
return '{}x{}'.format(*v_conf['MAX_SIZE']) | Generates a label for variant images based on the configured variant sizes. |
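For illustration, the label depends on which dimension of MAX_SIZE is constrained; a quick sketch with hypothetical variant configs:

get_variant_label({'MAX_SIZE': (None, 480)})  # 'h480' (height-only constraint)
get_variant_label({'MAX_SIZE': (640, None)})  # 'w640' (width-only constraint)
get_variant_label({'MAX_SIZE': (640, 480)})   # '640x480'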
def remove_all_files_of_img_id(img_id):
files = get_files_by_img_id(img_id, check_hash=False)
if files:
os.remove(media_path(files['main']))
for fn in files['variants'].values():
os.remove(media_path(fn)) | Removes all img_id's files. |
def remove_tmp_prefix_from_filename(filename):
if not filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):
raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename})
return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):] | Remove tmp prefix from filename. |
def remove_tmp_prefix_from_file_path(file_path):
path, filename = os.path.split(file_path)
return os.path.join(path, remove_tmp_prefix_from_filename(filename)).replace('\\', '/') | Remove tmp prefix from file path or url. |
def make_permalink(img_id):
profile, filename = img_id.split(':', 1)
new_img_id = profile + ':' + remove_tmp_prefix_from_filename(filename)
urls = get_files_by_img_id(img_id)
if urls is None:
return urls
move_list = {(urls['main'], remove_tmp_prefix_from_file_path(urls['main']))}
for var_label, var_file_path in urls['variants'].iteritems():
move_list.add((var_file_path, remove_tmp_prefix_from_file_path(var_file_path)))
for file_path_from, file_path_to in move_list:
os.rename(media_path(file_path_from), media_path(file_path_to))
return new_img_id | Removes the tmp prefix from the filename and renames the main and variant files.
Returns img_id without tmp prefix. |
def upload_from_fs(fn, profile=None, label=None):
if not os.path.isfile(fn):
raise ValueError('File does not exist: {}'.format(fn))
if profile is None:
profile = 'default'
conf = get_profile_configs(profile)
with open(fn, 'rb') as f:
if not is_image(f, types=conf['TYPES']):
msg = (('Format of uploaded file "%(name)s" is not allowed. '
'Allowed formats are: %(formats)s.') %
{'name': fn, 'formats': ', '.join(map(lambda t: t.upper(), conf['TYPES']))})
raise RuntimeError(msg)
return _custom_upload(f, profile, label, conf) | Saves image from fn with TMP prefix and returns img_id. |
def upload_from_fileobject(f, profile=None, label=None):
if profile is None:
profile = 'default'
conf = get_profile_configs(profile)
f.seek(0)
if not is_image(f, types=conf['TYPES']):
msg = (('Format of uploaded file is not allowed. '
'Allowed formats are: %(formats)s.') %
{'formats': ', '.join(map(lambda t: t.upper(), conf['TYPES']))})
raise RuntimeError(msg)
return _custom_upload(f, profile, label, conf) | Saves image from f with TMP prefix and returns img_id. |
def init_app(self, app):
app.extensions['invenio-documents'] = self
app.cli.add_command(cmd) | Flask application initialization. |
def request(self, method, url, **kwargs):
if "data" in kwargs:
kwargs["data"] = json.dumps(kwargs["data"])
kwargs["headers"] = {
'Content-Type': 'application/json',
'Authorization': 'token %s' % self.__token__,
}
req = make_request(
method,
url,
**kwargs
)
self.logger.debug(
"Request::{}::{}".format(method, url),
extra={
"request": kwargs,
"response": {"headers": req.headers, "code": req.status_code, "data": req.content}
}
)
return req | Unified method to make a request to the GitHub API
:param method: HTTP Method to use
:param url: URL to reach
:param kwargs: dictionary of arguments (params for URL parameters, data for post/put data)
:return: Response |
def default_branch(self, file):
if isinstance(self.__default_branch__, str):
return self.__default_branch__
elif self.__default_branch__ == GithubProxy.DEFAULT_BRANCH.NO:
return self.master_upstream
else:
return file.sha[:8] | Decide the name of the default branch given the file and the configuration
:param file: File with information about it
:return: Branch Name |
def init_app(self, app):
self.app = app
self.__blueprint__ = Blueprint(
self.__name__,
self.__name__,
url_prefix=self.__prefix__,
)
for url, name, methods in self.__urls__:
self.blueprint.add_url_rule(
url,
view_func=getattr(self, name),
endpoint=name.replace("r_", ""),
methods=methods
)
self.app.register_blueprint(self.blueprint)
return self.blueprint | Initialize the application and register the blueprint
:param app: Flask Application
:return: Blueprint of the current nemo app
:rtype: flask.Blueprint |
def put(self, file):
input_ = {
"message": file.logs,
"author": file.author.dict(),
"content": file.base64,
"branch": file.branch
}
uri = "{api}/repos/{origin}/contents/{path}".format(
api=self.github_api_url,
origin=self.origin,
path=file.path
)
data = self.request("PUT", uri, data=input_)
if data.status_code == 201:
file.pushed = True
return file
else:
decoded_data = json.loads(data.content.decode("utf-8"))
return self.ProxyError(
data.status_code, (decoded_data, "message"),
step="put", context={
"uri": uri,
"params": input_
}
) | Create a new file on github
:param file: File to create
:return: File or self.ProxyError |
def get(self, file):
uri = "{api}/repos/{origin}/contents/{path}".format(
api=self.github_api_url,
origin=self.origin,
path=file.path
)
params = {
"ref": file.branch
}
data = self.request("GET", uri, params=params)
# We update the file blob because it exists and we need it for update
if data.status_code == 200:
data = json.loads(data.content.decode("utf-8"))
file.blob = data["sha"]
elif data.status_code == 404:
pass
else:
decoded_data = json.loads(data.content.decode("utf-8"))
return self.ProxyError(
data.status_code, (decoded_data, "message"),
step="get", context={
"uri": uri,
"params": params
}
)
return file | Check on github if a file exists
:param file: File to check status of
:return: File with new information, including blob, or Error
:rtype: File or self.ProxyError |
def update(self, file):
params = {
"message": file.logs,
"author": file.author.dict(),
"content": file.base64,
"sha": file.blob,
"branch": file.branch
}
uri = "{api}/repos/{origin}/contents/{path}".format(
api=self.github_api_url,
origin=self.origin,
path=file.path
)
data = self.request("PUT", uri, data=params)
if data.status_code == 200:
file.pushed = True
return file
else:
reply = json.loads(data.content.decode("utf-8"))
return self.ProxyError(
data.status_code, (reply, "message"),
step="update", context={
"uri": uri,
"params": params
}
) | Make an update query on Github API for given file
:param file: File to update, with its content
:return: File with new information, including success (or Error) |
def pull_request(self, file):
uri = "{api}/repos/{upstream}/pulls".format(
api=self.github_api_url,
upstream=self.upstream,
path=file.path
)
params = {
"title": "[Proxy] {message}".format(message=file.logs),
"body": "",
"head": "{origin}:{branch}".format(origin=self.origin.split("/")[0], branch=file.branch),
"base": self.master_upstream
}
data = self.request("POST", uri, data=params)
if data.status_code == 201:
return json.loads(data.content.decode("utf-8"))["html_url"]
else:
reply = json.loads(data.content.decode("utf-8"))
return self.ProxyError(
data.status_code, reply["message"],
step="pull_request", context={
"uri": uri,
"params": params
}
) | Create a pull request
:param file: File to push through pull request
:return: URL of the PullRequest or Proxy Error |
def get_ref(self, branch, origin=None):
if not origin:
origin = self.origin
uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format(
api=self.github_api_url,
origin=origin,
branch=branch
)
data = self.request("GET", uri)
if data.status_code == 200:
data = json.loads(data.content.decode("utf-8"))
if isinstance(data, list):
# No exact match; we got search results for refs starting with {branch}
return False
# Otherwise, we get one record
return data["object"]["sha"]
elif data.status_code == 404:
return False
else:
decoded_data = json.loads(data.content.decode("utf-8"))
return self.ProxyError(
data.status_code, (decoded_data, "message"),
step="get_ref", context={
"uri": uri
}
) | Check if a reference exists
:param branch: The branch to check if it exists
:return: Sha of the branch if it exists, False if it does not exist, self.ProxyError if it went wrong |
def make_ref(self, branch):
master_sha = self.get_ref(self.master_upstream)
if not isinstance(master_sha, str):
return self.ProxyError(
404,
"The default branch from which to checkout is either not available or does not exist",
step="make_ref"
)
params = {
"ref": "refs/heads/{branch}".format(branch=branch),
"sha": master_sha
}
uri = "{api}/repos/{origin}/git/refs".format(
api=self.github_api_url,
origin=self.origin
)
data = self.request("POST", uri, data=params)
if data.status_code == 201:
data = json.loads(data.content.decode("utf-8"))
return data["object"]["sha"]
else:
decoded_data = json.loads(data.content.decode("utf-8"))
return self.ProxyError(
data.status_code, (decoded_data, "message"),
step="make_ref", context={
"uri": uri,
"params": params
}
) | Make a branch on github
:param branch: Name of the branch to create
:return: Sha of the branch or self.ProxyError |
def check_sha(self, sha, content):
rightful_sha = sha256(bytes("{}{}".format(content, self.secret), "utf-8")).hexdigest()
return sha == rightful_sha | Check sent sha against the salted hash of the content
:param sha: SHA sent through fproxy-secure-hash header
:param content: Base 64 encoded Content
:return: Boolean indicating equality |
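A minimal standalone sketch of the same salted-hash check, assuming a hypothetical shared secret (the real method reads it from self.secret):

from hashlib import sha256

def check_sha(sha, content, secret='my-shared-secret'):  # secret is a placeholder
    # the sender is expected to hash the base64 content concatenated with the secret
    rightful_sha = sha256(bytes('{}{}'.format(content, secret), 'utf-8')).hexdigest()
    return sha == rightful_sha

expected = sha256(b'c29tZSBjb250ZW50my-shared-secret').hexdigest()
check_sha(expected, 'c29tZSBjb250ZW50')  # True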
def patch_ref(self, sha):
uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format(
api=self.github_api_url,
origin=self.origin,
branch=self.master_fork
)
data = {
"sha": sha,
"force": True
}
reply = self.request(
"PATCH",
uri,
data=data
)
if reply.status_code == 200:
dic = json.loads(reply.content.decode("utf-8"))
return dic["object"]["sha"]
else:
dic = json.loads(reply.content.decode("utf-8"))
return self.ProxyError(
reply.status_code,
(dic, "message"),
step="patch",
context={
"uri": uri,
"data": data
}
) | Patch reference on the origin master branch
:param sha: Sha to use for the branch
:return: Status of success
:rtype: str or self.ProxyError |
def r_update(self):
# Getting Master Branch
upstream = self.get_ref(self.master_upstream, origin=self.upstream)
if isinstance(upstream, bool):
return (ProxyError(
404, "Upstream Master branch '{0}' does not exist".format(self.master_upstream),
step="get_upstream_ref"
)).response()
elif isinstance(upstream, self.ProxyError):
return upstream.response()
# Patching
new_sha = self.patch_ref(upstream)
if isinstance(new_sha, self.ProxyError):
return new_sha.response()
self.logger.info("Updated repository {} to sha {}".format(self.origin, new_sha), extra={"former_sha": upstream})
return jsonify({
"status": "success",
"commit": new_sha
}) | Updates a fork Master
- Check the ref of the origin repository
- Patch reference of fork repository
- Return status to Perseids
:return: JSON Response with status_code 201 if successful. |
def delete_where_user_id(cls, user_id):
result = cls.where_user_id(user_id)
if result is None:
return None
result.delete()
return True | Delete by user id. |
def int_filter(text):
res = list()
for char in text:
if char.isdigit():
res.append(char)
return int("".join(res)) | Extract integer from text.
**δΈζζζ‘£**
ζι€ζζ¬ε
ηζ΄ζ°γ |
def float_filter(text):
res = list()
for char in text:
if (char.isdigit() or (char == ".")):
res.append(char)
return float("".join(res)) | Extract float from text.
**δΈζζζ‘£**
ζι€ζζ¬ε
ηε°ζ°γ |
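Both filters simply keep the matching characters in order; for example, with hypothetical inputs:

int_filter('Order #12,345 shipped')     # 12345
float_filter('Total: $1,234.56 (USD)')  # 1234.56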
def load(self, filename, offset):
try:
self.offset = offset
# self.fd = open(filename, 'rb')
# self.fd.close()
except IOError as e:
print(e) | Will eventually load information for Apple_Boot volume.
Not yet implemented |
def resolve(accessor: hexdi.core.clstype) -> __gentype__.T:
return hexdi.core.get_root_container().resolve(accessor=accessor) | shortcut for resolving from root container
:param accessor: accessor for resolving object
:return: resolved object of requested type |
def bind_type(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype, lifetime_manager: hexdi.core.ltype):
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime_manager) | shortcut for bind_type on root container
:param type_to_bind: type that will be resolved by accessor
:param accessor: accessor for resolving object
:param lifetime_manager: type of lifetime manager for this binding |
def bind_permanent(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype):
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PermanentLifeTimeManager) | shortcut for bind_type with PermanentLifeTimeManager on root container
:param type_to_bind: type that will be resolved by accessor
:param accessor: accessor for resolving object |
def bind_transient(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype):
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PerResolveLifeTimeManager) | shortcut for bind_type with PerResolveLifeTimeManager on root container
:param type_to_bind: type that will be resolved by accessor
:param accessor: accessor for resolving object |
def get_series(self, series):
if series == "acs1":
return self.census.acs1dp
elif series == "acs5":
return self.census.acs5
elif series == "sf1":
return self.census.sf1
elif series == "sf3":
return self.census.sf3
else:
return None | Returns a census series API handler. |
def write_xml(self):
'''
:return: Xml for order by property
:rtype: lxml.etree.Element
'''
order_by_specs = etree.Element('order-by-property')
order_by_specs.attrib['type-id'] = self.typeid
order_by_specs.attrib['property-name'] = self.property_name
order_by_specs.attrib['direction'] = self.direction
return order_by_specs | :return: Xml for order by property
:rtype: lxml.etree.Element |
def set(self, repo):
name = repo.name
if name in self.__repositories \
and self.__repositories[name].is_initialized:
raise ValueError('Can not replace repositories that have been '
'initialized.')
self.__repositories[name] = repo | Sets the given repository (by name). |
def new(self, repo_type, name=None, make_default=False,
repository_class=None, aggregate_class=None,
configuration=None):
if name == REPOSITORY_DOMAINS.ROOT:
# Unless explicitly configured differently, all root repositories
# join the transaction.
join_transaction = True
autocommit = False
name = repo_type
else:
join_transaction = False
if name is None:
name = "%s%d" % (repo_type, next(self.__repo_id_gen))
# The system repository is special in that its repository
# should not join the transaction but still commit all changes.
autocommit = name == REPOSITORY_DOMAINS.SYSTEM
if repository_class is None:
reg = get_current_registry()
repository_class = reg.queryUtility(IRepository, name=repo_type)
if repository_class is None:
raise ValueError('Unknown repository type "%s".' % repo_type)
repo = repository_class(name,
aggregate_class,
join_transaction=join_transaction,
autocommit=autocommit)
if not configuration is None:
repo.configure(**configuration)
if make_default:
self.__default_repo = repo
return repo | Creates a new repository of the given type. If the root repository
domain (see :class:`everest.repositories.constants.REPOSITORY_DOMAINS`)
is passed as a repository name, the type string is used as the name;
if no name is passed, a unique name is created automatically. |
def setup_system_repository(self, repository_type, reset_on_start,
repository_class=None):
# Set up the system entity repository (this does not join the
# transaction and is in autocommit mode).
cnf = dict(messaging_enable=True,
messaging_reset_on_start=reset_on_start)
system_repo = self.new(repository_type,
name=REPOSITORY_DOMAINS.SYSTEM,
repository_class=repository_class,
configuration=cnf)
self.set(system_repo) | Sets up the system repository with the given repository type.
:param str repository_type: Repository type to use for the SYSTEM
repository.
:param bool reset_on_start: Flag to indicate whether stored system
resources should be discarded on startup.
:param repository_class: class to use for the system repository. If
not given, the registered class for the given type will be used. |
def initialize_all(self):
for repo in itervalues_(self.__repositories):
if not repo.is_initialized:
repo.initialize() | Convenience method to initialize all repositories that have not been
initialized yet. |
def raw(body, status=200, headers=None,
content_type='application/octet-stream'):
'''
Returns response object without encoding the body.
:param body: Response data.
:param status: Response code.
:param headers: Custom Headers.
:param content_type: the content type (string) of the response.
'''
return HTTPResponse(body_bytes=body, status=status, headers=headers,
content_type=content_type) | Returns response object without encoding the body.
:param body: Response data.
:param status: Response code.
:param headers: Custom Headers.
:param content_type: the content type (string) of the response. |
def html(body, status=200, headers=None):
'''
Returns response object with body in html format.
:param body: Response data to be encoded.
:param status: Response code.
:param headers: Custom Headers.
'''
return HTTPResponse(body, status=status, headers=headers,
content_type='text/html; charset=utf-8') | Returns response object with body in html format.
:param body: Response data to be encoded.
:param status: Response code.
:param headers: Custom Headers. |
def redirect(to, headers=None, status=302,
content_type='text/html; charset=utf-8'):
'''Abort execution and cause a 302 redirect (by default).
:param to: path or fully qualified URL to redirect to
:param headers: optional dict of headers to include in the new request
:param status: status code (int) of the new request, defaults to 302
:param content_type: the content type (string) of the response
:returns: the redirecting Response
'''
headers = headers or {}
# According to RFC 7231, a relative URI is now permitted.
headers['Location'] = to
return HTTPResponse(
status=status,
headers=headers,
content_type=content_type) | Abort execution and cause a 302 redirect (by default).
:param to: path or fully qualified URL to redirect to
:param headers: optional dict of headers to include in the new request
:param status: status code (int) of the new request, defaults to 302
:param content_type: the content type (string) of the response
:returns: the redirecting Response |
def get_message(self, more_content):
'''
http://channels.readthedocs.io/en/stable/asgi/www.html#response
'''
return {
'status': self.status,
'content': self.body,
'headers': self._parse_headers(),
'more_content': more_content
} | http://channels.readthedocs.io/en/stable/asgi/www.html#response |
def get_bundles():
global _cached_bundles
if not _cached_bundles:
_cached_bundles = BundleManager()
for bundle_conf in bundles_settings.BUNDLES:
_cached_bundles[bundle_conf[0]] = Bundle(bundle_conf)
return _cached_bundles | Used to cache the bundle definitions rather than loading from config every time they're used |
def get_bundle_versions():
global _cached_versions
if not bundles_settings.BUNDLES_VERSION_FILE:
_cached_versions = {}
if _cached_versions is None:
locs = {}
try:
execfile(bundles_settings.BUNDLES_VERSION_FILE, locs)
_cached_versions = locs['BUNDLES_VERSIONS']
except IOError:
_cached_versions = {}
return _cached_versions | Used to cache the bundle versions rather than loading them from the bundle versions file every time they're used |
def get_url(self, version=None):
if self.fixed_bundle_url:
return self.fixed_bundle_url
return '%s.%s.%s' % (os.path.join(self.bundle_url_root, self.bundle_filename), version or self.get_version(), self.bundle_type) | Return the URL of the bundled file |
def get_file_urls(self):
if self.use_bundle:
return [self.get_url()]
return [bundle_file.file_url for bundle_file in self.files] | Return a list of file urls - will return a single item if settings.USE_BUNDLES is True |
def export_batch(self):
batch = self.batch_cls(
model=self.model, history_model=self.history_model, using=self.using
)
if batch.items:
try:
json_file = self.json_file_cls(batch=batch, path=self.path)
json_file.write()
except JSONDumpFileError as e:
raise TransactionExporterError(e)
batch.close()
return batch
return None | Returns a batch instance after exporting a batch of txs. |
def get(input_dict, environment_dict):
if environment_dict['currentkeyname'] is None or not seash_global_variables.keys[environment_dict['currentkeyname']]['privatekey']:
raise seash_exceptions.UserError("Error, must get as an identity with a private key")
# Activate secure mode if user did not specify the insecure keyword
allow_ssl_insecure = _get_user_argument(input_dict, 'insecure') is not None
vesselcount = int(_get_user_argument(input_dict, 'vesselcount'))
try:
vesseltype = _get_user_argument(input_dict, 'type')
# User may not have specified a vesseltype
except IndexError:
vesseltype = None
if not vesseltype in ['wan', 'lan', 'nat', None]:
raise seash_exceptions.UserError("Error, unknown vessel type '"+vesseltype+"'")
client = _connect_to_clearinghouse(environment_dict['currentkeyname'],
allow_ssl_insecure)
# Get the vessels!
try:
if vesseltype is None:
vesseldicts = client.acquire_random_resources(vesselcount)
else:
vesseldicts = client.acquire_resources(vesseltype, vesselcount)
_update_targets(vesseldicts, environment_dict)
except (seattleclearinghouse_xmlrpc.UnableToAcquireResourcesError, seattleclearinghouse_xmlrpc.NotEnoughCreditsError), e:
print str(e) | <Purpose>
Gets the specified vessels.
<Arguments>
input_dict: The commanddict representing the user's input.
environment_dict: The dictionary representing the current seash
environment.
<Side Effects>
Connects to the Clearinghouse and acquires vessels.
Adds the acquired vessels to the list of valid targets.
<Exceptions>
None
<Returns>
None |
def release(input_dict, environment_dict):
# Activate secure mode if user did not specify the insecure keyword
allow_ssl_insecure = _get_user_argument(input_dict, 'insecure') is not None
# Get the group name to release
groupname = environment_dict['currenttarget']
nodelist = seash_global_variables.targets[groupname]
# Get the Clearinghouse vessel handles for each vessel
retdict = seash_helper.contact_targets(nodelist, _get_clearinghouse_vessel_handle)
clearinghouse_vesselhandles = []
faillist = []
# parse the output so we can print out something intelligible
for nodename in retdict:
if retdict[nodename][0]:
clearinghouse_vesselhandles.append(retdict[nodename][1])
else:
faillist.append(nodename)
# Release!
client = _connect_to_clearinghouse(environment_dict['currentkeyname'],
allow_ssl_insecure)
client.release_resources(clearinghouse_vesselhandles)
# Remove each vessel from the targets list
removed_nodehandles = seash_global_variables.targets[groupname][:]
for handle in removed_nodehandles:
for target in seash_global_variables.targets:
if handle in seash_global_variables.targets[target]:
seash_global_variables.targets[target].remove(handle) | <Purpose>
Releases the specified vessels.
<Arguments>
input_dict: The commanddict representing the user's input.
environment_dict: The dictionary representing the current seash
environment.
<Side Effects>
Connects to the Clearinghouse and releases vessels.
Removes the released vessels from the list of valid targets.
Does not guarantee that all vessels specified are released!
<Exceptions>
None
<Returns>
None |
def _update_targets(vesseldicts, environment_dict):
# Compile a list of the nodes that we need to check
nodelist = []
for vesseldict in vesseldicts:
nodeip_port = vesseldict['node_ip']+':'+str(vesseldict['node_port'])
if not nodeip_port in nodelist:
nodelist.append(nodeip_port)
# we'll output a message about the new keys later...
newidlist = []
faillist = []
# Clear the list so that the user doesn't target vessels acquired from
# previous requests when targeting this group
seash_global_variables.targets['acquired'] = []
print nodelist
# currently, if I browse more than once, I look up everything again...
retdict = seash_helper.contact_targets(
nodelist,
seash_helper.browse_target,
environment_dict['currentkeyname'],
'acquired')
# parse the output so we can print out something intelligible
for nodename in retdict:
if retdict[nodename][0]:
newidlist = newidlist + retdict[nodename][1]
else:
faillist.append(nodename)
seash_helper.print_vessel_errors(retdict)
if len(newidlist) == 0:
print "Could not add any new targets."
else:
print "Added targets: "+", ".join(newidlist)
if len(seash_global_variables.targets['acquired']) > 0:
num_targets = str(len(seash_global_variables.targets['acquired']))
print "Added group 'acquired' with "+num_targets+" targets" | <Purpose>
Connects to the nodes in the vesseldicts and adds them to the list
of valid targets.
<Arguments>
vesseldicts:
A list of vesseldicts obtained through
SeattleClearinghouseClient calls.
<Side Effects>
All valid targets that the user can access on the specified nodes
are added to the list of targets.
<Exceptions>
None
<Returns>
None |
def _get_clearinghouse_vessel_handle(vesselhandle):
host, portstring, vesselname = vesselhandle.split(':')
port = int(portstring)
# get information about the node's vessels
try:
nodehandle = nmclient.nmclient_createhandle(host, port,
timeout=seash_global_variables.globalseashtimeout)
except NMClientException,e:
return (False, str(e))
try:
# We need to get the nodekey on this vessel
vesseldict = nmclient.nmclient_getvesseldict(nodehandle)
except NMClientException,e:
return (False, str(e))
finally:
nmclient.nmclient_destroyhandle(nodehandle)
nodekeystr = rsa.rsa_publickey_to_string(vesseldict['nodekey'])
return (True, nodekeystr+':'+vesselname) | <Purpose>
Acquires the unique vessel identifier for a given vesselhandle.
<Arguments>
vesselhandle:
A vessel handle expressed in the form node_ip:node_port:vesselname.
<Side Effects>
Opens a connection to the vessel to retrieve its nodekey.
<Exceptions>
None
<Returns>
A tuple (True, clearinghouse vesselhandle) on success, or (False, error string) on failure. |
def _check_key(self, key):
self.setup_schema()
if key not in self._attrs and key not in self:
raise KeyError(key) | Ensure key is either in schema's attributes or already set on self. |
def similar(self):
if self._similar is None:
self._similar = [
Artist(artist['ArtistID'], artist['Name'], self._connection)
for artist in self._connection.request(
'artistGetSimilarArtists',
{'artistID': self.id},
self._connection.header('artistGetSimilarArtists'))[1]['SimilarArtists']]
return iter(self._similar) | iterator over similar artists as :class:`Artist` objects |
def songs(self):
if self._songs is None:
self._songs = [Song.from_response(song, self._connection) for song in
self._connection.request(
'artistGetArtistSongs',
{'artistID': self.id},
self._connection.header('artistGetArtistSongs'))[1]]
return iter(self._songs) | iterator over artist's songs as :class:`Song` objects |
def query_params(self, params, key, def_value, short_hand=None):
if key not in params and short_hand:
# value is associated with shorthand, move to key
params[key] = params.get(short_hand, def_value)
params.pop(short_hand, None)
elif key not in params and not short_hand:
params[key] = def_value
elif key in params:
# key is there, also possibly shorthand
# assume def value at this point is not needed
if short_hand in params:
del params[short_hand]
return params | Updates the params dict: populates key from the shorthand key if present, otherwise with def_value, and drops the shorthand key.
:param params: dict of query parameters to update
:param key: canonical parameter name
:param def_value: default value used when neither key nor short_hand is present
:param short_hand: optional shorthand parameter name whose value is moved to key
:return: the updated params dict |
def hirise_edr(self, pid, chunk_size=1024*1024):
productid = "{}*".format(pid)
query = {"target" : "mars",
"query" : "product",
"results" : "f",
"output" : "j",
"pt" : "EDR",
"iid" : "HiRISE",
"ihid" : "MRO",
"productid" : productid}
# Query the ODE
products = query_ode(self.ode_url, query)
# Validate query results with conditions for this particular query
if len(products) > 30:
print("Error: Too many products selected for in query, Make PID more specific")
sys.exit(1)
if not isinstance(products, list):
print("Error: Too few responses from server to be a full HiRISE EDR, ")
else:
# proceed to download
for product in products:
download_edr_img_files(product, self.https, chunk_size) | Download a HiRISE EDR set of .IMG files to the CWD
You must know the full ID to specify the filter to use, e.g.:
PSP_XXXXXX_YYYY will download every EDR IMG file available
PSP_XXXXXX_YYYY_R will download every EDR RED filter IMG file
PSP_XXXXXX_YYYY_BG12_0 will download only the BG12_0
A wildcard is automatically applied to the end of the provided pid.
pid: product ID of the HiRISE EDR; partial IDs are OK
chunk_size: Chunk size in bytes to use in download |
def get_meta(self, **kwargs):
query = kwargs
# filters
query = query_params(query, 'productid', None, short_hand='pid')
query = query_params(query, 'query', 'product')
query = query_params(query, 'results', 'm')
query = query_params(query, 'output', 'j')
return query_ode(self.ode_url, query=query) | Perform a mostly arbitrary meta_data query and dump to std out
:param kwargs:
:return: |
def suggest_accumulation_rate(chron):
# Follows Bacon's method @ Bacon.R ln 30 - 44
# Suggested round vals.
sugg = np.tile([1, 2, 5], (4, 1)) * np.reshape(np.repeat([0.1, 1.0, 10, 100], 3), (4, 3))
# Get ballpark accumulation rates, uncalibrated dates.
ballpacc = stats.linregress(x=chron.depth, y=chron.age * 1.1).slope
ballpacc = np.abs(sugg - ballpacc)
sugg = sugg.flat[ballpacc.argmin()] # Suggest rounded acc.rate with lowest abs diff.
return sugg | From core age-depth data, suggest mean accumulation rate (cm/y) |
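The candidate grid built by np.tile/np.repeat steps through 1-2-5 values across four decades, and the ballpark regression slope is rounded to the nearest candidate; a small sketch of that grid:

import numpy as np
sugg = np.tile([1, 2, 5], (4, 1)) * np.reshape(np.repeat([0.1, 1.0, 10, 100], 3), (4, 3))
# [[  0.1   0.2   0.5]
#  [  1.    2.    5. ]
#  [ 10.   20.   50. ]
#  [100.  200.  500. ]]
# e.g. a ballpark slope of ~3.7 cm/y is closest to 5, so 5 would be suggested.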
def detect(self, filename, offset, standalone=False):
r = RawStruct(
filename=filename,
offset=offset + SIG_OFFSET,
length=SIG_SIZE)
oem_id = r.data
if oem_id == b"NTFS ":
return True
return False | Verifies NTFS filesystem signature.
Returns:
bool: True if filesystem signature at offset 0x03 \
matches 'NTFS ', False otherwise. |
def load(cls, v):
if v is None:
return []
if isinstance(v, list):
return [ Action(s) for s in v ]
elif isinstance(v, str):
return [Action(v)]
else:
raise ParseError("Couldn't parse action: %r" % v) | Load the action from configuration |
def load_stream(cls, st):
y = yaml.load(st)
return [ Automaton(k, v) for k, v in y.iteritems() ] | Load Automatons from a stream |
def make_dot(self, filename_or_stream, auts):
if isinstance(filename_or_stream, str):
stream = file(filename_or_stream, 'w')
else:
stream = filename_or_stream
dot = DotFile(stream)
for aut in auts:
dot.start(aut.name)
dot.node('shape=Mrecord width=1.5')
for st in aut.states:
label = st.name
if st.entering:
label += '|%s' % '\\l'.join(str(st) for st in st.entering)
if st.leaving:
label += '|%s' % '\\l'.join(str(st) for st in st.leaving)
label = '{%s}' % label
dot.state(st.name, label=label)
for st in aut.states:
for tr in st.transitions:
dot.transition(tr.s_from.name, tr.s_to.name, tr.when)
dot.end()
dot.finish() | Create a graphviz .dot representation of the automaton. |
def load(self, config):
self.config = config
if 'start' not in self.config:
raise ParseError('missing start entry')
if 'states' not in self.config:
raise ParseError('missing states entry')
if 'transitions' not in self.config:
raise ParseError('missing transitions entry')
for state, val in self.config['states'].iteritems():
state = State(state)
state.entering = Action.load(val.get('entering'))
state.leaving = Action.load(val.get('leaving'))
self.states.add(state)
self.start = self.states[self.config['start']]
for transition, val in self.config['transitions'].iteritems():
if '->' in transition:
# from->to
lft, rgt = transition.split('->')
if lft == '*':
sfroms = self.states.keys()
else:
sfroms = lft.split(',')
if rgt == '*':
stos = self.states.keys()
else:
stos = rgt.split(',')
pairs = ((f, t) for f in sfroms for t in stos)
else:
# self transition 'from1,from2' = from1->from1, from2->from2
if transition == '*':
ss = self.states.keys()
else:
ss = transition.split(',')
pairs = ((x, x) for x in ss)
for sfrom, sto in pairs:
if sfrom not in self.states:
raise ParseError("Could find state %r" % sfrom)
if sto not in self.states:
raise ParseError("Could find state %r" % sto)
s_from = self.states[sfrom]
s_to = self.states[sto]
if not isinstance(val, list):
val = [val]
for v in val:
when = v['when']
actions = Action.load(v.get('actions'))
transition = Transition(s_from, s_to, Condition(when), actions)
s_from.transitions.append(transition) | load the configuration |
def _parse_url(url=None):
if url is None:
return ('', '')
scheme, netloc, path, _, _ = parse.urlsplit(url)
if scheme != 's3':
raise InvalidURL(url, "URL scheme must be s3://")
if path and not netloc:
raise InvalidURL(url)
return netloc, path[1:] | Split the path up into useful parts: bucket, obj_key |
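A few illustrative splits, assuming `parse` is `urllib.parse` as imported in the surrounding module:

_parse_url('s3://my-bucket/path/to/key')  # ('my-bucket', 'path/to/key')
_parse_url('s3://my-bucket')              # ('my-bucket', '')
_parse_url()                              # ('', '')
_parse_url('http://example.com/x')        # raises InvalidURL: URL scheme must be s3://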
def get(self, url=None, delimiter="/"):
params = {'Delimiter': delimiter}
bucket, obj_key = _parse_url(url)
if bucket:
params['Bucket'] = bucket
else:
return self.call("ListBuckets", response_data_key="Buckets")
if obj_key:
params['Prefix'] = obj_key
objects = self.call("ListObjects", response_data_key="Contents",
**params)
if objects:
for obj in objects:
obj['url'] = "s3://{0}/{1}".format(bucket, obj['Key'])
return objects | Path is an s3 url. Omitting the path or providing "s3://" as the
path will return a list of all buckets. Otherwise, all subdirectories
and their contents will be shown. |
def create(self, url):
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
return self.call("CreateBucket", bucket=target) | Create a bucket, directory, or empty file. |
def destroy(self, url, recursive=False):
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
if recursive:
for obj in self.get(url, delimiter=''):
self.destroy(obj['url'])
return self.call("DeleteBucket", bucket=target) | Destroy a bucket, directory, or file. Specifying recursive=True
recursively deletes all subdirectories and files. |
def upload(self, local_path, remote_url):
bucket, key = _parse_url(remote_url)
with open(local_path, 'rb') as fp:
return self.call("PutObject", bucket=bucket, key=key, body=fp) | Copy a local file to an S3 location. |
def download(self, remote_url, local_path, buffer_size=8 * 1024):
bucket, key = _parse_url(remote_url)
response_file = self.call("GetObject", bucket=bucket, key=key)['Body']
with open(local_path, 'wb') as fp:
buf = response_file.read(buffer_size)
while buf:
fp.write(buf)
buf = response_file.read(buffer_size) | Copy S3 data to a local file. |
def copy(self, src_url, dst_url):
src_bucket, src_key = _parse_url(src_url)
dst_bucket, dst_key = _parse_url(dst_url)
if not dst_bucket:
dst_bucket = src_bucket
params = {
'copy_source': '/'.join((src_bucket, src_key)),
'bucket': dst_bucket,
'key': dst_key,
}
return self.call("CopyObject", **params) | Copy an S3 object to another S3 location. |
def move(self, src_url, dst_url):
self.copy(src_url, dst_url)
self.destroy(src_url) | Copy a single S3 object to another S3 location, then delete the
original object. |
def expand_file_names(path, files_root):
# For non-wildcards just return the path. This allows us to detect when
# explicitly listed files are missing.
if not any(wildcard in path for wildcard in '*?['):
return [path]
else:
dir_path, filename = os.path.split(path)
return [os.path.join(dir_path, f) for f in fnmatch.filter(os.listdir(os.path.join(files_root, dir_path)), filename)] | Expands paths (e.g. css/*.css in files_root /actual/path/to/css/files/) |
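For instance, with a hypothetical files_root of '/srv/static' whose css/ directory contains a.css and b.css:

expand_file_names('css/site.css', '/srv/static')  # ['css/site.css'] (non-wildcard paths pass through)
expand_file_names('css/*.css', '/srv/static')     # ['css/a.css', 'css/b.css'] (order follows os.listdir)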
def _init_redis_shards(self):
self._shards = {}
if self._sentinels is not None:
self.init_shards_from_sentinel()
elif self._masters is not None:
self.init_shards_from_masters()
else:
raise Exception("You must either specify sentinels or masters") | init_redis_shards is used internally to connect to the Redis sentinels
and populate self.shards with the redis.StrictRedis instances. This
is a convenient method to override / stub out in unit tests. |
def get_shard_names(self):
results = []
for shard_num in range(0, self.num_shards()):
shard_name = self.get_shard_name(shard_num)
results.append(shard_name)
return results | get_shard_names returns an array containing the names of the shards
in the cluster. This is determined with num_shards and
shard_name_format |
def get_key(self, key_type, key_id):
return "{0}:{1}{2}{3}".format(key_type, self._hash_start, key_id,
self._hash_stop) | get_key constructs a key given a key type and a key id.
Keyword arguments:
key_type -- the type of key (e.g.: 'friend_request')
key_id -- the key id (e.g.: '12345')
returns a string representing the key
(e.g.: 'friend_request:{12345}') |
def get_shard_by_key(self, key):
key_id = self._get_key_id_from_key(key)
return self.get_shard_by_key_id(key_id) | get_shard_by_key returns the Redis shard given a key.
Keyword arguments:
key -- the key (e.g. 'friend_request:{12345}')
If the key contains curly braces as in the example, then portion inside
the curly braces will be used as the key id. Otherwise, the entire key
is the key id.
returns a redis.StrictRedis connection |
def get_shard_num_by_key(self, key):
key_id = self._get_key_id_from_key(key)
return self.get_shard_num_by_key_id(key_id) | get_shard_num_by_key returns the Redis shard number given a key.
Keyword arguments:
key -- the key (e.g. 'friend_request:{12345}')
See get_shard_by_key for more details as this method behaves the same
way. |
def get_shard_by_key_id(self, key_id):
shard_num = self.get_shard_num_by_key_id(key_id)
return self.get_shard_by_num(shard_num) | get_shard_by_key_id returns the Redis shard given a key id.
Keyword arguments:
key_id -- the key id (e.g. '12345')
This is similar to get_shard_by_key(key) except that it will not search
for a key id within the curly braces.
returns a redis.StrictRedis connection |
def get_shard_num_by_key_id(self, key_id):
# TODO: support other hash functions?
m = hashlib.md5(str(key_id).encode('ascii')).hexdigest()
# Below is borrowed from
# https://github.com/twitter/twemproxy/blob/master/src/hashkit/nc_md5.c
val = (int(m[0:2], 16) |
int(m[2:4], 16) << 8 |
int(m[4:6], 16) << 16 |
int(m[6:8], 16) << 24)
return val % self.num_shards() | get_shard_num_by_key_id returns the Redis shard number (zero-indexed)
given a key id.
Keyword arguments:
key_id -- the key id (e.g. '12345' or 'anythingcangohere')
This method is critical in how the Redis cluster sharding works. We
emulate twemproxy's md5 distribution algorithm. |
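A self-contained sketch of the same twemproxy-compatible distribution, handy for checking where a key id lands (the shard count of 4 here is hypothetical):

import hashlib

def shard_num(key_id, num_shards=4):
    # md5 the key id and use the first 4 bytes, little-endian, as the hash value
    m = hashlib.md5(str(key_id).encode('ascii')).hexdigest()
    val = (int(m[0:2], 16) |
           int(m[2:4], 16) << 8 |
           int(m[4:6], 16) << 16 |
           int(m[6:8], 16) << 24)
    return val % num_shards

shard_num('12345')  # 2 -- md5('12345') starts with 827ccb0e, and 0x0ecb7c82 % 4 == 2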
def get_canonical_key(self, key_type, key_id):
canonical_key_id = self.get_canonical_key_id(key_id)
return self.get_key(key_type, canonical_key_id) | get_canonical_key returns the canonical form of a key given a key id.
For example, '12345' maps to shard 6. The canonical key at index 6
(say '12') is the canonical key id given the key id of '12345'. This
is useful for sets that need to exist on all shards. See
compute_canonical_key_ids for how these are calculated.
Keyword arguments:
key_type -- the type of key (e.g. 'canceled')
key_id -- the key id (e.g. '12345')
returns the canonical key string (e.g. 'canceled:{12}') |
def get_canonical_key_id(self, key_id):
shard_num = self.get_shard_num_by_key_id(key_id)
return self._canonical_keys[shard_num] | get_canonical_key_id is used by get_canonical_key, see the comment
for that method for more explanation.
Keyword arguments:
key_id -- the key id (e.g. '12345')
returns the canonical key id (e.g. '12') |
def get_shard_by_num(self, shard_num):
if shard_num < 0 or shard_num >= self.num_shards():
raise ValueError("requested invalid shard# {0}".format(shard_num))
return self._shards[shard_num] | get_shard_by_num returns the shard at index shard_num.
Keyword arguments:
shard_num -- The shard index
Returns a redis.StrictRedis connection or raises a ValueError. |
def _get_key_id_from_key(self, key):
key_id = key
regex = '{0}([^{1}]*){2}'.format(self._hash_start, self._hash_stop,
self._hash_stop)
m = re.search(regex, key)
if m is not None:
# Use what's inside the hash tags as the key id, if present.
# Otherwise the whole key will be used as the key id.
key_id = m.group(1)
return key_id | _get_key_id_from_key returns the key id from a key, if found. otherwise
it just returns the key to be used as the key id.
Keyword arguments:
key -- The key to derive the ID from. If curly braces are found in the
key, then the contents of the curly braces are used as the
key id for the key.
Returns the key id portion of the key, or the whole key if no hash
tags are present. |
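The hash-tag extraction is a plain regex search; a standalone sketch assuming the default '{' and '}' tags:

import re

def key_id_from_key(key, hash_start='{', hash_stop='}'):
    # capture whatever sits between the hash tags, if any
    regex = '{0}([^{1}]*){2}'.format(hash_start, hash_stop, hash_stop)
    m = re.search(regex, key)
    return m.group(1) if m else key

key_id_from_key('friend_request:{12345}')  # '12345'
key_id_from_key('no_hash_tags_here')       # 'no_hash_tags_here'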
def compute_canonical_key_ids(self, search_amplifier=100):
canonical_keys = {}
num_shards = self.num_shards()
# Guarantees enough to find all keys without running forever
num_iterations = (num_shards**2) * search_amplifier
for key_id in range(1, num_iterations):
shard_num = self.get_shard_num_by_key(str(key_id))
if shard_num in canonical_keys:
continue
canonical_keys[shard_num] = str(key_id)
if len(canonical_keys) == num_shards:
break
if len(canonical_keys) != num_shards:
raise ValueError("Failed to compute enough keys. " +
"Wanted %d, got %d (search_amp=%d).".format(
num_shards, len(canonical_keys),
search_amplifier))
return canonical_keys | A canonical key id is the lowest integer key id that maps to
a particular shard. The mapping to canonical key ids depends on the
number of shards.
Returns a dictionary mapping from shard number to canonical key id.
This method will throw an exception if it fails to compute all of
the canonical key ids. |
def keys(self, args):
results = {}
# TODO: parallelize
for shard_num in range(0, self.num_shards()):
shard = self.get_shard_by_num(shard_num)
results[shard_num] = shard.keys(args)
return results | keys wrapper that queries every shard. This is an expensive
operation.
This method should be invoked on a TwemRedis instance as if it
were being invoked directly on a StrictRedis instance. |
def mget(self, args):
key_map = collections.defaultdict(list)
results = {}
for key in args:
shard_num = self.get_shard_num_by_key(key)
key_map[shard_num].append(key)
# TODO: parallelize
for shard_num in key_map.keys():
shard = self.get_shard_by_num(shard_num)
results[shard_num] = shard.mget(key_map[shard_num])
return results | mget wrapper that batches keys per shard and execute as few
mgets as necessary to fetch the keys from all the shards involved.
This method should be invoked on a TwemRedis instance as if it
were being invoked directly on a StrictRedis instance. |
def mset(self, args):
key_map = collections.defaultdict(dict)
result_count = 0
for key in args.keys():
value = args[key]
shard_num = self.get_shard_num_by_key(key)
key_map[shard_num][key] = value
# TODO: parallelize
for shard_num in key_map.keys():
shard = self.get_shard_by_num(shard_num)
result_count += shard.mset(key_map[shard_num])
return result_count | mset wrapper that batches keys per shard and execute as few
msets as necessary to set the keys in all the shards involved.
This method should be invoked on a TwemRedis instance as if it
were being invoked directly on a StrictRedis instance. |
def get_nested_attribute(obj, attribute):
parent, attr = resolve_nested_attribute(obj, attribute)
if not parent is None:
attr_value = getattr(parent, attr)
else:
attr_value = None
return attr_value | Returns the value of the given (possibly dotted) attribute for the given
object.
If any of the parents on the nested attribute's name path are `None`, the
value of the nested attribute is also assumed as `None`.
:raises AttributeError: If any attribute access along the attribute path
fails with an `AttributeError`. |
def set_nested_attribute(obj, attribute, value):
parent, attr = resolve_nested_attribute(obj, attribute)
if parent is None:
raise AttributeError('Can not set attribute "%s" on None value.'
% attr)
setattr(parent, attr, value) | Sets the value of the given (possibly dotted) attribute for the given
object to the given value.
:raises AttributeError: If any of the parents on the nested attribute's
name path are `None`. |
def id_generator(start=0):
count = start
while True:
send_value = (yield count)
if not send_value is None:
if send_value < count:
raise ValueError('Values from ID generator must increase '
'monotonically (current value: %d; value '
'sent to generator: %d).'
% (count, send_value))
count = send_value
else:
count += 1 | Generator for sequential numeric IDs. |
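Usage sketch: plain iteration yields consecutive IDs, and send() can jump the counter forward but never backward:

gen = id_generator()
next(gen)     # 0
next(gen)     # 1
gen.send(10)  # 10 (counter jumps forward)
next(gen)     # 11
gen.send(5)   # raises ValueError: values must increase monotonically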
def get_filter_specification_visitor(name, registry=None):
if registry is None:
registry = get_current_registry()
return registry.getUtility(IFilterSpecificationVisitor, name=name) | Returns the class registered as the filter specification
visitor utility under the given name (one of the
:const:`everest.querying.base.EXPRESSION_KINDS` constants).
:returns: class implementing
:class:`everest.interfaces.IFilterSpecificationVisitor` |
def get_order_specification_visitor(name, registry=None):
if registry is None:
registry = get_current_registry()
return registry.getUtility(IOrderSpecificationVisitor, name=name) | Returns the class registered as the order specification
visitor utility under the given name (one of the
:const:`everest.querying.base.EXPRESSION_KINDS` constants).
:returns: class implementing
:class:`everest.interfaces.IOrderSpecificationVisitor` |
def get_repository(name=None):
repo_mgr = get_repository_manager()
if name is None:
repo = repo_mgr.get_default()
else:
repo = repo_mgr.get(name)
return repo | Returns the repository with the given name or the default repository if
:param:`name` is `None`. |
def app_name_from_ini_file(ini_file_path):
parser = configparser.SafeConfigParser()
parser.read(ini_file_path)
return app_name_from_ini_parser(parser) | Returns the name of the main application from the given ini file. See
:function:`app_name_from_ini_parser` for details.
:param ini_file_path: Path to the .ini file to parse. |
def app_name_from_ini_parser(ini_parser):
app_names = [sect.split(':')[-1]
for sect in ini_parser.sections()
if sect[:4] == 'app:']
if len(app_names) == 1:
app_name = app_names[0]
else:
pp_sect_name = 'pipeline:main'
if ini_parser.has_section(pp_sect_name):
pipeline_apps = ini_parser.get(pp_sect_name, 'pipeline').split()
app_name = pipeline_apps[-1]
else:
raise ValueError('Could not determine application name. '
'You need to either define exactly one '
'app:<app name> section or a '
'pipeline:main section in your ini '
'file.')
return app_name | Returns the name of the main application from the given ini file parser.
The name is found as follows:
* If the ini file contains only one app:<app name> section,
return this app name;
* Else, if the ini file contains a pipeline:main section, use
the name of the innermost app;
* Else raise ValueError.
:param ini_parser: :class:`configparser.SafeConfigParser` instance with
an ini file read. |
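For example, with a hypothetical two-app pipeline the innermost app ('myapp') is returned; a sketch using configparser.ConfigParser in place of the SafeConfigParser used above:

import configparser

parser = configparser.ConfigParser()
parser.read_string('''
[app:filterapp]
use = egg:somefilterapp

[app:myapp]
use = egg:mypackage

[pipeline:main]
pipeline = filterapp myapp
''')
app_name_from_ini_parser(parser)  # 'myapp' (last app in the pipeline)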
def generative(func):
def wrap(inst, *args, **kw):
clone = type(inst).__new__(type(inst))
clone.__dict__ = inst.__dict__.copy()
return func(clone, *args, **kw)
return update_wrapper(wrap, func) | Marks an instance method as generative. |
def truncate(message, limit=500):
if len(message) > limit:
trc_msg = ''.join([message[:limit // 2 - 2],
' .. ',
message[len(message) - limit // 2 + 2:]])
else:
trc_msg = message
return trc_msg | Truncates the message to the given limit length. The beginning and the
end of the message are left untouched. |
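For instance, a long message is cut down to the limit with the middle replaced by ' .. ' (head and tail keep limit // 2 - 2 characters each):

msg = 'A' * 300 + 'B' * 300
out = truncate(msg, limit=100)
len(out)            # 100
out[:5], out[-5:]   # ('AAAAA', 'BBBBB')
out[46:54]          # 'AA .. BB'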
def remove_board(board_id):
log.debug('remove %s', board_id)
lines = boards_txt().lines()
lines = filter(lambda x: not x.strip().startswith(board_id + '.'), lines)
boards_txt().write_lines(lines) | remove board.
:param board_id: board id (e.g. 'diecimila')
:rtype: None |
def make_route(self, route) -> dict:
middleware = route['middleware'] if 'middleware' in route else None
# added to ALL requests to support xhr cross-site requests
route['methods'].append('OPTIONS')
return {
'url': route['url'],
'name': route['name'],
'methods': route['methods'],
'middleware': middleware,
'callback': {
'module': route['function'].__module__,
'class': route['function'].__qualname__.rsplit('.', 1)[0],
'function': route['function'].__name__
}
} | Construct a route dict to be registered with the Flask app |
def construct_routes(self):
modules = self.evernode_app.get_modules()
for module_name in modules:
with self.app.app_context():
module = importlib.import_module(
'modules.%s.routes' % (module_name))
for route in module.routes:
self.routes.append(self.make_route(route))
if self.app.config['DEBUG']:
print('--- Loaded Modules ---')
print("Loaded Modules: " + str(modules)) | Gets modules routes.py and converts to module imports |
def diffusion_driver(self):
if self._diffusion_driver is None:
return self,
if isinstance(self._diffusion_driver, list):
return tuple(self._diffusion_driver)
if isinstance(self._diffusion_driver, tuple):
return self._diffusion_driver
return self._diffusion_driver, | diffusion drivers are the underlying `dW` of each process `X` in an SDE like `dX = m dt + s dW`
:return list(StochasticProcess): |
def reset_codenames(self, dry_run=None, clear_existing=None):
self.created_codenames = []
self.updated_names = []
actions = ["add", "change", "delete", "view"]
if django.VERSION >= (2, 1):
actions.append("view")
for app in django_apps.get_app_configs():
for model in app.get_models():
try:
getattr(model, model._meta.simple_history_manager_attribute)
except AttributeError:
pass
else:
self.update_or_create(
model, dry_run=dry_run, clear_existing=clear_existing
)
if dry_run:
print("This is a dry-run. No modifications were made.")
if self.created_codenames:
print("The following historical permission.codenames were be added:")
pprint(self.created_codenames)
else:
print("No historical permission.codenames were added.")
if self.updated_names:
print("The following historical permission.names were updated:")
pprint(self.updated_names)
else:
print("No historical permission.names were updated.") | Ensures all historical model codenames exist in Django's Permission
model. |
def login(self, username, password, disableautosave=True, print_response=True):
if type(username) != str:
return False, "Username must be string"
if type(password) != str:
return False, "Password must be string"
if type(disableautosave) != bool:
return False, "Disableautosave must be boolean"
data = {"username": username, "password": password, "disableautosave": disableautosave}
status_response, response = self.call_api("r/user/login/", data, print_response=print_response)
# Store httpcookie if possible
if status_response and "deployr" in response:
if "response" in response["deployr"]:
if "httpcookie" in response["deployr"]["response"]:
self.JSESSIONID = response["deployr"]["response"]["httpcookie"]
return status_response, response | :param username:
:param password:
:param disableautosave: boolean
:param print_response: print log if required
:return: status code, response data |