def run(self):
this_widget = self.get_widget(self.docname)
self.widgets[repr(this_widget)] = this_widget
# Now add the node to the doctree
widget_node = widget()
ids = [repr(this_widget)]
names = [self.name]
attrs = dict(ids=ids, names=names)
widget_node.update_basic_atts(attrs)
return [widget_node] | Run at parse time.
When the documents are initially being scanned, this method runs
and does two things: (a) creates an instance that is added to
the site's widgets, and (b) leaves behind a placeholder docutils
node that can later be processed after the docs are resolved.
The latter needs enough information to retrieve the former. |
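# A hedged sketch (added, not from the source) of the matching resolve step:
# a Sphinx 'doctree-resolved' handler that swaps each placeholder for real
# output. `app.site.widgets` and `render()` are illustrative assumptions.
def process_widget_nodes(app, doctree, fromdocname):
    for node in doctree.traverse(widget):
        this_widget = app.site.widgets[node['ids'][0]]
        node.replace_self(this_widget.render(fromdocname))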
def register_references(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames: List[str]):
references: ReferencesContainer = sphinx_app.env.references
for name, klass in kb_app.config.resources.items():
# Name is the value in the decorator and directive, e.g.
# @kb.resource('category') means name=category
if getattr(klass, 'is_reference', False):
            references[name] = dict() | Walk the resource registry and register each reference-type resource in the Sphinx references container |
def register_handlers(self, handler_classes):
for handler_class in handler_classes:
self.handlers.append(handler_class(client=self.client))
logging.info('Successfully registered {handler_class}'.format(
handler_class=getattr(handler_class, '__name__', str(handler_class)))
) | Create handlers from discovered handler classes
:param handler_classes: List of :class:`~responsebot.handlers.base.BaseTweetHandler`'s derived classes |
def on_tweet(self, tweet):
logging.info(u'Received tweet: `{message}`'.format(message=tweet.text))
for handler in self.handlers:
if not handler.catch_self_tweets and self.is_self_tweet(tweet):
continue
if not handler.filter.match_tweet(tweet=tweet, user_stream=self.client.config.get('user_stream')):
continue
handler.on_tweet(tweet) | Callback to receive tweet from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the
received tweet to registered handlers.
:param tweet: An object containing a tweet's text and metadata
:type tweet: :class:`~responsebot.models.Tweet` |
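# Hedged usage sketch (added): a minimal handler whose on_tweet is invoked
# by the dispatch loop above. `EchoHandler` is an illustrative name; the
# base class is the one referenced in the docstrings of this module.
from responsebot.handlers.base import BaseTweetHandler

class EchoHandler(BaseTweetHandler):
    def on_tweet(self, tweet):
        print(tweet.text)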
def on_event(self, event):
if event.event not in TWITTER_NON_TWEET_EVENTS:
logging.warning(u'Received unknown twitter event {event}'.format(event=event.event))
return
logging.info(u'Received event {event}'.format(event=event.event))
for handler in self.handlers:
handler.on_event(event) | Callback to receive events from :class:`~responsebot.responsebot_stream.ResponseBotStream`. Tries to forward the
received event to registered handlers.
:param event: The received event
:type event: :class:`~responsebot.models.Event`
    :raises: on an error from a custom handler |
def get_merged_filter(self):
track = set()
follow = set()
for handler in self.handlers:
track.update(handler.filter.track)
follow.update(handler.filter.follow)
return TweetFilter(track=list(track), follow=list(follow)) | Return merged filter from list of handlers
:return: merged filter
:rtype: :class:`~responsebot.models.TweetFilter` |
def get_domain(url):
parse_result = urlparse(url)
domain = "{schema}://{netloc}".format(
schema=parse_result.scheme, netloc=parse_result.netloc)
    return domain | Get the domain part of a URL.
For example: https://www.python.org/doc/ -> https://www.python.org |
def join_all(domain, *parts):
l = list()
if domain.endswith("/"):
domain = domain[:-1]
l.append(domain)
for part in parts:
for i in part.split("/"):
if i.strip():
l.append(i)
url = "/".join(l)
return url | Join all url components.
Example::
>>> join_all("https://www.apple.com", "iphone")
https://www.apple.com/iphone
:param domain: Domain parts, example: https://www.python.org
:param parts: Other parts, example: "/doc", "/py27"
:return: url |
def add_params(endpoint, params):
p = PreparedRequest()
p.prepare(url=endpoint, params=params)
if PY2: # pragma: no cover
return unicode(p.url)
else: # pragma: no cover
return p.url | Combine query endpoint and params.
Example::
>>> add_params("https://www.google.com/search", {"q": "iphone"})
https://www.google.com/search?q=iphone |
def neval(expression, globals=None, locals=None, **kwargs):
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ast import parse
from ast import fix_missing_locations as fml
try:
transformer = kwargs['transformer']
except KeyError:
from napi.transformers import NapiTransformer as transformer
    node = parse(expression, '<string>', 'eval')
if globals is None:
globals = builtins.globals()
if locals is None:
locals = {}
trans = transformer(globals=globals, locals=locals, **kwargs)
trans.visit(node)
code = compile(fml(node), '<string>', 'eval')
return builtins.eval(code, globals, locals) | Evaluate *expression* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *expression* is transformed using
:class:`.NapiTransformer`. |
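# Hedged usage sketch (added): plain arithmetic is unaffected by the
# transformer, so this behaves like the built-in eval; neval's payoff is
# in logical expressions rewritten by NapiTransformer.
result = neval('2 + 2')  # -> 4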
def nexec(statement, globals=None, locals=None, **kwargs):
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ast import parse
from napi.transformers import NapiTransformer
from ast import fix_missing_locations as fml
try:
node = parse(statement, '<string>', 'exec')
    except SyntaxError:  # fall back to plain exec if the statement does not parse
exec(statement)
else:
if globals is None:
globals = builtins.globals()
if locals is None:
locals = {}
trans = NapiTransformer(globals=globals, locals=locals, **kwargs)
trans.visit(node)
code = compile(fml(node), '<string>', 'exec')
return builtins.eval(code, globals, locals) | Execute *statement* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *statement* is transformed using
:class:`.NapiTransformer`. |
def cli(ctx, oldversion):
# print ctx.bubble
path = ctx.home
bubble_file_name = path + '/.bubble'
config_file = path + '/config/config.yaml'
if file_exists(bubble_file_name):
pass
else:
with open(bubble_file_name, 'w') as dot_bubble:
dot_bubble.write('bubble=' + metadata.version)
dot_bubble.write('\nconfig=' + config_file)
ctx.say_green('Initialised a new bubble in [%s]' %
click.format_filename(bubble_file_name))
create_dir(ctx, path + '/config/')
create_dir(ctx, path + '/logs/')
create_dir(ctx, path + '/export/')
create_dir(ctx, path + '/import/')
create_dir(ctx, path + '/remember/')
create_dir(ctx, path + '/remember/archive')
    rules_file = path + '/config/rules.bubble'
    if file_exists(rules_file):
        pass
else:
with open(rules_file, 'w') as rules:
rules.write(get_example_rules_bubble())
ctx.say_green('Created an example rules in [%s]' %
click.format_filename(rules_file))
rule_functions_file = path + '/custom_rule_functions.py'
if file_exists(rule_functions_file):
pass
else:
with open(rule_functions_file, 'w') as rule_functions:
rule_functions.write(get_example_rule_functions())
ctx.say_green('Created an example rule_functions in [%s]' %
click.format_filename(rule_functions_file))
    ctx.say_green('Bubble upgraded') | Upgrade the current bubble; should mimic init as much as possible (experimental) |
def _list_remote(store, maildir, verbose=False):
# This command produces a list of all files in the maildir like:
# base-filename timestamp container-directory
command = """echo {maildir}/{{cur,new}} | tr ' ' '\\n' | while read path ; do ls -1Ugo --time-style=+%s $path | sed -rne "s|[a-zA-Z-]+[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+([0-9]+)[ \t]+([0-9]+\\.[A-Za-z0-9]+)(\\.([.A-Za-z0-9-]+))*(:[2],([PRSTDF]*))*|\\2 \\1 $path|p";done"""
stdout = store.cmd(command, verbose)
lines = stdout.split("\n")
for line in lines:
parts = line.split(" ")
if len(parts) >= 3:
            yield parts[0:3] | List a maildir.
store is an abstract representation of the source maildir.
maildir is the local maildir to which mail will be pulled.
This is a generator for a reason. Because of the way ssh
multi-mastering works a single open TCP connection allows multiple
virtual ssh connections. So the encryption and tcp only has to be
done once.
If this command returned a list then the ssh list command would
have finished and the ssh connection for each message would have
to be made again. |
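# Hedged consumption sketch (added; variable names are illustrative):
# iterate lazily so the single multiplexed ssh connection streams results.
for basename, timestamp, container_dir in _list_remote(store, "~/Maildir"):
    print(basename, timestamp, container_dir)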
def sshpull(host, maildir, localmaildir, noop=False, verbose=False, filterfile=None):
store = _SSHStore(host, maildir)
_pull(store, localmaildir, noop, verbose, filterfile) | Pull a remote maildir to the local one. |
def filepull(maildir, localmaildir, noop=False, verbose=False, filterfile=None):
store = _Store(maildir)
_pull(store, localmaildir, noop, verbose, filterfile) | Pull one local maildir into another.
The source need not be an md folder (it need not have a store). In
this case filepull is kind of an import. |
def _filter(msgdata, mailparser, mdfolder, mailfilters):
if mailfilters:
for f in mailfilters:
msg = mailparser.parse(StringIO(msgdata))
rule = f(msg, folder=mdfolder)
if rule:
yield rule
return | Filter msgdata by mailfilters |
def cmd(self, cmd, verbose=False):
command = cmd.format(maildir=self.directory)
if verbose:
print(command)
p = Popen([
"ssh",
"-T",
self.host,
command
], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout,stderr = p.communicate()
return stdout | Executes the specified command on the remote host.
The cmd must be format safe, this means { and } must be doubled, thusly:
echo /var/local/maildir/{{cur,new}}
the cmd can include the format word 'maildir' to be replaced
by self.directory. eg:
echo {maildir}/{{cur,new}} |
def fetch_result(self):
results = self.soup.find_all('div', {'class': 'container container-small'})
href = None
is_match = False
i = 0
while i < len(results) and not is_match:
result = results[i]
anchor = result.find('a', {'rel': 'bookmark'})
        is_match = self._filter_results(result, anchor)
        if is_match:
            href = anchor['href']
i += 1
try:
page = get_soup(href)
        except Exception:
page = None
# Return page if search is successful
if href and page:
return page
else:
            raise PageNotFoundError(PAGE_ERROR) | Search and return the page of the first matching result. |
def _filter_results(self, result, anchor):
valid = True
try:
cat_tag = result.find('a', {'rel': 'category tag'}).string
title = anchor.string.lower()
date_tag = result.find('time').string
except (AttributeError, TypeError):
return False
if cat_tag != "Daily Ratings":
valid = False
if not date_in_range(self.date, date_tag, 5):
valid = False
if self.category == 'cable' and 'cable' not in title:
valid = False
elif self.category != 'cable' and 'cable' in title:
valid = False
return valid | Filter search results by checking category titles and dates |
def _build_url(self):
url_params = [
BASE_URL, self.category + ' ratings', self.day, self.year, self.month
]
return SEARCH_URL.format(*url_params) | Build url based on searching by date or by show. |
def _assert_category(self, category):
category = category.lower()
valid_categories = ['cable', 'broadcast', 'final', 'tv']
assert_msg = "%s is not a valid category." % (category)
assert (category in valid_categories), assert_msg | Validate category argument |
def _get_response(self, url, **params):
data = urlencode(params)
url = "%s?%s" % (url, data)
headers = {'User-Agent': self.get_random_agent()}
request = Request(url, headers=headers, method='GET')
        def open_request(request, attempts, err=None):
            if attempts > self.request_attempts:
                raise err
            attempts += 1
try:
with urlopen(request, timeout=self.timeout) as response:
return response.read()
except HTTPError as err:
if err.getcode() < 500:
raise
print("HTTPError occurred while trying to request the url "
"%s. %s. Trying again in %s seconds..." % (url, err,
self.seconds_between_attempts))
time.sleep(self.seconds_between_attempts)
return open_request(request, attempts, err)
attempts = 0
self.last_response = open_request(request, attempts)
        return self.last_response | Given a service path and optional specific arguments, returns
the response string. |
def get_response(self, path, **params):
url = "%s%s" % (self.base_url, path)
        return self._get_response(url, **params) | Given a service path and optional specific arguments, returns
the response string. |
def get_data(self, path, **params):
xml = self.get_response(path, **params)
try:
return parse(xml)
except Exception as err:
print(path)
print(params)
print(err)
        raise | Given a service path and optional specific arguments, returns
the XML data from the API parsed as a dict structure. |
def run(self, port): # pragma: no coverage
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(self)
http_server.listen(port)
tornado.ioloop.IOLoop.instance().start() | Run on given port. Parse standard options and start the http server. |
def log_request(self, handler):
packet = {'method': handler.request.method,
'uri': handler.request.uri,
'remote_ip': handler.request.remote_ip,
'status': handler.get_status(),
'request_time_ms': handler.request.request_time() * 1000.0,
'service_id': self.service_id,
'request_id': handler.request.headers.get(REQUEST_ID_HEADER,
'undefined')
}
# handler can optionally define additional data to log
if hasattr(handler, 'logvalues'):
for key, value in handler.logvalues.iteritems():
packet[key] = value
servicelog.log(packet)
metric = "requests." + str(handler.get_status())
metrics.timing(metric, handler.request.request_time() * 1000.0)
super(LoggingApplication, self).log_request(handler) | Override base method to log requests to JSON UDP collector and emit
a metric. |
def logvalue(self, key, value):
if not hasattr(self, 'logvalues'):
self.logvalues = {}
self.logvalues[key] = value | Add log entry to request log info |
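# Hedged usage sketch (added; handler and field names are illustrative):
# values recorded here end up in the packet built by log_request above.
class ItemHandler(BaseHandler):
    def get(self, item_id):
        self.logvalue('item_id', item_id)
        self.finish('ok')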
def write_error(self, status_code, **kwargs):
message = default_message = httplib.responses.get(status_code, '')
# HTTPError exceptions may have a log_message attribute
if 'exc_info' in kwargs:
(_, exc, _) = kwargs['exc_info']
if hasattr(exc, 'log_message'):
message = str(exc.log_message) or default_message
self.logvalue('halt_reason', message)
title = "{}: {}".format(status_code, default_message)
body = "{}: {}".format(status_code, message)
self.finish("<html><title>" + title + "</title>"
"<body>" + body + "</body></html>") | Log halt_reason in service log and output error page |
def timeit(self, metric, func, *args, **kwargs):
return metrics.timeit(metric, func, *args, **kwargs) | Time execution of callable and emit metric then return result. |
def require_content_type(self, content_type):
if self.request.headers.get('content-type', '') != content_type:
self.halt(400, 'Content type must be ' + content_type) | Raises a 400 if request content type is not as specified. |
def set_headers(self, headers):
for (header, value) in headers.iteritems():
self.set_header(header, value) | Set headers |
def _ensure_request_id_header(self):
    if REQUEST_ID_HEADER not in self.request.headers:
        self.request.headers.add(REQUEST_ID_HEADER, uuid.uuid1().hex) | Ensure request headers have a request ID. Set one if needed. |
def load_parameters(self, source):
with open(source) as parameters_source:
loaded = yaml.safe_load(parameters_source.read())
for k, v in loaded.items():
if isinstance(v, str):
loaded[k] = "'"+v+"'"
        return loaded | For YAML, the source is the file path |
def load_config(self, config_source, parameters_source):
with open(config_source) as config_source:
config_raw = config_source.read()
parameters = {}
"""Parameteres from file"""
if os.path.isfile(parameters_source):
params = self.load_parameters(parameters_source)
if params is not None:
parameters.update(params)
"""Overwrite parameteres with the environment variables"""
env_params = {}
env_params.update(os.environ)
for k, v in env_params.items():
if is_string(v):
env_params[k] = "'" + v + "'"
parameters.update(env_params)
"""Replace the parameters"""
final_configuration = config_raw.format(**parameters)
final_configuration = yaml.safe_load(final_configuration)
        return final_configuration if final_configuration is not None else {} | For YAML, the source is the file path |
def load_parameters(self, source):
with open(source) as parameters_source:
        return json.loads(parameters_source.read()) | For JSON, the source is the file path |
def load_config(self, config_source, parameters_source):
with open(config_source) as config_source:
config_raw = config_source.read()
"""Replace the parameters"""
pattern = "(%[a-zA-Z_0-9]*%)"
self.parameters = {}
"""Parameteres from file"""
if os.path.isfile(parameters_source):
self.parameters.update(self.load_parameters(parameters_source))
"""Overwrite parameteres with the environment variables"""
self.parameters.update(os.environ)
replaced_config = re.sub(pattern=pattern, repl=self._replace_function, string=config_raw)
        return json.loads(replaced_config) | For JSON, the source is the file path |
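# Illustrative worked example (added; file contents are assumptions):
#   config.json:      {"db_host": "%DB_HOST%", "db_port": "%DB_PORT%"}
#   parameters.json:  {"DB_PORT": "5432"}
#   environment:      DB_HOST=localhost
# load_config("config.json", "parameters.json") would then return
#   {"db_host": "localhost", "db_port": "5432"}
# since environment variables overwrite file parameters.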
def main():
if len(argv) < 2:
targetfile = 'target.y'
else:
targetfile = argv[1]
print 'Parsing ruleset: ' + targetfile,
flex_a = Flexparser()
mma = flex_a.yyparse(targetfile)
print 'OK'
print 'Perform minimization on initial automaton:',
mma.minimize()
print 'OK'
print 'Perform StateRemoval on minimal automaton:',
state_removal = StateRemoval(mma)
mma_regex = state_removal.get_regex()
    print mma_regex | Testing function for the DFA Brzozowski operation |
def _state_removal_init(self):
# First, we remove all multi-edges:
for state_i in self.mma.states:
for state_j in self.mma.states:
if state_i.stateid == state_j.stateid:
self.l_transitions[
state_i.stateid, state_j.stateid] = self.epsilon
else:
self.l_transitions[
state_i.stateid, state_j.stateid] = self.empty
for arc in state_i.arcs:
if arc.nextstate == state_j.stateid:
if self.l_transitions[state_i.stateid, state_j.stateid] != self.empty:
self.l_transitions[state_i.stateid, state_j.stateid] \
+= self.mma.isyms.find(arc.ilabel)
else:
self.l_transitions[state_i.stateid, state_j.stateid] = \
self.mma.isyms.find(arc.ilabel) | State Removal Operation Initialization |
def _state_removal_solve(self):
initial = sorted(
self.mma.states,
key=attrgetter('initial'),
reverse=True)[0].stateid
for state_k in self.mma.states:
if state_k.final:
continue
if state_k.stateid == initial:
continue
self._state_removal_remove(state_k.stateid)
print self.l_transitions
return self.l_transitions | The State Removal Operation |
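# The elimination step this loop relies on (a sketch of standard state
# removal; assumed to match _state_removal_remove, which is not shown):
# removing state k rewrites every remaining label so that paths through k
# are folded into direct i -> j regular expressions:
#   L[i, j] = L[i, j] + L[i, k] . (L[k, k])* . L[k, j]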
def save(self, *args, **kwargs):
stripped_name = ' '.join(
w for w in self.organization.name.split()
if w not in STOPWORDS
)
if not self.slug:
self.slug = uuslug(
stripped_name,
instance=self,
max_length=100,
separator='-',
start_no=2
)
self.uid = '{}_body:{}'.format(
self.jurisdiction.uid, slugify(stripped_name))
super(Body, self).save(*args, **kwargs) | **uid**: :code:`{jurisdiction.uid}_body:{slug}` |
def _set_elangles(self):
elang_list = list(self.attr_gen('elangle'))
try:
elevation_angles = sorted(zip(*elang_list)[1])
n_elangles = len(elevation_angles)
self.elangles = dict(zip(list(string.ascii_uppercase[:n_elangles]), elevation_angles))
except IndexError:
self.elangles = {} | Sets the values of instance variable elangles.
Method creates a dictionary containing the elangles of the pvol file.
    Elangles are ordered in ascending order using uppercase letters as keys.
Examples
--------
>>> pvol = OdimPVOL('pvol.h5')
>>> print(pvol.elangles)
{'A': 0.5, 'C': 1.5, 'B': 0.69999999999999996, 'E': 5.0, 'D': 3.0} |
def select_dataset(self, elangle, quantity):
elangle_path = None
try:
search_results = self.search('elangle', self.elangles[elangle])
except KeyError:
return None
if search_results == []:
print('Elevation angle {} is not found from file'.format(elangle))
print('File contains elevation angles:{}'.format(self.elangles))
else:
elangle_path = search_results[0]
if elangle_path is not None:
dataset_root = re.search( '^/dataset[0-9]+/', elangle_path).group(0)
quantity_path = None
search_results = self.search('quantity', quantity)
for path in search_results:
if dataset_root in path:
quantity_path = path
break
if quantity_path is not None:
dataset_path = re.search('^/dataset[0-9]+/data[0-9]/', quantity_path).group(0)
dataset_path = os.path.join(dataset_path, 'data')
if isinstance(self[dataset_path], h5py.Dataset):
self.dataset = self[dataset_path].ref
return dataset_path | Selects the matching dataset and returns its path.
Parameters
----------
elangle : str
Upper case ascii letter defining the elevation angle
quantity : str
Name of the quantity e.g. DBZH, VRAD, RHOHV...
Returns
-------
dataset : str
Path of the matching dataset or None if no dataset is found.
Examples
--------
Get the hdf5 path of the DBZH dataset at lowest elevation angle
    >>> pvol = OdimPVOL('pvol.h5')
>>> dataset = pvol.select_dataset('A', 'DBZH')
>>> print(dataset)
'/dataset1/data1/data' |
def get(self, url, headers={}, retry=True):
return self.request(url=url, method="GET", headers=headers,
retry=retry) | Execute an HTTP GET request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True. |
def post(self, url, body="", headers={}, retry=True):
headers["Content-Length"] = str(len(body))
return self.request(url=url, method="POST", body=body, headers=headers,
retry=retry) | Execute an HTTP POST request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True. |
def patch(self, url, body="", headers={}, retry=True):
return self.request(url=url, method="PATCH", body=body, headers=headers,
retry=retry) | Execute an HTTP PATCH request and return a dict containing the
response and the response status code.
Keyword arguments:
url -- The path to execute the result against, not including the API
version or project ID, with no leading /. Required.
body -- A string or file object to send as the body of the request.
Defaults to an empty string.
headers -- HTTP Headers to send with the request. Can overwrite the
defaults. Defaults to {}.
retry -- Whether exponential backoff should be employed. Defaults
to True. |
def clone(cls, srcpath, destpath):
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'dump', '--quiet', '.']
dump = subprocess.Popen(
cmd, cwd=srcpath, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
repo = cls.create(destpath)
repo.load(dump.stdout)
stderr = dump.stderr.read()
dump.stdout.close()
dump.stderr.close()
dump.wait()
if dump.returncode != 0:
raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
return repo | Copy a main repository to a new location. |
def create(cls, path):
try:
os.makedirs(path)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'create', path]
subprocess.check_call(cmd)
return cls(path) | Create a new repository |
def proplist(self, rev, path=None):
rev, prefix = self._maprev(rev)
if path is None:
return self._proplist(str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._proplist(str(rev), path) | List Subversion properties of the path |
def propget(self, prop, rev, path=None):
rev, prefix = self._maprev(rev)
if path is None:
return self._propget(prop, str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._propget(prop, str(rev), path) | Get Subversion property value of the path |
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd) | Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments. |
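# Hedged usage sketch (added; `SvnRepo` stands in for whichever class
# defines these methods): dump revisions 0..100 of a repository to a file.
repo = SvnRepo('/srv/svn/project')  # illustrative constructor
with open('backup.dump', 'wb') as stream:
    repo.dump(stream, lower=0, upper=100, incremental=True)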
def load(
self, stream, progress=None, ignore_uuid=False, force_uuid=False,
use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
cmd = [SVNADMIN, 'load', '.']
if progress is None:
cmd.append('-q')
if ignore_uuid:
cmd.append('--ignore-uuid')
if force_uuid:
cmd.append('--force-uuid')
if use_pre_commit_hook:
cmd.append('--use-pre-commit-hook')
if use_post_commit_hook:
cmd.append('--use-post-commit-hook')
if parent_dir:
cmd.extend(['--parent-dir', parent_dir])
p = subprocess.Popen(
cmd, cwd=self.path, stdin=stream, stdout=progress,
stderr=subprocess.PIPE
)
stderr = p.stderr.read()
p.stderr.close()
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr) | Load a dumpfile stream into the repository.
:param stream: A file stream from which the dumpfile is read
:param progress: A file stream to which progress is written
See ``svnadmin help load`` for details on the other arguments. |
def temp_file(
content=None,
suffix='',
prefix='tmp',
parent_dir=None):
binary = isinstance(content, (bytes, bytearray))
parent_dir = parent_dir if parent_dir is None else str(parent_dir)
fd, abs_path = tempfile.mkstemp(suffix, prefix, parent_dir, text=False)
path = pathlib.Path(abs_path)
try:
try:
if content:
os.write(fd, content if binary else content.encode())
finally:
os.close(fd)
yield path.resolve()
finally:
with temporary.util.allow_missing_file():
path.unlink() | Create a temporary file and optionally populate it with content. The file
is deleted when the context exits.
The temporary file is created when entering the context manager and
deleted when exiting it.
>>> import temporary
>>> with temporary.temp_file() as temp_file:
... assert temp_file.exists()
>>> assert not temp_file.exists()
The user may also supply some content for the file to be populated with:
>>> with temporary.temp_file('hello!') as temp_file:
... with temp_file.open() as f:
... assert f.read() == 'hello!'
The temporary file can be placed in a custom directory:
>>> with temporary.temp_dir() as temp_dir:
... with temporary.temp_file(parent_dir=temp_dir) as temp_file:
... assert temp_file.parent == temp_dir
If, for some reason, the user wants to delete the temporary file before
exiting the context, that's okay too:
>>> with temporary.temp_file() as temp_file:
... temp_file.unlink() |
def _emit_no_set_found(environment_name, product_name):
sys.stdout.write(colorama.Fore.YELLOW + 'No connections found in global config file '
'in environment: {0} for product: {1}'
.format(environment_name, product_name) +
colorama.Fore.RESET)
sys.stdout.write('\n')
logger.warning('No connections found in environment: {0} for product: {1}'
                   .format(environment_name, product_name)) | Writes to stdout and logs if no connection string is found for deployment
:param environment_name:
:param product_name:
:return: |
def load_content(self):
# get the toc file from the root file
rel_path = self.root_file_url.replace(os.path.basename(self.root_file_url), '')
self.toc_file_url = rel_path + self.root_file.find(id="ncx")['href']
self.toc_file_soup = bs(self.book_file.read(self.toc_file_url), 'xml')
# get the book content from the toc file
for n, c in cross(self.toc_file_soup.find_all('navLabel'), self.toc_file_soup.find_all('content')):
content_soup = bs(self.book_file.read(rel_path + c.get('src')))
self.content.append({'part_name': c.text,
'source_url': c.get('src'),
'content_source': content_soup,
'content_source_body': content_soup.body,
'content_source_text': content_soup.body.text}) | Load the book content |
def UninstallTrump(RemoveDataTables=True, RemoveOverrides=True, RemoveFailsafes=True):
ts = ['_symbols', '_symbol_validity', '_symbol_tags', '_symbol_aliases',
'_feeds', '_feed_munging', '_feed_munging_args', '_feed_sourcing',
'_feed_validity', '_feed_meta', '_feed_tags', '_feed_handle',
'_index_kwargs', '_indicies', '_symbol_handle', '_symboldatadef']
if RemoveOverrides:
ts.append('_overrides')
if RemoveFailsafes:
ts.append('_failsafes')
engine = create_engine(ENGINE_STR)
if RemoveDataTables:
results = engine.execute("SELECT name FROM _symbols;")
datatables = [row['name'] for row in results]
ts = ts + datatables
drops = "".join(['DROP TABLE IF EXISTS "{}" CASCADE;'.format(t) for t in ts])
engine.execute(drops) | This script removes all tables associated with Trump.
It's written for PostgreSQL, but should be very easy to adapt to other
databases. |
def calcPeptideMass(peptide, **kwargs):
aaMass = kwargs.get('aaMass', maspy.constants.aaMass)
aaModMass = kwargs.get('aaModMass', maspy.constants.aaModMass)
elementMass = kwargs.get('elementMass', pyteomics.mass.nist_mass)
addModMass = float()
unmodPeptide = peptide
for modId, modMass in viewitems(aaModMass):
modSymbol = '[' + modId + ']'
numMod = peptide.count(modSymbol)
if numMod > 0:
unmodPeptide = unmodPeptide.replace(modSymbol, '')
addModMass += modMass * numMod
if unmodPeptide.find('[') != -1:
print(unmodPeptide)
raise Exception('The peptide contains modification, ' +
'not present in maspy.constants.aaModMass'
)
unmodPeptideMass = sum(aaMass[i] for i in unmodPeptide)
unmodPeptideMass += elementMass['H'][0][0]*2 + elementMass['O'][0][0]
modPeptideMass = unmodPeptideMass + addModMass
return modPeptideMass | Calculate the mass of a peptide.
:param aaMass: A dictionary with the monoisotopic masses of amino acid
residues, by default :attr:`maspy.constants.aaMass`
:param aaModMass: A dictionary with the monoisotopic mass changes of
modications, by default :attr:`maspy.constants.aaModMass`
:param elementMass: A dictionary with the masses of chemical elements, by
default ``pyteomics.mass.nist_mass``
:param peptide: peptide sequence, modifications have to be written in the
format "[modificationId]" and "modificationId" has to be present in
:attr:`maspy.constants.aaModMass`
#TODO: change to a more efficient way of calculating the modified mass, by
first extracting all present modifications and then looking up their masses. |
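# Hedged worked example (added): for an unmodified peptide the result is
# the sum of the residue masses plus one water (2 H + O) for the termini,
# e.g. calcPeptideMass('PEPTIDE') is approx. 799.36 Da (monoisotopic).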
def removeModifications(peptide):
while peptide.find('[') != -1:
peptide = peptide.split('[', 1)[0] + peptide.split(']', 1)[1]
    return peptide | Removes all modifications from a peptide string and returns the plain
amino acid sequence.
:param peptide: peptide sequence, modifications have to be written in the
format "[modificationName]"
:param peptide: str
:returns: amino acid sequence of ``peptide`` without any modifications |
def returnModPositions(peptide, indexStart=1, removeModString='UNIMOD:'):
unidmodPositionDict = dict()
while peptide.find('[') != -1:
currModification = peptide.split('[')[1].split(']')[0]
currPosition = peptide.find('[') - 1
if currPosition == -1: # move n-terminal modifications to first position
currPosition = 0
currPosition += indexStart
peptide = peptide.replace('['+currModification+']', '', 1)
if removeModString:
currModification = currModification.replace(removeModString, '')
unidmodPositionDict.setdefault(currModification,list())
unidmodPositionDict[currModification].append(currPosition)
return unidmodPositionDict | Determines the amino acid positions of all present modifications.
:param peptide: peptide sequence, modifications have to be written in the
format "[modificationName]"
:param indexStart: returned amino acids positions of the peptide start with
this number (first amino acid position = indexStart)
:param removeModString: string to remove from the returned modification name
:return: {modificationName:[position1, position2, ...], ...}
#TODO: adapt removeModString to the new unimod ids in
#maspy.constants.aaModComp ("UNIMOD:X" -> "u:X") -> also change unit tests. |
def calcMhFromMz(mz, charge):
mh = (mz * charge) - (maspy.constants.atomicMassProton * (charge-1) )
return mh | Calculate the MH+ value from mz and charge.
:param mz: float, mass to charge ratio (Dalton / charge)
:param charge: int, charge state
:returns: mass to charge ratio of the mono protonated ion (charge = 1) |
def calcMzFromMh(mh, charge):
mz = (mh + (maspy.constants.atomicMassProton * (charge-1))) / charge
return mz | Calculate the mz value from MH+ and charge.
:param mh: float, mass to charge ratio (Dalton / charge) of the mono
protonated ion
:param charge: int, charge state
:returns: mass to charge ratio of the specified charge state |
def calcMzFromMass(mass, charge):
mz = (mass + (maspy.constants.atomicMassProton * charge)) / charge
return mz | Calculate the mz value of a peptide from its mass and charge.
:param mass: float, exact non protonated mass
:param charge: int, charge state
:returns: mass to charge ratio of the specified charge state |
def calcMassFromMz(mz, charge):
mass = (mz - maspy.constants.atomicMassProton) * charge
return mass | Calculate the mass of a peptide from its mz and charge.
:param mz: float, mass to charge ratio (Dalton / charge)
:param charge: int, charge state
:returns: non protonated mass (charge = 0) |
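# Worked example tying the four converters together (added; assumes
# atomicMassProton is approx. 1.00728 Da): a neutral mass of 799.3600 Da
# at charge state 2 gives
#   mz = (799.3600 + 2 * 1.00728) / 2  # approx. 400.6873
# and calcMassFromMz(400.6873, 2) recovers approx. 799.3600.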
def execute(self, processProtocol, command, env={},
path=None, uid=None, gid=None, usePTY=0, childFDs=None):
sshCommand = (command if isinstance(command, SSHCommand)
else SSHCommand(command, self.precursor, path))
commandLine = sshCommand.getCommandLine()
# Get connection to ssh server
connectionDeferred = self.getConnection(uid)
# spawn the remote process
connectionDeferred.addCallback(connectProcess, processProtocol,
commandLine, env, usePTY, childFDs)
return connectionDeferred | Execute a process on the remote machine using SSH
@param processProtocol: the ProcessProtocol instance to connect
    @param command: the command to run remotely, either a plain string or an
        SSHCommand instance
@param env: environment variables to request the remote ssh server to set
@param path: the remote path to start the remote process on
@param uid: user id or username to connect to the ssh server with
@param gid: this is not used for remote ssh processes
    @param usePTY: whether to request a pty for the process
@param childFDs: file descriptors to use for stdin, stdout and stderr |
def _getUserAuthObject(self, user, connection):
credentials = self._getCredentials(user)
userAuthObject = AutomaticUserAuthClient(user, connection, **credentials)
return userAuthObject | Get a SSHUserAuthClient object to use for authentication
@param user: The username to authenticate for
@param connection: The connection service to start after authentication |
def _verifyHostKey(self, hostKey, fingerprint):
if fingerprint in self.knownHosts:
return defer.succeed(True)
return defer.fail(UnknownHostKey(hostKey, fingerprint)) | Called when ssh transport requests us to verify a given host key.
Return a deferred that callback if we accept the key or errback if we
decide to reject it. |
def yield_once(iterator):
@wraps(iterator)
def yield_once_generator(*args, **kwargs):
yielded = set()
for item in iterator(*args, **kwargs):
if item not in yielded:
yielded.add(item)
yield item
return yield_once_generator | Decorator to make an iterator returned by a method yield each result only
once.
>>> @yield_once
... def generate_list(foo):
... return foo
>>> list(generate_list([1, 2, 1]))
[1, 2]
:param iterator: Any method that returns an iterator
    :return: A method returning an iterator
that yields every result only once at most. |
def _to_list(var):
if isinstance(var, list):
return var
elif var is None:
return []
elif isinstance(var, str) or isinstance(var, dict):
        # We don't want to make a list out of those via the default constructor
return [var]
else:
try:
return list(var)
except TypeError:
return [var] | Make variable to list.
>>> _to_list(None)
[]
>>> _to_list('whee')
['whee']
>>> _to_list([None])
[None]
>>> _to_list((1, 2, 3))
[1, 2, 3]
:param var: variable of any type
:return: list |
def arguments_to_lists(function):
def l_function(*args, **kwargs):
l_args = [_to_list(arg) for arg in args]
l_kwargs = {}
for key, value in kwargs.items():
l_kwargs[key] = _to_list(value)
return function(*l_args, **l_kwargs)
return l_function | Decorator for a function that converts all arguments to lists.
:param function: target function
:return: target function with only lists as parameters |
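# Illustrative doctest (added; not part of the original docstring):
>>> @arguments_to_lists
... def pair(a, b=None):
...     return a, b
>>> pair('x', b=(1, 2))
(['x'], [1, 2])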
def get_public_members(obj):
return {attr: getattr(obj, attr) for attr in dir(obj)
if not attr.startswith("_")
            and not hasattr(getattr(obj, attr), '__call__')} | Retrieves the member-like attributes (members or properties) that are
    publicly exposed.
    :param obj: The object to probe.
    :return: A dict mapping attribute names to their values. |
def generate_eq(*members):
def decorator(cls):
def eq(self, other):
if not isinstance(other, cls):
return False
return all(getattr(self, member) == getattr(other, member)
for member in members)
def ne(self, other):
return not eq(self, other)
cls.__eq__ = eq
cls.__ne__ = ne
return cls
return decorator | Decorator that generates equality and inequality operators for the
decorated class. The given members as well as the type of self and other
will be taken into account.
Note that this decorator modifies the given class in place!
:param members: A list of members to compare for equality. |
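# Illustrative doctest (added; not part of the original docstring):
>>> @generate_eq('x', 'y')
... class Point:
...     def __init__(self, x, y):
...         self.x, self.y = x, y
>>> Point(1, 2) == Point(1, 2)
True
>>> Point(1, 2) != Point(1, 3)
True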
def generate_ordering(*members):
def decorator(cls):
def lt(self, other):
if not isinstance(other, cls):
raise TypeError("Comparison with unrelated classes is "
"unsupported.")
for member in members:
if getattr(self, member) == getattr(other, member):
continue
if (
getattr(self, member) is None or
getattr(other, member) is None):
return getattr(self, member) is None
return getattr(self, member) < getattr(other, member)
return False
cls.__lt__ = lt
return total_ordering(generate_eq(*members)(cls))
return decorator | Decorator that generates ordering operators for the decorated class based
on the given member names. All ordering except equality functions will
raise a TypeError when a comparison with an unrelated class is attempted.
(Comparisons with child classes will thus work fine with the capabilities
of the base class as python will choose the base classes comparison
operator in that case.)
Note that this decorator modifies the given class in place!
:param members: A list of members to compare, ordered from high priority to
low. I.e. if the first member is equal the second will be
taken for comparison and so on. If a member is None it is
considered smaller than any other value except None. |
def enforce_signature(function):
argspec = inspect.getfullargspec(function)
annotations = argspec.annotations
argnames = argspec.args
unnamed_annotations = {}
for i, arg in enumerate(argnames):
if arg in annotations:
unnamed_annotations[i] = (annotations[arg], arg)
def decorated(*args, **kwargs):
for i, annotation in unnamed_annotations.items():
if i < len(args):
assert_right_type(args[i], annotation[0], annotation[1])
for argname, argval in kwargs.items():
if argname in annotations:
assert_right_type(argval, annotations[argname], argname)
return function(*args, **kwargs)
return decorated | Enforces the signature of the function by throwing TypeError's if invalid
arguments are provided. The return value is not checked.
You can annotate any parameter of your function with the desired type or a
tuple of allowed types. If you annotate the function with a value, this
value only will be allowed (useful especially for None). Example:
>>> @enforce_signature
... def test(arg: bool, another: (int, None)):
... pass
...
>>> test(True, 5)
>>> test(True, None)
Any string value for any parameter e.g. would then trigger a TypeError.
:param function: The function to check. |
def as_string(self):
if self.headers_only:
self.msgobj = self._get_content()
# We could just use msgobj.as_string() but this is more flexible... we might need it.
from email.generator import Generator
fp = StringIO()
g = Generator(fp, maxheaderlen=60)
g.flatten(self.msgobj)
text = fp.getvalue()
return text | Get the underlying message object as a string |
def iteritems(self):
for n,v in self.msgobj.__dict__["_headers"]:
yield n.lower(), v
return | Present the email headers |
def _set_flag(self, flag):
self.folder._invalidate_cache()
# TODO::: turn the flag off when it's already on
def replacer(m):
return "%s/%s.%s%s" % (
joinpath(self.folder.base, self.folder.folder, "cur"),
m.group("key"),
m.group("hostname"),
":2,%s" % (
"%s%s" % (m.group("flags"), flag) if m.group("flags") \
else flag
)
)
newfilename = self.msgpathre.sub(replacer, self.filename)
self.filesystem.rename(self.filename, newfilename)
self.filename = newfilename | Turns the specified flag on |
def _get_message(self, key, since=None):
stored = self.store[key]
if isinstance(stored, dict):
filename = stored["path"]
folder = stored["folder"]
if since and since > 0.0:
st = stat(filename)
if st.st_mtime < since:
return None
stored = MdMessage(
key,
filename = filename,
folder = folder,
filesystem = folder.filesystem
)
self.store[key] = stored
else:
if since and since > 0.0:
st = stat(stored.filename)
if st.st_mtime < since:
return None
return stored | Return the MdMessage object for the key.
The object is either returned from the cache in the store or
made, cached and then returned.
If 'since' is passed in the modification time of the file is
checked and the message is only returned if the mtime is since
the specified time.
If the 'since' check fails, None is returned.
'since' must be seconds since epoch. |
def _foldername(self, additionalpath=""):
if not self._foldername_cache.get(additionalpath):
fn = joinpath(self.base, self.folder, additionalpath) \
if not self.is_subfolder \
else joinpath(self.base, ".%s" % self.folder, additionalpath)
self._foldername_cache[additionalpath] = fn
return self._foldername_cache[additionalpath] | Dot decorate a folder name. |
def folders(self):
entrys = self.filesystem.listdir(abspath(self._foldername()))
regex = re.compile("\\..*")
just_dirs = dict([(d,d) for d in entrys if regex.match(d)])
folder = self._foldername()
filesystem = self.filesystem
class FolderList(object):
def __iter__(self):
dirs = list(just_dirs.keys())
dirs.sort()
dirs.reverse()
for dn in dirs:
yield MdFolder(
dn[1:],
base=folder,
subfolder=True,
filesystem=filesystem
)
return
def __list__(self):
return [dn[1:] for dn in just_dirs]
def __contains__(self, name):
return just_dirs.__contains__(".%s" % name)
def __getitem__(self, name):
return MdFolder(
just_dirs[".%s" % name][1:],
base=folder,
subfolder=True,
filesystem=filesystem
)
f = FolderList()
return f | Return a map of the subfolder objects for this folder.
This is a snapshot of the folder list at the time the call was made.
It does not update over time.
The map contains MdFolder objects:
maildir.folders()["Sent"]
might retrieve the folder .Sent from the maildir. |
def move(self, key, folder):
# Basically this is a sophisticated __delitem__
# We need the path so we can make it in the new folder
path, host, flags = self._exists(key)
self._invalidate_cache()
# Now, move the message file to the new folder
newpath = joinpath(
folder.base,
folder.get_name(),
"cur", # we should probably move it to new if it's in new
basename(path)
)
self.filesystem.rename(path, newpath)
# And update the caches in the new folder
folder._invalidate_cache() | Move the specified key to folder.
folder must be an MdFolder instance. MdFolders can be obtained
through the 'folders' method call. |
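# Hedged usage sketch (added; `maildir` and `key` are illustrative names),
# using the folders() API documented above:
sent = maildir.folders()["Sent"]
maildir.move(key, sent)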
def _muaprocessnew(self):
foldername = self._foldername("new")
files = self.filesystem.listdir(foldername)
for filename in files:
if filename == "":
continue
curfilename = self._foldername(joinpath("new", filename))
newfilename = joinpath(
self._cur,
"%s:2,%s" % (filename, "")
)
self.filesystem.rename(curfilename, newfilename) | Moves all 'new' files into cur, correctly flagging |
def _exists(self, key):
filecache, keycache = self._fileslist()
msg = keycache.get(key, None)
if msg:
path = msg.filename
meta = filecache[path]
return path, meta["hostname"], meta.get("flags", "")
raise KeyError("not found %s" % key) | Find a key in a particular section
Searches through all the files and looks for matches with a regex. |
def __get_slice(data, slice_number, axis=0, flipH=False, flipV=False):
if axis == 0:
data2d = data[slice_number, :, :]
elif axis == 1:
data2d = data[:, slice_number, :]
elif axis == 2:
data2d = data[:, :, slice_number]
else:
logger.error("axis number error")
print("axis number error")
return None
if flipV:
if data2d is not None:
data2d = data2d[-1:0:-1,:]
if flipH:
if data2d is not None:
data2d = data2d[:, -1:0:-1]
return data2d | :param data:
:param slice_number:
:param axis:
:param flipV: vertical flip
:param flipH: horizontal flip
:return: |
def __put_slice_in_slim(slim, dataim, sh, i):
a, b = np.unravel_index(int(i), sh)
st0 = int(dataim.shape[0] * a)
st1 = int(dataim.shape[1] * b)
sp0 = int(st0 + dataim.shape[0])
sp1 = int(st1 + dataim.shape[1])
slim[
st0:sp0,
st1:sp1
] = dataim
return slim | put one small slice as a tile in a big image |
def _import_data(data, axis, slice_step, first_slice_offset=0):
try:
import SimpleITK as sitk
if type(data) is sitk.SimpleITK.Image:
data = sitk.GetArrayFromImage(data)
    except ImportError:
        pass
data = __select_slices(data, axis, slice_step, first_slice_offset=first_slice_offset)
return data | import ndarray or SimpleITK data |
def generate_data(shp=[16, 20, 24]):
x = np.ones(shp)
# inserting box
x[4:-4, 6:-2, 1:-6] = -1
x_noisy = x + np.random.normal(0, 0.6, size=x.shape)
return x_noisy | Generating data |
def index_to_coords(index, shape):
    coords = []
    for i in range(1, len(shape)):
        divisor = int(np.prod(shape[i:]))
        value = index // divisor
        coords.append(value)
        index -= value * divisor
    coords.append(index)
    return tuple(coords) | convert index to coordinates given the shape |
def slices(img, shape=[3, 4]):
sh = np.asarray(shape)
i_max = np.prod(sh)
allimg = np.zeros(img.shape[-2:] * sh)
for i in range(0, i_max):
# i = 0
        islice = int(round((img.shape[0] / float(i_max)) * i))
# print(islice)
imgi = img[islice, :, :]
coords = index_to_coords(i, sh)
aic = np.asarray(img.shape[-2:]) * coords
allimg[aic[0]:aic[0] + imgi.shape[-2], aic[1]:aic[1] + imgi.shape[-1]] = imgi
# plt.imshow(imgi)
# print(imgi.shape)
# print(img.shape)
return allimg | create tiled image with multiple slices
:param img:
:param shape:
:return: |
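# Hedged usage sketch (added): tile a 24-slice volume into a 3x4 grid.
import numpy as np
vol = np.random.rand(24, 64, 64)
tiled = slices(vol, shape=[3, 4])  # tiled.shape == (192, 256)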
def sed2(img, contour=None, shape=[3, 4]):
    plt.imshow(slices(img, shape), cmap='gray')
    if contour is not None:
        plt.contour(slices(contour, shape)) | plot tiled image of multiple slices
:param img:
:param contour:
:param shape:
:return: |
def set_window(self, windowC, windowW):
if not (windowW and windowC):
windowW = np.max(self.img) - np.min(self.img)
windowC = (np.max(self.img) + np.min(self.img)) / 2.0
self.imgmax = windowC + (windowW / 2)
self.imgmin = windowC - (windowW / 2)
self.windowC = windowC
self.windowW = windowW | Sets visualization window
:param windowC: window center
:param windowW: window width
:return: |
def rotate_to_zaxis(self, new_zaxis):
img = self._rotate_end(self.img, self.zaxis)
seeds = self._rotate_end(self.seeds, self.zaxis)
contour = self._rotate_end(self.contour, self.zaxis)
        # Rotate data in dependency on zaxis
self.img = self._rotate_start(img, new_zaxis)
self.seeds = self._rotate_start(seeds, new_zaxis)
self.contour = self._rotate_start(contour, new_zaxis)
self.zaxis = new_zaxis
# import ipdb
# ipdb.set_trace()
# self.actual_slice_slider.valmax = self.img.shape[2] - 1
self.actual_slice = 0
self.rotated_back = False
# update slicer
self.fig.delaxes(self.ax_actual_slice)
self.ax_actual_slice.cla()
del(self.actual_slice_slider)
self.fig.add_axes(self.ax_actual_slice)
self.actual_slice_slider = Slider(self.ax_actual_slice, 'Slice', 0,
self.img.shape[2] - 1,
valinit=0)
self.actual_slice_slider.on_changed(self.sliceslider_update)
self.update_slice() | rotate image to selected axis
:param new_zaxis:
:return: |
def __flip(self, sliceimg):
if self.flipH:
sliceimg = sliceimg[:, -1:0:-1]
if self.flipV:
            sliceimg = sliceimg[-1:0:-1, :]
return sliceimg | Flip if asked in self.flipV or self.flipH
:param sliceimg: one image slice
    :return: flipped slice |
def on_scroll(self, event):
    if event.button == 'up':
        self.next_slice()
    if event.button == 'down':
        self.prev_slice()
    self.actual_slice_slider.set_val(self.actual_slice) | mouse wheel is used for setting slider value |
def on_press(self, event):
    if event.inaxes != self.ax:
        return
    # contains, attrd = self.rect.contains(event)
    # if not contains: return
    # print('event contains', self.rect.xy)
    # x0, y0 = self.rect.xy
    self.press = [event.xdata], [event.ydata], event.button | on button press we will see if the mouse is over us and store data |
def on_motion(self, event):
    if self.press is None:
        return
    if event.inaxes != self.ax:
        return
    # print(event.inaxes)
    x0, y0, btn = self.press
    x0.append(event.xdata)
    y0.append(event.ydata) | on motion we will move the rect if the mouse is over us |
def on_release(self, event):
    if self.press is None:
        return
    # print(self.press)
    x0, y0, btn = self.press
    if btn == 1:
        color = 'r'
    elif btn == 2:
        color = 'b'  # noqa
    # plt.axes(self.ax)
    # plt.plot(x0, y0)
    # button mapping
    btn = self.button_map[btn]
    self.set_seeds(y0, x0, self.actual_slice, btn)
    # self.fig.canvas.draw()
    # pdb.set_trace();
    self.press = None
    self.update_slice() | on release we reset the press data |
def get_seed_sub(self, label):
sx, sy, sz = np.nonzero(self.seeds == label)
        return sx, sy, sz | Return the coordinates of all seeds with a specific label |
def find(self, instance_id):
instance = AtlasServiceInstance.Instance(instance_id, self.backend)
self.backend.storage.populate(instance)
return instance | find an instance
Create a new instance and populate it with data stored if it exists.
Args:
instance_id (str): UUID of the instance
Returns:
AtlasServiceInstance.Instance: An instance |
def create(self, instance, parameters, existing):
if not instance.isProvisioned():
# Set parameters
instance.parameters = parameters
# Existing cluster
if existing and not self.backend.atlas.Clusters.is_existing_cluster(instance.parameters[self.backend.config.PARAMETER_CLUSTER]):
# We need to use an existing cluster that is not available !
raise ErrClusterNotFound(instance.parameters[self.backend.config.PARAMETER_CLUSTER])
elif not existing:
# We need to create a new cluster
# We should not reach this code because the AtlasBroker.provision should
# raise an ErrPlanUnsupported before.
raise NotImplementedError()
result = self.backend.storage.store(instance)
# Provision done
return ProvisionedServiceSpec(ProvisionState.SUCCESSFUL_CREATED,
"",
str(result))
elif instance.parameters == parameters:
# Identical so nothing to do
return ProvisionedServiceSpec(ProvisionState.IDENTICAL_ALREADY_EXISTS,
"",
"duplicate")
else:
# Different parameters ...
raise ErrInstanceAlreadyExists() | Create the instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
existing (bool): Create an instance on an existing Atlas cluster
Returns:
ProvisionedServiceSpec: Status
Raises:
ErrInstanceAlreadyExists: If instance exists but with different parameters
ErrClusterNotFound: Cluster does not exist |
def delete(self, instance):
#TODO: Really drop the database based on a policy set in `instance.parameters`.
#
# We need :
# - Set a policy in parameters of the instance (eg: policy-on-delete : retain|drop => default to retain)
# - to check that the database name `instance.get_dbname()` is not in use by another instance (shared database)
# - credential on the Atlas cluster `instance.get_cluster()` to drop the database
#
self.backend.storage.remove(instance)
return DeprovisionServiceSpec(False, "done") | Delete the instance
Args:
instance (AtlasServiceInstance.Instance): an existing instance
Returns:
DeprovisionServiceSpec: Status |
def push(self, item):
    hash(item)
    heapq.heappush(self._items, item) | Push the value item onto the heap, maintaining the heap invariant.
    If the item is not hashable, a TypeError is raised. |