code | docs
---|---
def plot_word(self, position):
    '''Blits a rendered word onto the main display surface'''
    posrectangle = pygame.Rect(position, self.word_size)
    self.used_pos.append(posrectangle)
    self.cloud.blit(self.rendered_word, position) | Blits a rendered word onto the main display surface |
def collides(self, position, size):
    '''Returns True if the word collides with another plotted word.'''
    word_rect = pygame.Rect(position, self.word_size)
    if word_rect.collidelistall(self.used_pos) == []:
        return False
    else:
        return True | Returns True if the word collides with another plotted word. |
def expand(self, delta_width, delta_height):
    '''Makes the cloud surface bigger. Maintains all word positions.'''
    temp_surface = pygame.Surface((self.width + delta_width, self.height + delta_height))
    (self.width, self.height) = (self.width + delta_width, self.height + delta_height)
    temp_surface.blit(self.cloud, (0, 0))
    self.cloud = temp_surface | Makes the cloud surface bigger. Maintains all word positions. |
def smart_cloud(self, input, max_text_size=72, min_text_size=12, exclude_words=True):
    '''Creates a word cloud using the input.
    Input can be a file, directory, or text.
    Set exclude_words to True if you want to eliminate words that only occur once.'''
    self.exclude_words = exclude_words
    if isdir(input):
        self.directory_cloud(input, max_text_size, min_text_size)
    elif isfile(input):
        text = read_file(input)
        self.text_cloud(text, max_text_size, min_text_size)
    elif isinstance(input, basestring):
        self.text_cloud(input, max_text_size, min_text_size)
    else:
        print 'Input type not supported.'
        print 'Supported types: String, Directory, .txt file' | Creates a word cloud using the input.
Input can be a file, directory, or text.
Set exclude_words to True if you want to eliminate words that only occur once. |
def display(self):
    '''Displays the word cloud to the screen.'''
    pygame.init()
    self.display = pygame.display.set_mode((self.width, self.height))
    self.display.blit(self.cloud, (0, 0))
    pygame.display.update()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return | Displays the word cloud to the screen. |
def fermi_dist(energy, beta):
exponent = np.asarray(beta*energy).clip(-600, 600)
return 1./(np.exp(exponent) + 1) | Fermi-Dirac distribution |
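A minimal usage sketch (assuming the fermi_dist above is in scope and numpy is imported as np; the sample energies are made up):

import numpy as np

print(fermi_dist(np.array([-1.0, 0.0, 1.0]), beta=10.0))
# -> [9.99954602e-01 5.00000000e-01 4.53978687e-05]
print(fermi_dist(np.array([1e6]), beta=1.0))
# -> a tiny but finite value; the .clip(-600, 600) keeps np.exp from overflowing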
def diagonalize(operator):
eig_values, eig_vecs = LA.eigh(operator)
emin = np.amin(eig_values)
eig_values -= emin
return eig_values, eig_vecs | Diagonalizes the single-site spin Hamiltonian; the spectrum is shifted so the ground-state energy is zero |
def gf_lehmann(eig_e, eig_states, d_dag, beta, omega, d=None):
ew = np.exp(-beta*eig_e)
zet = ew.sum()
G = np.zeros_like(omega)
basis_create = np.dot(eig_states.T, d_dag.dot(eig_states))
if d is None:
tmat = np.square(basis_create)
else:
tmat = np.dot(eig_states.T, d.T.dot(eig_states))*basis_create
tmat *= np.add.outer(ew, ew)
gap = np.add.outer(-eig_e, eig_e)
N = eig_e.size
for i, j in product(range(N), range(N)):
G += tmat[i, j] / (omega + gap[i, j])
return G / zet | Outputs the Lehmann representation of the Green's function.
omega has to be given as Matsubara or real frequencies |
def expected_value(operator, eig_values, eig_states, beta):
aux = np.einsum('i,ji,ji', np.exp(-beta*eig_values),
eig_states, operator.dot(eig_states))
return aux / partition_func(beta, eig_values) | Calculates the thermal average of an observable.
It requires that states and operators are expressed in the same basis |
def create_category(cls, category, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_category_with_http_info(category, **kwargs)
else:
(data) = cls._create_category_with_http_info(category, **kwargs)
return data | Create Category
Create a new Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_category(category, async=True)
>>> result = thread.get()
:param async bool
:param Category category: Attributes of category to create (required)
:return: Category
If the method is called asynchronously,
returns the request thread. |
def delete_category_by_id(cls, category_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_category_by_id_with_http_info(category_id, **kwargs)
else:
(data) = cls._delete_category_by_id_with_http_info(category_id, **kwargs)
return data | Delete Category
Delete an instance of Category by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_category_by_id(category_id, async=True)
>>> result = thread.get()
:param async bool
:param str category_id: ID of category to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_category_by_id(cls, category_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_category_by_id_with_http_info(category_id, **kwargs)
else:
(data) = cls._get_category_by_id_with_http_info(category_id, **kwargs)
return data | Find Category
Return single instance of Category by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_category_by_id(category_id, async=True)
>>> result = thread.get()
:param async bool
:param str category_id: ID of category to return (required)
:return: Category
If the method is called asynchronously,
returns the request thread. |
def list_all_categories(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_categories_with_http_info(**kwargs)
else:
(data) = cls._list_all_categories_with_http_info(**kwargs)
return data | List Categories
Return a list of Categories
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_categories(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Category]
If the method is called asynchronously,
returns the request thread. |
def replace_category_by_id(cls, category_id, category, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_category_by_id_with_http_info(category_id, category, **kwargs)
else:
(data) = cls._replace_category_by_id_with_http_info(category_id, category, **kwargs)
return data | Replace Category
Replace all attributes of Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_category_by_id(category_id, category, async=True)
>>> result = thread.get()
:param async bool
:param str category_id: ID of category to replace (required)
:param Category category: Attributes of category to replace (required)
:return: Category
If the method is called asynchronously,
returns the request thread. |
def update_category_by_id(cls, category_id, category, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_category_by_id_with_http_info(category_id, category, **kwargs)
else:
(data) = cls._update_category_by_id_with_http_info(category_id, category, **kwargs)
return data | Update Category
Update attributes of Category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_category_by_id(category_id, category, async=True)
>>> result = thread.get()
:param async bool
:param str category_id: ID of category to update. (required)
:param Category category: Attributes of category to update. (required)
:return: Category
If the method is called asynchronously,
returns the request thread. |
def get_plain_text(self):
_msg = self.message if self.message is not None else [""]
msg = _msg if isinstance(_msg, list) else [_msg]
line = "" if not self.line else ", line {}".format(self.line)
ret = ["{} found in file '{}'{}::".format(self.type.capitalize(), self.filename, line),
" <<"]+ \
[" "+x for x in msg]+ \
[" >>"]
return ret | Returns a list of strings |
def get_plain_text(self):
ret = []
for occ in self.occurrences:
ret.extend(occ.get_plain_text())
return ret | Returns a list of strings |
def slugify(string):
    string = re.sub(r'[^\w .-]', '', string)
    string = string.replace(" ", "-")
    return string | Removes characters other than word characters, spaces, dots and hyphens, then converts spaces to hyphens. Useful for making file names.
Source: http://stackoverflow.com/questions/5574042/string-slugification-in-python |
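A quick usage sketch (assuming the slugify above is in scope and re is imported):

print(slugify('My Report: Draft 1.txt'))
# -> My-Report-Draft-1.txt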
def crunch_dir(name, n=50):
if len(name) > n + 3:
name = "..." + name[-n:]
return name | Prefixes "..." and keeps only the last n characters when the directory name is longer than n + 3. |
def add_bits_to_path(path_, filename_prefix=None, extension=None):
dir_, basename = os.path.split(path_)
if filename_prefix:
basename = filename_prefix+basename
if extension:
if not extension.startswith("."):
extension = "."+extension
basename = basename+extension
return os.path.join(dir_, basename) | Adds prefix/suffix to filename
Arguments:
path_ -- path to file
filename_prefix -- prefix to be added to file name
extension -- extension to be added to file name. The dot is added automatically, so
"ext" and ".ext" have the same effect
Examples:
> add_bits_to_path("/home/user/file", "prefix-")
/home/user/prefix-file
> add_bits_to_path("/home/user/file", None, ".ext")
/home/user/file.ext
> add_bits_to_path("/home/user/file", None, "ext") # dot in extension is optional
/home/user/file.ext
> add_bits_to_path("/home/user/", None, ".ext")
/home/user/.ext |
def multirow_str_vector(f, n, r=0):
so_far = 0
n_rows = 0
v = []
while True:
temp = str_vector(f)
n_rows += 1
n_now = len(temp)
if n_now+so_far > n:
a99.get_python_logger().warning(('Reading multi-row vector: '
'row %d should have %d values (has %d)') %
(r+n_rows, n-so_far, n_now))
v.extend(temp[:n-so_far])
so_far = n
elif n_now+so_far <= n:
so_far += n_now
v.extend(temp)
if so_far == n:
break
return v, n_rows | Assembles a vector that spans several rows in a text file.
Arguments:
f -- file-like object
n -- number of values expected
r (optional) -- Index of last row read in file (to tell which file row in
case of error)
Returns:
(list-of-strings, number-of-rows-read-from-file) |
def new_filename(prefix, extension=None, flag_minimal=True):
if extension is None:
extension = ""
if len(extension) > 0 and extension[0] == '.':
extension = extension[1:]
# extension-sensitive format for filename
fmt = '{0!s}-{1:04d}.{2!s}' if extension else '{0!s}-{1:04d}'
# Removes tailing dash because it would look funny (but will be re-added in format string)
prefix_ = prefix[:-1] if prefix.endswith("-") else prefix
i = -1
while True:
if i == -1:
if flag_minimal:
ret = "{}.{}".format(prefix_, extension) if extension else prefix_
else:
ret = fmt.format(prefix_, i, extension)
if not os.path.exists(ret):
break
i += 1
if i > 9999:
raise RuntimeError("Could not make a new file name for (prefix='{0!s}', extension='{1!s}')".format(prefix, extension))
return ret | returns a file name that does not exist yet, e.g. prefix.0001.extension
Args:
prefix:
extension: examples: "dat", ".dat" (leading dot will be detected, does not repeat dot in name)
flag_minimal:
- True: will try to be as "clean" as possible
- False: will generate filenames in a simple, same-length pattern
Example: ``new_filename("molecules-", "dat", True)``
In the example above, the first attempt will be "molecules.dat", then "molecules-0000.dat".
If flag_minimal were False, it would skip the first attempt. |
def rename_to_temp(filename):
with _rename_to_temp_lock:
root, ext = os.path.splitext(filename)
if len(ext) > 0:
ext = ext[1:] # the dot (".") is originally included
new_name = new_filename(root, ext)
os.rename(filename, new_name)
return new_name | *Thread-safe* renames file to temporary filename. Returns new name |
def create_symlink(source, link_name):
os_symlink = getattr(os, "symlink", None)
if isinstance(os_symlink, collections.Callable):
os_symlink(source, link_name)
else:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
flags = 1 if os.path.isdir(source) else 0
if csl(link_name, source, flags) == 0:
raise ctypes.WinError() | Creates symbolic link for either operating system.
http://stackoverflow.com/questions/6260149/os-symlink-support-in-windows |
def is_text_file(filepath, blocksize=2**14):
with open(filepath, "rb") as fileobj:
block = fileobj.read(blocksize)
if b'\x00' in block:
# Files with null bytes are binary
return False
elif not block:
# an empty file is considered a valid text file
return True
# Use translate's 'deletechars' argument to efficiently remove all
# occurrences of _text_characters from the block
nontext = block.translate(None, _text_characters)
return float(len(nontext)) / len(block) <= 0.30 | Uses heuristics to guess whether the given file is text or binary,
by reading a single block of bytes from the file.
If more than ~30% of the chars in the block are non-text, or there
are NUL ('\x00') bytes in the block, assume this is a binary file. |
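A runnable sketch; the module-level _text_characters constant is not shown above, so the definition here (printable ASCII plus common whitespace, following the recipe this helper is based on) is an assumption:

# Assumed definition of the module-level constant:
_text_characters = bytes(range(32, 127)) + b'\n\r\t\f\b'

with open('demo.txt', 'wb') as fh:  # hypothetical files for the demo
    fh.write(b'hello world\n')
print(is_text_file('demo.txt'))     # -> True

with open('demo.bin', 'wb') as fh:
    fh.write(b'\x00\x01\x02\x03')
print(is_text_file('demo.bin'))     # -> False (contains NUL bytes)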
def get_path(*args, module=a99):
p = os.path.abspath(os.path.join(os.path.split(module.__file__)[0], *args))
return p | Returns full path to specified module
Args:
*args: are added at the end of module path with os.path.join()
module: Python module, defaults to a99
Returns: path string
>>> get_path() |
def harvest_repo(root_url, archive_path, tag=None, archive_mode='w:gz'):
if not svn_exists():
raise Exception("SVN not found. It probably needs installing.")
clone_path = mkdtemp(dir=cfg['CFG_TMPDIR'])
svn = get_which_svn()
if tag:
call([svn, 'co', root_url + '/tags/' + tag, clone_path])
else:
call([svn, 'co', root_url + '/trunk/', clone_path])
chdir(cfg['CFG_TMPDIR'])
tar = tarfile.open(name=archive_path, mode=archive_mode)
tar.add(clone_path, arcname=root_url.split('/').pop())
tar.close()
try:
rmtree(clone_path)
except OSError as e:
# Reraise unless ENOENT: No such file or directory
# (ok if directory has already been deleted)
if e.errno != errno.ENOENT:
raise | Archives a specific tag in a specific SVN repository.
:param root_url: This is the root url of the repo and should end in the
repo name.
:param archive_path: Where the archive will be stored - must end in a
valid extension that matches the archive_mode type (the default mode
'w:gz' requires 'tar.gz')
:param tag: This is the tag you want to harvest, None=HEAD
:param archive_mode: See 'tarfile.open' modes default w:gz > tar.gz |
def send_recovery_notice(self, login, app_alias, state=None):
data = {
"login": login,
"appAlias": app_alias,
"state": state,
}
response = self.__app.native_api_call('user-management', 'sendRecoveryNotice', data, self.__options, False, None, False, http_path="/api/meta/v1/", http_method="POST")
return json.loads(response.text) | Send a password-recovery email |
def load_many(self, fobjs=None):
if fobjs is not None:
# tolerance
if not hasattr(fobjs, "__iter__"):
fobjs = [fobjs]
for index, (fobj, page) in enumerate(zip(fobjs, self.pages)):
if fobj is None:
continue
elif isinstance(fobj, ft.DataFile):
self.load(fobj, index)
elif isinstance(fobj, str):
self.load_filename(fobj, index)
else:
raise TypeError("Invalid object of class '{}'".format(fobj.__class__.__name__)) | Loads as many files as the number of pages
Args:
fobjs: [filename or DataFile obj, ...] |
def _add_log_tab(self):
# text_tab = "Log (Alt+&{})".format(len(self.pages)+1)
text_tab = "Log"
self.pages.append(MyPage(text_tab=text_tab))
# ### Log tab
te = self.textEdit_log = self.keep_ref(QTextEdit())
te.setReadOnly(True)
self.tabWidget.addTab(te, text_tab) | Adds element to pages and new tab |
def load(self, fobj, index=None):
if index is None:
index = self._get_tab_index()
page = self.pages[index]
if fobj is None:
return
if not isinstance(fobj, tuple(page.clss_load)):
raise RuntimeError('Object to load must be in {0!s} (not a {1!s})'.format(
[x.__name__ for x in page.clss_load], fobj.__class__.__name__))
page.editor.load(fobj)
self._update_gui_text_tabs() | Loads given DataFile object. **tolerant with None**
Args:
fobj: object of one of accepted classes
index: tab index to load fobj into. If not passed, loads into current tab |
def load_filename(self, filename, index=None):
filename = str(filename) # QString protection
if index is None:
index = self._get_tab_index()
page = self.pages[index]
# Maybe this is set on purpose before loading attempt to leave new load_dir set (?)
self.load_dir, _ = os.path.split(filename)
clss = page.clss_load
if len(clss) == 1:
# If there is only one class to handle the file, will load it in a way that eventual
# load errors will raise
f = clss[0]()
f.load(filename)
else:
# At the moment, the multi-class alternative will not display particular error information
# if the file does not load
f = f311.load_with_classes(filename, page.clss_load)
if f is None:
raise RuntimeError("Could not load '{0!s}'".format(filename))
self.load(f, index) | Loads file given filename
Args:
filename:
index: tab index to load file into. If not passed, loads into current tab |
def keyPressEvent(self, evt):
incr = 0
if evt.modifiers() == Qt.ControlModifier:
n = self.tabWidget.count()
if evt.key() in [Qt.Key_PageUp, Qt.Key_Backtab]:
incr = -1
elif evt.key() in [Qt.Key_PageDown, Qt.Key_Tab]:
incr = 1
if incr != 0:
new_index = self._get_tab_index() + incr
if new_index < 0:
new_index = n - 1
elif new_index >= n:
new_index = 0
self.tabWidget.setCurrentIndex(new_index) | This handles Ctrl+PageUp, Ctrl+PageDown, Ctrl+Tab, Ctrl+Shift+Tab |
def _on_changed(self):
page = self._get_page()
if not page.flag_autosave:
page.flag_changed = True
self._update_gui_text_tabs() | Slot for changed events |
def _get_tab_description(self):
text = self._get_page().text_tab
if "(" in text:
text = text[:text.index("(") - 1]
text = text[0].lower() + text[1:]
return text | Returns "description" of current tab (tab text without shortcut info). |
def _update_gui_text_tabs(self):
for index, page in enumerate(self.pages):
self.tabWidget.setTabText(index, "{} (Alt+&{}){}".format(page.text_tab, index+1, (" (changed)" if page.flag_changed else ""))) | Iterates through pages to update tab texts |
def __generic_save(self):
page = self._get_page()
f = page.editor.f
if not f:
return True
if not page.editor.flag_valid:
a99.show_error("Cannot save, {0!s} has error(s)!".format(f.description))
return True
if f.filename:
f.save_as()
self.add_log("Saved '{}'".format(f.filename))
page.flag_changed = False
self._update_gui_text_tabs()
if hasattr(page.editor, "update_gui_label_fn"):
page.editor.update_gui_label_fn() # duck typing
return True
else:
return self.__generic_save_as() | Returns False if user has cancelled a "save as" operation, otherwise True. |
def __generic_save_as(self):
page = self._get_page()
if not page.editor.f:
return True
if page.editor.f.filename:
d = page.editor.f.filename
else:
d = os.path.join(self.save_dir if self.save_dir is not None \
else self.load_dir if self.load_dir is not None \
else ".", page.editor.f.default_filename)
new_filename = QFileDialog.getSaveFileName(self, page.make_text_saveas(), d, page.wild)[0]
if new_filename:
self.save_dir, _ = os.path.split(str(new_filename))
page.editor.f.save_as(str(new_filename))
page.flag_changed = False
self._update_gui_text_tabs()
page.editor.update_gui_label_fn()
return True
return False | Returns False if user has cancelled operation, otherwise True. |
def update_config(self, cluster_config, login_config):
oldvalue = self.__update_option(cluster_config, 'ssh_to', 'ssh_to')
if oldvalue:
log.debug("Attribute 'ssh_to' updated: %s -> %s", oldvalue, self.ssh_to) | Update current configuration.
This method is usually called after loading a `Cluster`
instance from a persistent storage. Note that not all fields
are actually updated, but only those that can be safely
updated. |
def remove_node(self, node, stop=False):
if node.kind not in self.nodes:
raise NodeNotFound("Unable to remove node %s: invalid node type `%s`.",
node.name, node.kind)
else:
try:
index = self.nodes[node.kind].index(node)
if self.nodes[node.kind][index]:
del self.nodes[node.kind][index]
if stop:
node.stop()
self._naming_policy.free(node.kind, node.name)
self.repository.save_or_update(self)
except ValueError:
raise NodeNotFound("Node %s not found in cluster" % node.name) | Removes a node from the cluster.
By default it doesn't stop the node, it just removes it from
the known hosts of this cluster.
:param node: node to remove
:type node: :py:class:`Node`
:param stop: Stop the node
:type stop: bool |
def _start_nodes_sequentially(self, nodes):
started_nodes = set()
for node in copy(nodes):
started = self._start_node(node)
if started:
started_nodes.add(node)
# checkpoint cluster state
self.repository.save_or_update(self)
return started_nodes | Start the nodes sequentially without forking.
Return set of nodes that were actually started. |
def _start_nodes_parallel(self, nodes, max_thread_pool_size):
# Create one thread for each node to start
thread_pool_size = min(len(nodes), max_thread_pool_size)
thread_pool = Pool(processes=thread_pool_size)
log.debug("Created pool of %d threads", thread_pool_size)
# pressing Ctrl+C flips this flag, which in turn stops the main loop
# down below
keep_running = True
def sigint_handler(signal, frame):
"""
Makes sure the cluster is saved, before the sigint results in
exiting during node startup.
"""
log.error(
"Interrupted: will save cluster state and exit"
" after all nodes have started.")
keep_running = False
# intercept Ctrl+C
with sighandler(signal.SIGINT, sigint_handler):
result = thread_pool.map_async(self._start_node, nodes)
while not result.ready():
result.wait(1)
# check if Ctrl+C was pressed
if not keep_running:
log.error("Aborting upon user interruption ...")
# FIXME: `.close()` will keep the pool running until all
# nodes have been started; should we use `.terminate()`
# instead to interrupt node creation as soon as possible?
thread_pool.close()
thread_pool.join()
self.repository.save_or_update(self)
# FIXME: should raise an exception instead!
sys.exit(1)
# keep only nodes that were successfully started
return set(node for node, ok
in itertools.izip(nodes, result.get()) if ok) | Start the nodes using a pool of multiprocessing threads for speed-up.
Return set of nodes that were actually started. |
def _start_node(node):
log.debug("_start_node: working on node `%s`", node.name)
# FIXME: the following check is not optimal yet. When a node is still
# in a starting state, it will start another node here, since the
# `is_alive` method will only check for running nodes (see issue #13)
if node.is_alive():
log.info("Not starting node `%s` which is "
"already up&running.", node.name)
return True
else:
try:
node.start()
log.info("Node `%s` has been started.", node.name)
return True
except Exception as err:
log.exception("Could not start node `%s`: %s -- %s",
node.name, err, err.__class__)
return False | Start the given node VM.
:return: bool -- True on success, False otherwise |
def get_all_nodes(self):
nodes = self.nodes.values()
if nodes:
return reduce(operator.add, nodes, list())
else:
return [] | Returns a list of all nodes in this cluster as a mixed list of
different node kinds.
:return: list of :py:class:`Node` |
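The reduce(operator.add, ...) call simply flattens the per-kind node lists into one list; a standalone illustration of the idiom (plain lists stand in for Node objects):

import operator
from functools import reduce  # reduce is a builtin on Python 2, where this code runs

print(reduce(operator.add, [['frontend001'], ['compute001', 'compute002']], []))
# -> ['frontend001', 'compute001', 'compute002']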
def get_node_by_name(self, nodename):
nodes = dict((n.name, n) for n in self.get_all_nodes())
try:
return nodes[nodename]
except KeyError:
raise NodeNotFound("Node %s not found" % nodename) | Return the node corresponding with name `nodename`
:params nodename: Name of the node
:type nodename: str |
def get_frontend_node(self):
if self.ssh_to:
if self.ssh_to in self.nodes:
cls = self.nodes[self.ssh_to]
if cls:
return cls[0]
else:
log.warning(
"preferred `ssh_to` `%s` is empty: unable to "
"get the choosen frontend node from that class.",
self.ssh_to)
else:
raise NodeNotFound(
"Invalid ssh_to `%s`. Please check your "
"configuration file." % self.ssh_to)
# If we reach this point, the preferred class was empty. Pick
# one using the default logic.
for cls in sorted(self.nodes.keys()):
if self.nodes[cls]:
return self.nodes[cls][0]
# Uh-oh, no nodes in this cluster.
raise NodeNotFound("Unable to find a valid frontend: "
"cluster has no nodes!") | Returns the first node of the class specified in the
configuration file as `ssh_to`, or the first node of
the first class in alphabetic order.
:return: :py:class:`Node`
:raise: :py:class:`elasticluster.exceptions.NodeNotFound` if no
valid frontend node is found |
def _parse(name):
match = NodeNamingPolicy._NODE_NAME_RE.match(name)
if match:
return match.groupdict()
else:
raise ValueError(
"Cannot parse node name `{name}`"
.format(name=name)) | Return dict of parts forming `name`. Raise `ValueError` if string
`name` cannot be correctly parsed.
The default implementation uses
`NodeNamingPolicy._NODE_NAME_RE` to parse the name back into
constituent parts.
This is ideally the inverse of :meth:`_format` -- it should be
able to parse a node name string into the parameter values
that were used to form it. |
def new(self, kind, **extra):
if self._free[kind]:
index = self._free[kind].pop()
else:
self._top[kind] += 1
index = self._top[kind]
return self._format(self.pattern, kind=kind, index=index, **extra) | Return a host name for a new node of the given kind.
The new name is formed by interpolating ``{}``-format
specifiers in the string given as ``pattern`` argument to the
class constructor. The following names can be used in the
``{}``-format specifiers:
* ``kind`` -- the `kind` argument
* ``index`` -- a positive integer number, guaranteed to be unique (per kind)
* any other keyword argument used in the call to :meth:`new`
Example::
>>> p = NodeNamingPolicy(pattern='node-{kind}-{index}{spec}')
>>> p.new('foo', spec='bar')
'node-foo-1bar'
>>> p.new('foo', spec='quux')
'node-foo-2quux' |
def use(self, kind, name):
try:
params = self._parse(name)
index = int(params['index'], 10)
if index in self._free[kind]:
self._free[kind].remove(index)
top = self._top[kind]
if index > top:
self._free[kind].update(range(top + 1, index))
self._top[kind] = index
except ValueError:
log.warning(
"Cannot extract numerical index"
" from node name `%s`!", name) | Mark a node name as used. |
def free(self, kind, name):
try:
params = self._parse(name)
index = int(params['index'], 10)
self._free[kind].add(index)
assert index <= self._top[kind]
if index == self._top[kind]:
self._top[kind] -= 1
except ValueError:
# ignore failures in self._parse()
pass | Mark a node name as no longer in use.
It could thus be recycled to name a new node. |
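A toy version of the index-recycling scheme behind new()/use()/free() (illustrative only; the real class also parses and formats the node names):

from collections import defaultdict

free, top = defaultdict(set), defaultdict(int)  # mirrors self._free / self._top

def new_index(kind):
    if free[kind]:
        return free[kind].pop()
    top[kind] += 1
    return top[kind]

def free_index(kind, index):
    free[kind].add(index)

print(new_index('compute'))  # -> 1
print(new_index('compute'))  # -> 2
free_index('compute', 1)
print(new_index('compute'))  # -> 1 again, recycled from the free set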
def is_alive(self):
running = False
if not self.instance_id:
return False
try:
log.debug("Getting information for instance %s",
self.instance_id)
running = self._cloud_provider.is_instance_running(
self.instance_id)
except Exception as ex:
log.debug("Ignoring error while looking for vm id %s: %s",
self.instance_id, str(ex))
if running:
log.debug("node `%s` (instance id %s) is up and running",
self.name, self.instance_id)
self.update_ips()
else:
log.debug("node `%s` (instance id `%s`) still building...",
self.name, self.instance_id)
return running | Checks if the current node is up and running in the cloud. It
only checks the status provided by the cloud interface. Therefore a
node might be running, but not yet ready to ssh into it. |
def update_ips(self):
self.ips = self._cloud_provider.get_ips(self.instance_id)
return self.ips[:] | Retrieves the public and private ip of the instance by using the
cloud provider. In some cases the public ip assignment takes some
time, but this method is non-blocking. To check for a public ip,
consider calling this method multiple times during a certain timeout. |
def get_config_obj(filename):
if not filename.startswith("."):
a99.get_python_logger().warning("Configuration filename '{}' does not start with a '.'".format(filename))
path_ = os.path.join(os.path.expanduser("~"), filename)
return AAConfigObj(path_, encoding="UTF8") | Reads/creates filename at user **home** folder and returns a AAConfigObj object |
def _get_section(self, path_):
if isinstance(path_, str):
path_ = path_.strip().split("/")
if path_[0] == "":
path_ = path_[1:]
obj = self
for section_name in path_[:-1]:
try:
obj = obj[section_name]
except KeyError:
obj[section_name] = {} # creates section
obj = obj[section_name]
return obj, path_ | Auto-creates section structure
Last element in path_ is considered to be the "filename" (item name)
Returns: (configobj.Section object, converted path) |
def get_item(self, path_, default):
section, path_ = self._get_section(path_)
key = path_[-1]
if key not in section:
self.set_item(path_, default)
return default
xvalue = section[key]
type_ = type(default)
if type_ in (str, list):
return xvalue
elif type_ == bool:
value = True if xvalue == "True" else False if xvalue == "False" else eval(xvalue)
elif type_ in (int, float):
value = type_(xvalue)
elif default is None:
value = None if xvalue == "None" else eval(xvalue)
else:
raise TypeError("Type not supported: {}".format(type_.__name__))
return value | Return item or default. In the latter case, the default is also written to the file.
Arguments:
path_ -- path to item in section/subsection structure. May be either:
["section", "subsection", ...] or
"[/]section/subsection/..." (leading slash is tolerated)
default -- value to return if item is not found
Argument 'default' is also used to determine the type of the data to return:
- str and list: returned as retrieved
- int and float: eval'ed
- bool: parsed |
def set_item(self, path_, value):
section, path_ = self._get_section(path_)
section[path_[-1]] = value
self.write() | Sets item and automatically saves file |
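A hypothetical round-trip using the two methods above (file and key names are made up; assumes an AAConfigObj as returned by get_config_obj):

cfg = get_config_obj('.myapp.conf')      # created in the user's home folder if missing
size = cfg.get_item('window/size', 100)  # first run: writes size = 100 under [window] and returns 100
cfg.set_item('window/size', 120)         # updates the value and saves the file immediately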
def all_subslices(itr):
assert iterable(itr), 'generators.all_subslices only accepts iterable arguments, not {}'.format(itr)
if not hasattr(itr, '__len__'): # if itr isnt materialized, make it a deque
itr = deque(itr)
len_itr = len(itr)
for start,_ in enumerate(itr):
d = deque()
for i in islice(itr, start, len_itr): # how many slices for this round
d.append(i)
yield tuple(d) | generates every possible slice that can be generated from an iterable |
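A quick check of the output (assuming all_subslices and the helpers it uses (iterable, deque, islice) are in scope, as in the generators package it comes from):

print(list(all_subslices([1, 2, 3])))
# -> [(1,), (1, 2), (1, 2, 3), (2,), (2, 3), (3,)]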
def just(*args):
    ''' this works as an infinite loop that yields
    the given argument(s) over and over
    '''
    assert len(args) >= 1, 'generators.just needs at least one arg'
    if len(args) == 1: # if only one arg is given
        try:
            # try to cycle in a set for iteration speedup
            return cycle(set(args))
        except:
            # revert to cycling args as a tuple
            return cycle(args)
    else:
        return cycle({args}) | this works as an infinite loop that yields
the given argument(s) over and over |
def create_countries_geo_zone(cls, countries_geo_zone, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_countries_geo_zone_with_http_info(countries_geo_zone, **kwargs)
else:
(data) = cls._create_countries_geo_zone_with_http_info(countries_geo_zone, **kwargs)
return data | Create CountriesGeoZone
Create a new CountriesGeoZone
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_countries_geo_zone(countries_geo_zone, async=True)
>>> result = thread.get()
:param async bool
:param CountriesGeoZone countries_geo_zone: Attributes of countriesGeoZone to create (required)
:return: CountriesGeoZone
If the method is called asynchronously,
returns the request thread. |
def delete_countries_geo_zone_by_id(cls, countries_geo_zone_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, **kwargs)
else:
(data) = cls._delete_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, **kwargs)
return data | Delete CountriesGeoZone
Delete an instance of CountriesGeoZone by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_countries_geo_zone_by_id(countries_geo_zone_id, async=True)
>>> result = thread.get()
:param async bool
:param str countries_geo_zone_id: ID of countriesGeoZone to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_countries_geo_zone_by_id(cls, countries_geo_zone_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, **kwargs)
else:
(data) = cls._get_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, **kwargs)
return data | Find CountriesGeoZone
Return single instance of CountriesGeoZone by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_countries_geo_zone_by_id(countries_geo_zone_id, async=True)
>>> result = thread.get()
:param async bool
:param str countries_geo_zone_id: ID of countriesGeoZone to return (required)
:return: CountriesGeoZone
If the method is called asynchronously,
returns the request thread. |
def list_all_countries_geo_zones(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_countries_geo_zones_with_http_info(**kwargs)
else:
(data) = cls._list_all_countries_geo_zones_with_http_info(**kwargs)
return data | List CountriesGeoZones
Return a list of CountriesGeoZones
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_countries_geo_zones(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[CountriesGeoZone]
If the method is called asynchronously,
returns the request thread. |
def replace_countries_geo_zone_by_id(cls, countries_geo_zone_id, countries_geo_zone, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, countries_geo_zone, **kwargs)
else:
(data) = cls._replace_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, countries_geo_zone, **kwargs)
return data | Replace CountriesGeoZone
Replace all attributes of CountriesGeoZone
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_countries_geo_zone_by_id(countries_geo_zone_id, countries_geo_zone, async=True)
>>> result = thread.get()
:param async bool
:param str countries_geo_zone_id: ID of countriesGeoZone to replace (required)
:param CountriesGeoZone countries_geo_zone: Attributes of countriesGeoZone to replace (required)
:return: CountriesGeoZone
If the method is called asynchronously,
returns the request thread. |
def update_countries_geo_zone_by_id(cls, countries_geo_zone_id, countries_geo_zone, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, countries_geo_zone, **kwargs)
else:
(data) = cls._update_countries_geo_zone_by_id_with_http_info(countries_geo_zone_id, countries_geo_zone, **kwargs)
return data | Update CountriesGeoZone
Update attributes of CountriesGeoZone
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_countries_geo_zone_by_id(countries_geo_zone_id, countries_geo_zone, async=True)
>>> result = thread.get()
:param async bool
:param str countries_geo_zone_id: ID of countriesGeoZone to update. (required)
:param CountriesGeoZone countries_geo_zone: Attributes of countriesGeoZone to update. (required)
:return: CountriesGeoZone
If the method is called asynchronously,
returns the request thread. |
def data_read_write(data_path_in, data_path_out, format_type, **kwargs):
if format_type == "dense":
# Set dense defaults
kwargs = _set_dense_defaults_and_eval(kwargs)
# Try to parse non label columns appropriately
try:
nlc = [nm.strip() for nm in kwargs['non_label_cols'].split(",")]
kwargs.pop('non_label_cols', None)
except KeyError:
raise KeyError("'non_label_cols' is a required keyword for dense data")
# Read data with dense specific keywords
arch_data = pd.read_csv(data_path_in, sep=kwargs['delimiter'],
na_values=kwargs['na_values'])
form_data = format_dense(arch_data, nlc, **kwargs)
elif format_type == "grid":
pass
elif format_type == "stacked":
pass
elif format_type == "transect":
pass
else:
raise NameError("%s is not a supported data format" % format_type)
form_data.to_csv(data_path_out, index=False) | General function to read, format, and write data.
Parameters
----------
data_path_in : str
Path to the file that will be read
data_path_out : str
Path of the file that will be output
format_type : str
Either 'dense', 'grid', 'stacked', or 'transect'
kwargs
Specific keyword args for given data types. See Notes
Notes
-----
'Dense Parameters'
non_label_cols : str
Comma separated list of non label columns. ex. "lat, long, tree"
delimiter : str
The delimiter for the dense data. Default, ","
na_values : int, float, str
Value to be labeled as NA. Default, ""
See misc.format_dense() for additional keyword parameters |
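A hypothetical invocation of the dense branch (file and column names are made up; assumes the format_dense helper referenced above is available):

data_read_write('survey_dense.csv', 'survey_formatted.csv', 'dense',
                non_label_cols='lat, long, site')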
def _set_dense_defaults_and_eval(kwargs):
kwargs['delimiter'] = kwargs.get('delimiter', ',')
kwargs['na_values'] = kwargs.get('na_values', '')
kwargs['nan_to_zero'] = kwargs.get('nan_to_zero', False)
kwargs['drop_na'] = kwargs.get('drop_na', False)
kwargs['label_col'] = kwargs.get('label_col', 'label')
kwargs['count_col'] = kwargs.get('count_col', 'count')
for key, val in kwargs.iteritems():
try:
kwargs[key] = eval(val)
except:
kwargs[key] = val
return kwargs | Sets default values in kwargs if kwargs are not already given.
Evaluates all values using eval
Parameters
-----------
kwargs : dict
Dictionary of dense specific keyword args
Returns
-------
: dict
Default, evaluated dictionary |
def update_points(self):
n = max(8, min(72, int(2*sqrt(self.r_x+self.r_y))))
d = pi * 2 / n
x, y, r_x, r_y = self.x, self.y, self.r_x, self.r_y
ps = []
for i in range(n):
ps += [(x + r_x * sin(d * i)), (y + r_y * cos(d * i))]
self.points = tuple(ps) | Approximates the ellipse with a polygon of up to 72 sides |
def plot_spectra_stacked(ss, title=None, num_rows=None, setup=_default_setup):
draw_spectra_stacked(ss, title, num_rows, setup)
plt.show() | Plots one or more spectra stacked in subplots sharing the same x-axis.
Args:
ss: list of Spectrum objects
title=None: window title
num_rows=None: (optional) number of rows for subplot grid. If not passed,
num_rows will be the number of plots, and the number of columns will be 1.
If passed, number of columns is calculated automatically.
setup: PlotSpectrumSetup object |
def plot_spectra_overlapped(ss, title=None, setup=_default_setup):
plt.figure()
draw_spectra_overlapped(ss, title, setup)
plt.show() | Plots one or more spectra in the same plot.
Args:
ss: list of Spectrum objects
title=None: window title
setup: PlotSpectrumSetup object |
def plot_spectra_pages_pdf(ss, pdf_filename='pages.pdf', setup=_default_setup):
logger = a99.get_python_logger()
xmin, xmax, ymin_, ymax, xspan, yspan = calc_max_min(ss)
ymin = ymin_ if setup.ymin is None else setup.ymin
num_pages = len(ss)
a99.format_BLB()
pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
for i, s in enumerate(ss):
title = s.title
fig = plt.figure()
plt.plot(s.x, s.y, c=_FAV_COLOR)
if setup.flag_xlabel and setup.fmt_xlabel:
_set_plot(plt.xlabel, setup.fmt_xlabel, s)
if setup.flag_ylabel and setup.fmt_ylabel:
_set_plot(plt.ylabel, setup.fmt_ylabel, s)
_set_plot(plt.title, setup.fmt_title, s)
plt.xlim([xmin-xspan*_T, xmax+xspan*_T])
plt.ylim([ymin-yspan*_T, ymax+yspan*_T])
plt.tight_layout()
plt.subplots_adjust(top=0.94) # workaround for cropped title
logger.info("Printing page {0:d}/{1:d} ('{2!s}')".format(i+1, num_pages, title))
pdf.savefig(fig)
plt.close()
pdf.close()
logger.info("File {0!s} successfully created.".format(pdf_filename)) | Plots spectra into a PDF file, one spectrum per page.
Splits into several pieces of width
Args:
ss: list of Spectrum objects
pdf_filename: name of output file |
def calc_max_min(ss):
    xmin, xmax, ymin, ymax = 1e38, -1e38, 1e38, -1e38
    for s in ss:
        assert isinstance(s, ft.Spectrum)
        if len(s.x) > 0:
            xmin, xmax = min(min(s.x), xmin), max(max(s.x), xmax)
            ymin, ymax = min(min(s.y), ymin), max(max(s.y), ymax)
    xspan = xmax-xmin
    yspan = ymax - ymin
    return xmin, xmax, ymin, ymax, xspan, yspan | Calculates (x, y) (max, min) for a list of Spectrum objects.
Returns (xmin, xmax, ymin, ymax, xspan, yspan) |
Returns (xmin, xmax, ymin, ymax, xspan, yspan) |
def target(self):
if self.council == 'Security Council' and self.category != 'Repeal':
# e.g. N:ever-wandering_souls
entity_type, entity_name = self.option.split(':')
entity_types = {
'R': aionationstates.Region,
'N': aionationstates.Nation
}
return entity_types[entity_type](entity_name) | :class:`Nation`, :class:`Region`, or None: Target of a
Liberation, Commendation, or Condemnation. ``None`` if the
resolution is not a Liberation, Commendation, or Condemnation. |
def repeal_target(self):
if not self.category == 'Repeal':
raise TypeError("This resolution doesn't repeal anything")
return wa.resolution(int(self.option) + 1) | The resolution this resolution has repealed, or is attempting
to repeal.
Returns
-------
:class:`ApiQuery` of :class:`Resolution`
Raises
------
TypeError:
If the resolution doesn't repeal anything. |
def resolution(self, index):
@api_query('resolution', id=str(index))
async def result(_, root):
elem = root.find('RESOLUTION')
if not elem:
raise NotFound(f'No resolution found with index {index}')
return Resolution(elem)
return result(self) | Resolution with a given index.
Parameters
----------
index : int
Resolution index.
Global if this is the ``aionationstates.wa`` object, local
if this is ``aionationstates.ga`` or ``aionationstates.sc``.
Returns
-------
:class:`ApiQuery` of :class:`Resolution`
Raises
------
:class:`NotFound`
If a resolution with the requested index doesn't exist. |
async def resolution_at_vote(self, root):
elem = root.find('RESOLUTION')
if elem:
resolution = ResolutionAtVote(elem)
resolution._council_id = self._council_id
return resolution | The proposal currently being voted on.
Returns
-------
:class:`ApiQuery` of :class:`ResolutionAtVote`
:class:`ApiQuery` of None
If no resolution is currently at vote. |
def get_binary_path(executable):
if sys.platform == 'win32':
if executable == 'start':
return executable
executable = executable + '.exe'
if executable in os.listdir('.'):
binary = os.path.join(os.getcwd(), executable)
else:
binary = next((os.path.join(path, executable)
for path in os.environ['PATH'].split(os.pathsep)
if os.path.isfile(os.path.join(path, executable))), None)
else:
binary = Popen(['which', executable], stdout=PIPE).stdout.read().strip().decode('utf-8')
return binary if binary else None | Gets the software name and returns the path of the binary. |
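Usage is a one-liner; the result below is indicative for a POSIX system with the which command available:

print(get_binary_path('python'))  # -> e.g. '/usr/bin/python'; None if the executable is not found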
def indent(self, levels, first_line=None):
self._indentation_levels.append(levels)
self._indent_first_line.append(first_line) | Increase indentation by ``levels`` levels. |
def wrap(self, text, width=None, indent=None):
width = width if width is not None else self.options.wrap_length
indent = indent if indent is not None else self.indentation
initial_indent = self.initial_indentation
return textwrap.fill(text, width=width,
initial_indent=initial_indent,
subsequent_indent=indent) | Return ``text`` wrapped to ``width`` and indented with ``indent``.
By default:
* ``width`` is ``self.options.wrap_length``
* ``indent`` is ``self.indentation``. |
def Construct(self): # pylint: disable-msg=C0103
# Parse the GDML
self.gdml_parser.Read(self.filename)
self.world = self.gdml_parser.GetWorldVolume()
self.log.info("Materials:")
self.log.info(G4.G4Material.GetMaterialTable())
# Return pointer to world volume
return self.world | Construct a cuboid from a GDML file without sensitive detector |
def Construct(self): # pylint: disable-msg=C0103
# Parse the GDML
self.world = self.gdml_parser.GetWorldVolume()
# Create sensitive detector
self.sensitive_detector = ScintSD()
# Get logical volume for X view, then attach SD
my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(1)
assert my_lv.GetName() == "ScintillatorBarX"
my_lv.SetSensitiveDetector(self.sensitive_detector)
# Get logical volume for Y view, then attach SD
my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(2)
assert my_lv.GetName() == "ScintillatorBarY"
my_lv.SetSensitiveDetector(self.sensitive_detector)
my_lv = G4.G4LogicalVolumeStore.GetInstance().GetVolumeID(0)
assert my_lv.GetName() == "SteelPlane"
# field
self.field_manager = G4.G4FieldManager()
self.my_field = MagneticField.WandsToroidField(self.field_polarity)
self.field_manager.SetDetectorField(self.my_field)
self.field_manager.CreateChordFinder(self.my_field)
my_lv.SetFieldManager(self.field_manager, False)
self.log.info("Materials:")
self.log.info(G4.G4Material.GetMaterialTable())
# Return pointer to world volume
return self.world | Construct nuSTORM from a GDML file |
def included(powernode:str, inclusions:dict, nodes_only=False) -> iter:
if nodes_only:
condition = lambda e: e != powernode and inclusions[e] == ()
else:
condition = lambda e: e != powernode
yield from (elem for elem in utils.walk(powernode, (inclusions,))
if condition(elem)) | Yield (power)nodes below given powernode (contained by it,
or contained by a powernode contained by it, etc).
>>> sorted(included('p1', {'p1': ('p2', 1), 'p2': (3,), 1: (), 3: ()}), key=str)
[1, 3, 'p2']
>>> sorted(included('p1', {'p1': ('p2', 1), 'p2': (3,), 1: (), 3: ()}, nodes_only=True), key=str)
[1, 3] |
def mergeability_validation(tree:BubbleTree) -> iter:
def gen_warnings(one, two, inc_message:str) -> [str]:
"Yield the warning for given (power)nodes if necessary"
nodetype = ''
if tree.inclusions[one] and tree.inclusions[two]:
nodetype = 'power'
elif tree.inclusions[one] or tree.inclusions[two]:
nodetype = '(power)'
if one > two: one, two = two, one
shared = set(tree.edges.get(one, ())) & set(tree.edges.get(two, ()))
if shared:
yield (f"WARNING mergeable {nodetype}nodes: {one} and {two}"
f" are {inc_message}, and share"
f" {len(shared)} neigbor{'s' if len(shared) > 1 else ''}")
for one, two in it.combinations(tree.roots, 2):
yield from gen_warnings(one, two, inc_message='both roots')
for parent, childs in tree.inclusions.items():
for one, two in it.combinations(childs, 2):
yield from gen_warnings(one, two, inc_message=f'in the same level (under {parent})') | Yield messages about mergeable powernodes |
def guild_details(guild_id=None, name=None):
if guild_id and name:
warnings.warn("both guild_id and name are specified, "
"name will be ignored")
if guild_id:
params = {"guild_id": guild_id}
cache_name = "guild_details.%s.json" % guild_id
elif name:
params = {"guild_name": name}
cache_name = "guild_details.%s.json" % name
else:
raise Exception("specify either guild_id or name")
return get_cached("guild_details.json", cache_name, params=params) | This resource returns details about a guild.
:param guild_id: The guild id to query for.
:param name: The guild name to query for.
*Note: Only one parameter is required; if both are set, the guild Id takes
precedence and a warning will be logged.*
The response is a dictionary with the following keys:
guild_id (string):
The guild id.
guild_name (string):
The guild name.
tag (string):
The guild tag.
emblem (object):
If present, it holds detailed information about the guilds emblem.
The emblem dictionary contains the following information:
background_id (number):
The id of the background image.
foreground_id (number):
The id of the foreground image.
flags (list):
A list of additional flags, possible values are:
``FlipBackgroundHorizontal``, ``FlipBackgroundVertical``,
``FlipForegroundHorizontal`` and ``FlipForegroundVertical``.
background_color_id (number):
The background color id.
foreground_primary_color_id (number):
The primary foreground color id.
foreground_secondary_color_id (number):
The secondary foreground color id. |
def chunks(stream, chunk_size, output_type=tuple):
    ''' returns chunks of a stream '''
    assert iterable(stream), 'chunks needs stream to be iterable'
    assert (isinstance(chunk_size, int) and chunk_size > 0) or callable(chunk_size), 'chunks needs chunk_size to be a positive int or callable'
    assert callable(output_type), 'chunks needs output_type to be callable'
    if callable(chunk_size):
        ''' chunk_size is acting as a separator function '''
        for chunk in chunk_on(stream, chunk_size, output_type):
            yield chunk
    else:
        it = iter(stream)
        marker = object()
        iters = [it] * chunk_size
        pipeline = apply_to_last(
            zip_longest(*iters, fillvalue=marker),
            lambda last_chunk: tuple(i for i in last_chunk if i is not marker)
        )
        if output_type is not tuple:
            pipeline = map(output_type, pipeline)
        for chunk in pipeline:
            yield chunk | returns chunks of a stream |
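A quick demonstration (assuming chunks and the helpers it relies on (iterable, chunk_on, apply_to_last, zip_longest) are in scope, as in the generators package):

print(list(chunks(range(10), 3)))
# -> [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]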
def camelcase_to_slash(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1/\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1/\2', s1).lower() | Converts CamelCase to camel/case
code ripped from http://stackoverflow.com/questions/1175208/does-the-python-standard-library-have-function-to-convert-camelcase-to-camel-cas |
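Two indicative conversions (assuming the function above is in scope and re is imported):

print(camelcase_to_slash('CamelCase'))      # -> camel/case
print(camelcase_to_slash('MyHTTPHandler'))  # -> my/http/handler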
def get_charset(request):
    content_type = request.META.get('CONTENT_TYPE', None)
    return extract_charset(content_type) if content_type else None | Extract charset from the content type |
def parse_accept_header(accept):
def parse_media_range(accept_item):
""" Parse media range and subtype """
return accept_item.split('/', 1)
def comparator(a, b):
""" Compare accept items a and b """
# first compare q values
result = -cmp(a[2], b[2])
if result != 0:
# q values differ, no need to compare media types
return result
# parse media types and compare them (asterisks are lower in precedence)
mtype_a, subtype_a = parse_media_range(a[0])
mtype_b, subtype_b = parse_media_range(b[0])
if mtype_a == '*' and subtype_a == '*':
return 1
if mtype_b == '*' and subtype_b == '*':
return -1
if subtype_a == '*':
return 1
if subtype_b == '*':
return -1
return 0
if not accept:
return []
result = []
for media_range in accept.split(","):
parts = media_range.split(";")
media_type = parts.pop(0).strip()
media_params = []
q = 1.0
for part in parts:
(key, value) = part.lstrip().split("=", 1)
if key == "q":
q = float(value)
else:
media_params.append((key, value))
result.append((media_type, tuple(media_params), q))
result.sort(comparator)
return result | Parse the Accept header
todo: memoize
:returns: list of triples (media_type, media_params, q_value), ordered by q
values. |
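An indicative call; note this code is Python 2 (it relies on cmp and the cmp-style list.sort), so the example assumes that interpreter:

print parse_accept_header("text/html;q=0.8, application/json, */*;q=0.1")
# -> [('application/json', (), 1.0), ('text/html', (), 0.8), ('*/*', (), 0.1)]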
def _resolve_path(obj, path=None):
if path:
for attr_name in path.split('__'):
obj = getattr(obj, attr_name)
return obj | Resolve a Django-like path, e.g. object2__object3, for an object
Args:
obj: The object the view is displaying.
path (str, optional): Double-underscore-separated attribute path
Returns:
The object at the end of the resolved path |
def in_session(self):
session = self.get_session()
try:
yield session
session.commit()
except IntegrityError:
session.rollback()
raise DuplicateError("Duplicate unique value detected!")
except (OperationalError, DisconnectionError):
session.rollback()
self.close()
logger.warn("Database Connection Lost!")
raise DatabaseConnectionError()
except Exception:
session.rollback()
raise
finally:
session.close() | Provide a session scope around a series of operations. |
def info(self, req) -> ResponseInfo:
r = ResponseInfo()
r.version = "1.0"
r.last_block_height = 0
r.last_block_app_hash = b''
return r | Since this will always respond with height=0, Tendermint
will resync this app from the beginning |
def check_tx(self, tx) -> ResponseCheckTx:
value = decode_number(tx)
if not value == (self.txCount + 1):
# respond with non-zero code
return ResponseCheckTx(code=1)
return ResponseCheckTx(code=CodeTypeOk) | Validate the Tx before entry into the mempool
Checks the txs are submitted in order 1,2,3...
If not in order, a non-zero code is returned and the tx
will be dropped. |
def query(self, req) -> ResponseQuery:
v = encode_number(self.txCount)
return ResponseQuery(code=CodeTypeOk, value=v, height=self.last_block_height) | Return the last tx count |
def commit(self) -> ResponseCommit:
hash = struct.pack('>Q', self.txCount)
return ResponseCommit(data=hash) | Return the current encoded state value to Tendermint |
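The state hash is just the tx count packed as a big-endian unsigned 64-bit integer, for example:

import struct
print(struct.pack('>Q', 5))
# -> b'\x00\x00\x00\x00\x00\x00\x00\x05'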
def track(context, file_names):
context.obj.find_repo_type()
for fn in file_names:
context.obj.call([context.obj.vc_name, 'add', fn]) | Keep track of each file in list file_names.
Tracking does not create or delete the actual file, it only tells the
version control system whether to maintain versions (to keep track) of
the file. |
def untrack(context, file_names):
context.obj.find_repo_type()
for fn in file_names:
if context.obj.vc_name == 'git':
context.obj.call(['git', 'rm', '--cached', fn])
elif context.obj.vc_name == 'hg':
context.obj.call(['hg', 'forget', fn]) | Forget about tracking each file in the list file_names
Tracking does not create or delete the actual file, it only tells the
version control system whether to maintain versions (to keep track) of
the file. |
def commit(context, message, name):
context.obj.find_repo_type()
if context.obj.vc_name == 'git':
context.obj.call(['git', 'commit', '-a', '-m', message])
elif context.obj.vc_name == 'hg':
context.obj.call(['hg', 'commit', '-m', message])
if name != '' and context.obj.vc_name == 'git':
context.obj.call(['git', 'tag', '-a', name, '-m', message])
elif name != '' and context.obj.vc_name == 'hg':
context.obj.call(['hg', 'tag', '-m', message, name]) | Commit saved changes to the repository.
message - commit message
name - tag name |
def revert(context, file_names):
context.obj.find_repo_type()
if len(file_names) == 0:
click.echo('No file names to checkout specified.')
click.echo('The following have changed since the last check in.')
context.invoke(status)
for fn in file_names:
if context.obj.vc_name == 'git':
context.obj.call(['git', 'checkout', '--', fn])
elif context.obj.vc_name == 'hg':
context.obj.call(['hg', 'revert', '--no-backup', fn]) | Revert each file in the list file_names back to version in repo |
def up(context):
context.obj.find_repo_type()
if context.obj.vc_name == 'git':
context.obj.call(['git', 'push'])
context.obj.call(['git', 'push', '--tags'])
elif context.obj.vc_name == 'hg':
context.obj.call(['hg', 'push']) | (upload) Synchronise local repo to remote repo |
def down(context, repo_url):
if repo_url == '':
context.obj.find_repo_type()
if context.obj.vc_name == 'git':
context.obj.call(['git', 'pull'])
elif context.obj.vc_name == 'hg':
context.obj.call(['hg', 'pull', '-u'])
else:
context.obj.call([context.obj.vc_name, 'clone', repo_url]) | (download) Synchronise remote repo to local repo.
If repo_url is given, then clone from remote URL. |
def status(context):
context.obj.find_repo_type()
context.obj.call([context.obj.vc_name, 'status']) | See which files have changed, checked in, and uploaded |