code
stringlengths 52
7.75k
| docs
stringlengths 1
5.85k
|
---|---|
def rotateDirection(self, displayDirection):
    """Rotate the screen direction.

    :param displayDirection: screen direction; one of 0, 90, 180, 270.

    .. note::
       After rotating, the default View is resized to the current screen size.
    """
    # Width/height only need swapping when the rotation changes orientation
    # (e.g. 0 -> 90); _needSwapWH decides that from old vs. new direction.
    if self._needSwapWH(self._display_direction, displayDirection):
        self._display_size = (self._display_size[1], self._display_size[0])
        # Rebuild the frame buffer for the new geometry before resizing the View.
        if self.redefineBuffer({"size": self._display_size, "color_mode": self._buffer_color_mode}):
            self.View.resize(self._display_size[0], self._display_size[1])
    # Record the new direction unconditionally (180-degree turns swap nothing).
    self._display_direction = displayDirection
def create_tax_class(cls, tax_class, **kwargs):
    """Create TaxClass

    Create a new TaxClass.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.create_tax_class(tax_class, async=True)
    >>> result = thread.get()

    :param async bool
    :param TaxClass tax_class: Attributes of taxClass to create (required)
    :return: TaxClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._create_tax_class_with_http_info(tax_class, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._create_tax_class_with_http_info(tax_class, **kwargs)
        return data
def delete_tax_class_by_id(cls, tax_class_id, **kwargs):
    """Delete TaxClass

    Delete an instance of TaxClass by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.delete_tax_class_by_id(tax_class_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str tax_class_id: ID of taxClass to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._delete_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._delete_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
        return data
def get_tax_class_by_id(cls, tax_class_id, **kwargs):
    """Find TaxClass

    Return single instance of TaxClass by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_tax_class_by_id(tax_class_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str tax_class_id: ID of taxClass to return (required)
    :return: TaxClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
        return data
def list_all_tax_classes(cls, **kwargs):
    """List TaxClasses

    Return a list of TaxClasses.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_tax_classes(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[TaxClass]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._list_all_tax_classes_with_http_info(**kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._list_all_tax_classes_with_http_info(**kwargs)
        return data
def replace_tax_class_by_id(cls, tax_class_id, tax_class, **kwargs):
    """Replace TaxClass

    Replace all attributes of TaxClass.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_tax_class_by_id(tax_class_id, tax_class, async=True)
    >>> result = thread.get()

    :param async bool
    :param str tax_class_id: ID of taxClass to replace (required)
    :param TaxClass tax_class: Attributes of taxClass to replace (required)
    :return: TaxClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._replace_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._replace_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs)
        return data
def update_tax_class_by_id(cls, tax_class_id, tax_class, **kwargs):
    """Update TaxClass

    Update attributes of TaxClass.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_tax_class_by_id(tax_class_id, tax_class, async=True)
    >>> result = thread.get()

    :param async bool
    :param str tax_class_id: ID of taxClass to update. (required)
    :param TaxClass tax_class: Attributes of taxClass to update. (required)
    :return: TaxClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._update_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._update_tax_class_by_id_with_http_info(tax_class_id, tax_class, **kwargs)
        return data
def autodiscover_modules(packages, related_name_re='.+',
                         ignore_exceptions=False):
    """Autodiscover function follows the pattern used by Celery.

    :param packages: List of package names to auto discover modules in.
    :type packages: list of str
    :param related_name_re: Regular expression used to match modules names.
    :type related_name_re: str
    :param ignore_exceptions: Ignore exception when importing modules.
    :type ignore_exceptions: bool
    """
    warnings.warn('autodiscover_modules has been deprecated. '
                  'Use Flask-Registry instead.', DeprecationWarning)
    global _RACE_PROTECTION
    # Re-entrancy guard: importing the discovered modules may call back
    # into this function; bail out instead of recursing.
    if _RACE_PROTECTION:
        return []
    _RACE_PROTECTION = True
    modules = []
    try:
        # Flatten the per-package module lists, skipping failed imports (None).
        for pkg in packages:
            for module in find_related_modules(pkg, related_name_re,
                                               ignore_exceptions):
                if module is not None:
                    modules.append(module)
    finally:
        # Always release the guard, even when an import raises.
        _RACE_PROTECTION = False
    return modules
def find_related_modules(package, related_name_re='.+',
                         ignore_exceptions=False):
    """Find matching modules using a package and a module name pattern.

    :param package: dotted package name to search in.
    :param related_name_re: regular expression matched against the last
        component of each module name.
    :param ignore_exceptions: if True, silently skip modules that fail
        to import.
    :return: list of imported module objects (may be empty).
    """
    warnings.warn('find_related_modules has been deprecated.',
                  DeprecationWarning)
    package_elements = package.rsplit(".", 1)
    try:
        if len(package_elements) == 2:
            # "pkg.sub" -> import "pkg" with "sub" in fromlist, then walk down.
            pkg = __import__(package_elements[0], globals(), locals(), [
                package_elements[1]])
            pkg = getattr(pkg, package_elements[1])
        else:
            pkg = __import__(package_elements[0], globals(), locals(), [])
        pkg_path = pkg.__path__
    except AttributeError:
        # Not a package (no __path__): nothing to discover.
        return []
    # Find all modules named according to related_name
    p = re.compile(related_name_re)
    modules = []
    for name in find_modules(package, include_packages=True):
        if p.match(name.split('.')[-1]):
            try:
                modules.append(import_string(name, silent=ignore_exceptions))
            except Exception:
                if not ignore_exceptions:
                    # Bare raise preserves the original traceback.
                    raise
    return modules
def import_related_module(package, pkg_path, related_name,
                          ignore_exceptions=False):
    """Import module from given path.

    :param package: dotted name of the parent package.
    :param pkg_path: ``__path__`` of the parent package.
    :param related_name: name of the submodule to import.
    :param ignore_exceptions: if True, log import failures instead of raising.
    :return: the imported submodule, or None when it does not exist.
    """
    try:
        # Probe for existence first; missing submodule is not an error.
        imp.find_module(related_name, pkg_path)
    except ImportError:
        return
    try:
        return getattr(
            __import__('%s' % (package), globals(), locals(), [related_name]),
            related_name
        )
    except Exception:
        if ignore_exceptions:
            current_app.logger.exception(
                'Can not import "{}" package'.format(package)
            )
        else:
            # Bare raise preserves the original traceback.
            raise
def ansi(string, *args):
    """Convenience function to chain multiple ColorWrappers to a string.

    :param string: text to decorate.
    :param args: ANSI escape sequences applied, in order, before the text.
    :raises ValueError: if any extra argument is not an ANSI escape string.
    :return: decorated string terminated with a style reset.
    """
    # Renamed from "ansi" so the accumulator no longer shadows this function.
    prefix = ''
    for arg in args:
        arg = str(arg)
        if not re.match(ANSI_PATTERN, arg):
            raise ValueError('Additional arguments must be ansi strings')
        prefix += arg
    return prefix + string + colorama.Style.RESET_ALL
def puts(*args, **kwargs):
    """Full feature printing function featuring trimming and padding
    for both files and ttys.

    Keyword arguments:
        trim: trim each line to the terminal width (default False).
        padding: padding character repeated up to the terminal width
            (default None, i.e. no padding).
        stream: output stream (default sys.stdout).
    """
    # parse kwargs
    trim = kwargs.pop('trim', False)
    padding = kwargs.pop('padding', None)
    stream = kwargs.pop('stream', sys.stdout)
    # HACK: check if stream is IndentedFile
    indent = getattr(stream, 'indent', 0)
    # stringify args
    args = [str(i) for i in args]

    # helpers
    def trimstr(ansi, width):
        # Trim `ansi` to `width` visible characters; ANSI escape codes do
        # not count towards the width. Returns (string, visible_size).
        string = ''; size = 0; i = 0
        while i < len(ansi):
            mobj = re.match(ANSI_PATTERN, ansi[i:])
            if mobj:
                # append ansi code
                string = string + mobj.group(0)
                i += len(mobj.group(0))
            else:
                # loop for more ansi codes even at max width
                size += 1
                if size > width:
                    break
                # append normal char
                string = string + ansi[i]
                i += 1
        return (string, size)

    # process strings
    if not stream.isatty():
        # remove ansi codes and print
        for string in args:
            stream.write(re.sub(ANSI_PATTERN, '', string) + '\n')
    else:
        # get terminal width
        try:
            curses.setupterm()
        except Exception:
            # Unknown terminal: cannot measure width, so disable both features.
            trim = False
            padding = None
        else:
            width = curses.tigetnum('cols') - indent
        for string in args:
            if trim or padding:
                trimmed, size = trimstr(string, width)
                # trim string
                if trim:
                    if len(trimmed) < len(string):
                        # Re-trim 3 narrower to make room for the ellipsis.
                        trimmed = trimstr(string, width - 3)[0] + colorama.Style.RESET_ALL + '...'
                    string = trimmed
                # add padding
                if padding:
                    string += padding * (width - size)
            # print final string
            stream.write(string + '\n')
def _restore_tree_for(root, translate):
    # type: (Any, Dict[Type[Nonterminal], Type[Rule]]) -> Union[Nonterminal, Terminal]
    """Create part of AST that generates epsilon.

    :param root: Symbol in the original rule that results in epsilon.
        Can be Nonterminal or epsilon itself.
    :param translate: Dictionary where key is nonterminal and value is rule
        which is next to generate epsilon.
    :return: Nonterminal instance with part of AST generating epsilon.
    """
    # the symbol is epsilon directly, just return Terminal.
    if root is EPSILON:
        return Terminal(EPSILON)
    # create nonterminal
    created_nonterm = root()  # type: Nonterminal
    created_rule = translate[root]()  # type: Rule
    created_nonterm._set_to_rule(created_rule)
    created_rule._from_symbols.append(created_nonterm)
    # all symbols from the right are rewritable to epsilon, so we need to restore them as well
    for ch in created_rule.right:
        p = _restore_tree_for(ch, translate)  # type: Nonterminal
        p._set_from_rule(created_rule)
        created_rule._to_symbols.append(p)
    return created_nonterm
def epsilon_rules_restore(root):
    # type: (Nonterminal) -> Nonterminal
    """Transform parsed tree to contain epsilon rules originally removed
    from the grammar.

    :param root: Root of the parsed tree.
    :return: Modified tree including epsilon rules.
    """
    items = Traversing.post_order(root)
    items = filter(lambda x: isinstance(x, EpsilonRemovedRule), items)
    for rule in items:
        # create original rule
        created_rule = rule.from_rule()  # type: Rule
        # attach parents
        for s in rule.from_symbols:  # type: Nonterminal
            s._set_to_rule(created_rule)
            created_rule._from_symbols.append(s)
        # attach children up to replace index (that will contain epsilon)
        for i in range(rule.replace_index):
            ch = rule.to_symbols[i]  # type: Nonterminal
            ch._set_from_rule(created_rule)
            created_rule._to_symbols.append(ch)
        # add symbols originally rewritten to epsilon
        symb = _restore_tree_for(created_rule.right[rule.replace_index], rule.backtrack)  # type: Nonterminal
        created_rule._to_symbols.append(symb)
        symb._set_from_rule(created_rule)
        # attach rest of children
        for i in range(rule.replace_index, len(rule.to_symbols)):
            ch = rule.to_symbols[i]  # type: Nonterminal
            ch._set_from_rule(created_rule)
            created_rule._to_symbols.append(ch)
    return root
def char_between(lower, upper, func_name):
    """Return current char and step if char is between lower and upper.

    The predicate is registered under ``func_name`` with
    ``register_function`` so it can be referenced by name later.

    :param lower: inclusive lower bound character.
    :param upper: inclusive upper bound character.
    :param func_name: name to register the predicate under.
    """
    function = register_function(func_name,
                                 lambda char: lower <= char <= upper)
    return char_on_predicate(function)
def char_in(string, func_name):
    """Return current char and step if char is in string.

    The predicate is registered under ``func_name`` with
    ``register_function`` so it can be referenced by name later.

    :param string: set of accepted characters.
    :param func_name: name to register the predicate under.
    """
    function = register_function(func_name,
                                 lambda char: char in string)
    return char_on_predicate(function)
def update_version(self, version, step=1):
    """Compute a new version and write it as a tag.

    :param version: version object whose ``patch``/``minor``/``major``/
        ``build_number`` components are bumped according to the config flags.
    :param step: increment applied to each selected component.
    :return: the resulting version (possibly replaced by the plugin).
    """
    # update the version based on the flags passed.
    if self.config.patch:
        version.patch += step
    if self.config.minor:
        version.minor += step
    if self.config.major:
        version.major += step
    if self.config.build:
        version.build_number += step
    if self.config.build_number:
        # An explicitly-configured build number overrides any increment.
        version.build_number = self.config.build_number
    # create a new tag in the repo with the new version.
    if self.config.dry_run:
        log.info('Not updating repo to version {0}, because of --dry-run'.format(version))
    else:
        version = self.call_plugin_function('set_version', version)
    return version
def row(self):
    """Returns current data row: MyDBRow object, or None.

    Returns None when the table widget has no current row selected
    (currentRow() is negative).
    """
    ret = None
    i = self.tableWidget.currentRow()
    if i >= 0:
        ret = self._data[i]
    return ret
def _find_id(self, id_):
for i, row in enumerate(self._data):
if row["id"] == id_:
t = self.tableWidget
# idx = t.itemFromIndex()
t.setCurrentCell(i, 0)
break | Moves to row where formula is (if found, otherwise does nothing) |
def _wanna_emit_id_changed(self):
if self._last_id != self._get_id():
self._last_id = self._get_id()
self.id_changed.emit() | Filters intentions to emit the id_changed signal (only does if id really changed) |
def _get_id(self):
ret = None
row = self.row
if row:
ret = row["id"]
return ret | Getter because using the id property from within was not working |
def remove_unreachable_symbols(grammar, inplace=False):
    # type: (Grammar, bool) -> Grammar
    """Remove unreachable symbols from the grammar.

    :param grammar: Grammar where to remove the symbols.
    :param inplace: True if transformation should be performed in place.
        False by default.
    :return: Grammar without unreachable symbols.
    """
    # copy if required
    if inplace is False:
        grammar = copy(grammar)
    # check if start symbol is set
    if grammar.start is None:
        raise StartSymbolNotSetException()
    # create process sets
    reachable = {grammar.start}
    rules = grammar.rules.copy()
    # begin iterations: fixed-point computation of the reachable set
    while True:
        # create sets for current iteration
        active = reachable.copy()
        # loop the working rules
        for rule in rules.copy():
            # if left part of rule already in reachable symbols
            if rule.fromSymbol in reachable:
                # set symbols on the right as reachable
                for symbol in rule.right:
                    active.add(symbol)
                # remove rule from the next iteration
                rules.remove(rule)
        # end of rules loop
        # if current and previous iterations are same, we are done
        if active == reachable:
            break
        # otherwise swap the sets
        reachable = active
    # remove the symbols
    nonterminals_to_remove = grammar.nonterminals.difference(reachable)
    terminals_to_remove = grammar.terminals.difference(reachable)
    grammar.nonterminals.remove(*nonterminals_to_remove)
    grammar.terminals.remove(*terminals_to_remove)
    # return grammar
    return grammar
def fermion_avg(efermi, norm_hopping, func):
    """Calculate, for every slave, its average of the desired observable.

    :param efermi: iterable of Fermi energies (one per slave).
    :param norm_hopping: iterable of normalized hoppings (one per slave).
    :param func: observable selector: 'ekin' or 'ocupation', or a callable
        taking (efermi, hopping) directly.
    :return: numpy array of per-slave averages.
    """
    if func == 'ekin':
        func = bethe_ekin_zeroT
    elif func == 'ocupation':
        func = bethe_filling_zeroT
    return np.asarray([func(ef, tz) for ef, tz in zip(efermi, norm_hopping)])
def spinflipandhop(slaves):
    """Calculates the interaction term of a spin flip and pair hopping.

    :param slaves: total number of slave-spin states (2 per orbital).
    :return: sparse matrix with the spin-flip + pair-hopping terms.
    """
    # Lowering operators per slave; raising operators are their transposes.
    Sdw = [csr_matrix(spin_gen(slaves, i, 0)) for i in range(slaves)]
    Sup = [mat.T for mat in Sdw]
    sfh = np.zeros_like(Sup[0])
    orbitals = slaves // 2
    # Sum over all distinct orbital pairs (n, m).
    for n in range(orbitals):
        for m in range(n + 1, orbitals):
            # spin flip between orbitals n and m
            sfh += Sup[2*n    ] * Sdw[2*n + 1] * Sup[2*m + 1] * Sdw[2*m    ]
            sfh += Sup[2*n + 1] * Sdw[2*n    ] * Sup[2*m    ] * Sdw[2*m + 1]
            # pair hopping between orbitals n and m
            sfh += Sup[2*n] * Sup[2*n + 1] * Sdw[2*m] * Sdw[2*m + 1]
            sfh += Sup[2*m] * Sup[2*m + 1] * Sdw[2*n] * Sdw[2*n + 1]
    return sfh
def spin_z_op(param, oper):
    """Generates the required Sz operators, given the system parameter setup
    and the operator dictionary.

    :param param: dict with at least 'slaves' and 'orbitals' entries.
    :param oper: dict populated in place with the Sz-derived operators.
    """
    slaves = param['slaves']
    oper['Sz'] = np.array([spin_z(slaves, spin) for spin in range(slaves)])
    oper['Sz+1/2'] = oper['Sz'] + 0.5 * np.eye(2**slaves)
    oper['sumSz2'] = oper['Sz'].sum(axis=0)**2  # because Sz is diagonal
    # Regroup per orbital (2 spins each) to build spin/orbital-resolved sums.
    Sz_mat_shape = oper['Sz'].reshape(param['orbitals'], 2, 2**slaves, 2**slaves)
    oper['sumSz-sp2'] = (Sz_mat_shape.sum(axis=1)**2).sum(axis=0)
    oper['sumSz-or2'] = (Sz_mat_shape.sum(axis=0)**2).sum(axis=0)
def spin_gen_op(oper, gauge):
    """Generates the generic spin matrices for the system.

    :param oper: dict populated in place with the generic spin operators.
    :param gauge: per-slave gauge constants; its length fixes the number
        of slaves.
    """
    slaves = len(gauge)
    oper['O'] = np.array([spin_gen(slaves, i, c) for i, c in enumerate(gauge)])
    # O_d is the per-slave transpose (dagger for real matrices).
    oper['O_d'] = np.transpose(oper['O'], (0, 2, 1))
    oper['O_dO'] = np.einsum('...ij,...jk->...ik', oper['O_d'], oper['O'])
    oper['Sfliphop'] = spinflipandhop(slaves)
def set_filling(self, populations):
    """Sets the orbital energies on the reference of the free case,
    by setting the desired local populations on every orbital,
    then generates the necessary operators to respect such configuration.

    :param populations: per-orbital target populations.
    """
    populations = np.asarray(populations)
    # (historical variant kept for reference)
    # self.param['orbital_e'] -= bethe_findfill_zeroT( \
    #    self.param['avg_particles'],
    #    self.param['orbital_e'],
    #    self.param['hopping'])
    efermi = - bethe_find_crystalfield(
        populations, self.param['hopping'])
    self.param['populations'] = populations
    # fermion_avg(efermi, self.param['hopping'], 'ocupation')
    self.param['ekin'] = fermion_avg(efermi, self.param['hopping'], 'ekin')
    spin_gen_op(self.oper, estimate_gauge(populations))
def reset(self, populations, lag, mu, u_int, j_coup, mean_f):
    """Resets the system into the last known state as given by the input
    values.

    :param populations: per-orbital populations (see set_filling).
    :param lag: Lagrange multiplier ('lambda').
    :param mu: orbital energies.
    :param u_int: Coulomb interaction strength.
    :param j_coup: Hund coupling (fraction of u_int).
    :param mean_f: previous mean field to restart from.
    """
    self.set_filling(populations)
    self.param['lambda'] = lag
    self.param['orbital_e'] = mu
    self.selfconsistency(u_int, j_coup, mean_f)
def update_H(self, mean_field, l):
    """Updates the spin hamiltonian and recalculates its eigenbasis.

    On diagonalization failure the offending Hamiltonian is dumped to
    'errorhamil.npz' before re-raising.

    :param mean_field: mean-field input of the spin Hamiltonian.
    :param l: Lagrange multiplier ('lambda').
    """
    self.H_s = self.spin_hamiltonian(mean_field, l)
    try:
        self.eig_energies, self.eig_states = diagonalize(self.H_s)
    # np.linalg.linalg was a private alias removed in NumPy 2.0; use the
    # public exception class.
    except np.linalg.LinAlgError:
        np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l)
        raise
    except ValueError:
        np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l)
        print(mean_field, l)
        raise
def spin_hamiltonian(self, h, l):
    """Constructs the single site spin Hamiltonian.

    :param h: pair of mean fields; h[0] weights O_d, h[1] weights O.
    :param l: Lagrange multipliers weighting Sz+1/2.
    :return: dense Hamiltonian matrix including the interaction term.
    """
    h_spin = np.einsum('i,ijk', h[1], self.oper['O'])
    h_spin += np.einsum('i,ijk', h[0], self.oper['O_d'])
    h_spin += np.einsum('i,ijk', l, self.oper['Sz+1/2'])
    h_spin += self.oper['Hint']
    return h_spin
def inter_spin_hamiltonian(self, u_int, J_coup):
    """Calculates the interaction Hamiltonian. The Hund coupling is a
    fraction of the Coulomb interaction.

    :param u_int: Coulomb interaction strength U.
    :param J_coup: Hund coupling expressed as a fraction of U.
    :return: interaction Hamiltonian matrix.
    """
    # Rescale the fraction into an absolute coupling (local rebinding only).
    J_coup *= u_int
    h_int = (u_int - 2*J_coup)/2. * self.oper['sumSz2']
    h_int += J_coup * self.oper['sumSz-sp2']
    h_int -= J_coup/2. * self.oper['sumSz-or2']
    h_int -= J_coup * self.oper['Sfliphop']
    return h_int
def expected(self, observable, beta=1e5):
    """Wrapper to the expected_value function to fix the eigenbasis.

    :param observable: operator whose thermal expectation is computed.
    :param beta: inverse temperature (default effectively zero temperature).
    """
    return expected_value(observable,
                          self.eig_energies,
                          self.eig_states,
                          beta)
def quasiparticle_weight(self):
    """Calculates the quasiparticle weight Z = <O>**2 for every slave."""
    return np.array([self.expected(op)**2 for op in self.oper['O']])
def mean_field(self):
    """Calculates the mean field.

    :return: array of shape (2, n_slaves): expectations of O and O_d,
        each scaled by the kinetic energy, with tiny values zeroed.
    """
    mean_field = []
    for sp_oper in [self.oper['O'], self.oper['O_d']]:
        avgO = np.array([self.expected(op) for op in sp_oper])
        # Suppress numerical noise below 1e-10.
        avgO[abs(avgO) < 1e-10] = 0.
        mean_field.append(avgO * self.param['ekin'])
    return np.array(mean_field)
def selfconsistency(self, u_int, J_coup, mean_field_prev=None):
    """Iterates over the hamiltonian to get the stable selfconsistent one.

    :param u_int: Coulomb interaction strength.
    :param J_coup: Hund coupling as a fraction of u_int.
    :param mean_field_prev: starting mean field; defaults to the kinetic
        energy duplicated for both operator channels.
    :return: history of mean fields (hlog) up to convergence.
    """
    if mean_field_prev is None:
        mean_field_prev = np.array([self.param['ekin']] * 2)
    hlog = [mean_field_prev]
    self.oper['Hint'] = self.inter_spin_hamiltonian(u_int, J_coup)
    converging = True
    half_fill = (self.param['populations'] == 0.5).all()
    while converging:
        if half_fill:
            # At half filling lambda is fixed by symmetry; no root search.
            self.update_H(hlog[-1], self.param['lambda'])
        else:
            res = root(self.restriction, self.param['lambda'], (hlog[-1]))  # , method='lm')
            if not res.success:
                # Damped fallback when the root search fails.
                res.x = res.x * 0.5 + 0.5 * self.param['lambda']
                self.update_H(self.mean_field() * 0.5 + 0.5 * hlog[-1], res.x)
                print('fail', self.param['populations'][3:5])
                # Insulating solution: quasiparticle weight vanished everywhere.
                if (self.quasiparticle_weight() < 0.001).all():
                    return hlog
            self.param['lambda'] = res.x
        hlog.append(self.mean_field())
        # Converged when both the mean field and the constraint are within tol.
        converging = (abs(hlog[-1] - hlog[-2]) > self.param['tol']).all() \
            or (abs(self.restriction(self.param['lambda'], hlog[-1])) > self.param['tol']).all()
    return hlog
def restriction(self, lam, mean_field):
    """Lagrange multiplier constraint in lattice slave spin.

    :param lam: Lagrange multipliers.
    :param mean_field: mean field to build the Hamiltonian with.
    :return: per-slave deviation <Sz+1/2> - population (zero at the root).
    """
    self.update_H(mean_field, lam)
    restric = np.array([self.expected(op) - n
                        for op, n in zip(self.oper['Sz+1/2'],
                                         self.param['populations'])])
    return restric
def others2db(file_path, file_type, is_copy, step_id, db_conn):
    """Extract some meta-data from files (actually mostly from their paths)
    and store it in a DB.

    :param file_path: File path.
    :param file_type: File type.
    :param is_copy: Indicate if this file is a copy.
    :param step_id: Step ID.
    :param db_conn: Database connection.
    :return:
    """
    logging.info("Processing '%s'" % file_path)
    df = db_conn.db_session.query(db_conn.DataFile).filter_by(path=file_path).one_or_none()
    if not df:
        # First time we see this path: insert a fresh record.
        df = db_conn.DataFile(
            path=file_path,
            type=file_type,
            is_copy=is_copy,
            processing_step_id=step_id
        )
        db_conn.db_session.merge(df)
        db_conn.db_session.commit()
    else:
        # Existing record: update only fields with new, meaningful values.
        if file_type not in [None, '', df.type]:
            df.type = file_type
            db_conn.db_session.commit()
        if is_copy not in [None, df.is_copy]:
            df.is_copy = is_copy
            db_conn.db_session.commit()
        if step_id not in [None, df.processing_step_id]:
            df.processing_step_id = step_id
            db_conn.db_session.commit()
def create_currency(cls, currency, **kwargs):
    """Create Currency

    Create a new Currency.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.create_currency(currency, async=True)
    >>> result = thread.get()

    :param async bool
    :param Currency currency: Attributes of currency to create (required)
    :return: Currency
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._create_currency_with_http_info(currency, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._create_currency_with_http_info(currency, **kwargs)
        return data
def delete_currency_by_id(cls, currency_id, **kwargs):
    """Delete Currency

    Delete an instance of Currency by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.delete_currency_by_id(currency_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str currency_id: ID of currency to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._delete_currency_by_id_with_http_info(currency_id, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._delete_currency_by_id_with_http_info(currency_id, **kwargs)
        return data
def get_currency_by_id(cls, currency_id, **kwargs):
    """Find Currency

    Return single instance of Currency by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_currency_by_id(currency_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str currency_id: ID of currency to return (required)
    :return: Currency
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._get_currency_by_id_with_http_info(currency_id, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._get_currency_by_id_with_http_info(currency_id, **kwargs)
        return data
def list_all_currencies(cls, **kwargs):
    """List Currencies

    Return a list of Currencies.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_currencies(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Currency]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._list_all_currencies_with_http_info(**kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._list_all_currencies_with_http_info(**kwargs)
        return data
def replace_currency_by_id(cls, currency_id, currency, **kwargs):
    """Replace Currency

    Replace all attributes of Currency.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_currency_by_id(currency_id, currency, async=True)
    >>> result = thread.get()

    :param async bool
    :param str currency_id: ID of currency to replace (required)
    :param Currency currency: Attributes of currency to replace (required)
    :return: Currency
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._replace_currency_by_id_with_http_info(currency_id, currency, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._replace_currency_by_id_with_http_info(currency_id, currency, **kwargs)
        return data
def update_currency_by_id(cls, currency_id, currency, **kwargs):
    """Update Currency

    Update attributes of Currency.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.update_currency_by_id(currency_id, currency, async=True)
    >>> result = thread.get()

    :param async bool
    :param str currency_id: ID of currency to update. (required)
    :param Currency currency: Attributes of currency to update. (required)
    :return: Currency
             If the method is called asynchronously,
             returns the request thread.
    """
    # Return only the deserialized payload (not status/headers).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async: hand back the request thread immediately.
        return cls._update_currency_by_id_with_http_info(currency_id, currency, **kwargs)
    else:
        # Sync: block and return the data.
        (data) = cls._update_currency_by_id_with_http_info(currency_id, currency, **kwargs)
        return data
def run_cmd(cmd, out=os.path.devnull, err=os.path.devnull):
    """Runs an external command.

    :param list cmd: Command to run.
    :param str out: Output file
    :param str err: Error file
    :raises RuntimeError: if the command exits with a non-zero status.
    """
    logger.debug(' '.join(cmd))
    with open(out, 'w') as hout:
        # stderr is captured in-process so it can be both written to `err`
        # and included in the failure message.
        proc = subprocess.Popen(cmd, stdout=hout, stderr=subprocess.PIPE)
        err_msg = proc.communicate()[1].decode()
    with open(err, 'w') as herr:
        herr.write(str(err_msg))
    msg = '({}) {}'.format(' '.join(cmd), err_msg)
    if proc.returncode != 0:
        logger.error(msg)
        raise RuntimeError(msg)
def run_cmd_if_file_missing(cmd, fname, out=os.path.devnull, err=os.path.devnull):
    """Runs an external command if file is absent.

    :param list cmd: Command to run.
    :param str fname: Path to the file, which existence is being checked.
    :param str out: Output file
    :param str err: Error file
    :return: True if cmd was executed, False otherwise
    :rtype: boolean
    """
    if fname is None or not os.path.exists(fname):
        run_cmd(cmd, out, err)
        return True
    else:
        return False
def merge_files(sources, destination):
    """Copy content of multiple files into a single file.

    Missing source files are skipped with a warning instead of failing.

    :param list(str) sources: source file names (paths)
    :param str destination: destination file name (path)
    :return:
    """
    with open(destination, 'w') as hout:
        for f in sources:
            if os.path.exists(f):
                with open(f) as hin:
                    # Stream-copy to avoid loading whole files into memory.
                    shutil.copyfileobj(hin, hout)
            else:
                logger.warning('File is missing: {}'.format(f))
def add_path(self, path):
    """Adds a new path to the list of searchable paths.

    :param path: new path
    :return: the path if it exists and was added, otherwise None.
    """
    if os.path.exists(path):
        self.paths.add(path)
        return path
    else:
        return None
def get(self, name):
    """Looks for a name in the searchable paths.

    :param name: file name
    :return: full path to the first match, or None when not found.
    """
    for d in self.paths:
        if os.path.exists(d) and name in os.listdir(d):
            return os.path.join(d, name)
    logger.debug('File not found {}'.format(name))
    return None
def overwrite_fits(hdulist, filename):
    """Saves a FITS file. Combined file rename, save new, delete renamed
    for FITS files.

    Why: HDUList.writeto() does not overwrite existing files.
    Why(2): It is also a standardized way to save FITS files.
    """
    assert isinstance(hdulist, (fits.HDUList, fits.PrimaryHDU))
    temp_name = None
    flag_delete_temp = False
    if os.path.isfile(filename):
        # PyFITS does not overwrite file; move the old one out of the way.
        temp_name = a99.rename_to_temp(filename)
    try:
        hdulist.writeto(filename, output_verify='warn')
        flag_delete_temp = temp_name is not None
    except:
        # Writing failed, reverts renaming, then re-raises.
        os.rename(temp_name, filename)
        raise
    if flag_delete_temp:
        os.unlink(temp_name)
def load_conf(yml_file, conf=None):
    """To load the config.

    :param yml_file: the config file path
    :param conf: dict, to override global config
    :return: dict
    """
    # `conf=None` replaces the mutable-default-argument `conf={}`;
    # the behavior is identical since conf was only read, never mutated.
    with open(yml_file) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; prefer yaml.safe_load for untrusted config files.
        data = yaml.load(f)
    if conf:
        data.update(conf)
    return dictdot(data)
def table_exists(
        dbConn,
        log,
        dbTableName):
    """*Probe a database to determine if a given table exists*

    **Key Arguments:**
        - ``dbConn`` -- mysql database connection
        - ``log`` -- logger
        - ``dbTableName`` -- the database tablename

    **Return:**
        - ``tableExists`` -- True or False

    **Usage:**
        To test if a table exists in a database:

        .. code-block:: python

            from fundamentals.mysql import table_exists
            exists = table_exists(
                dbConn=dbConn,
                log=log,
                dbTableName="stupid_named_table"
            )
            print exists
            # OUTPUT: False
    """
    log.debug('starting the ``table_exists`` function')
    # NOTE(review): the table name is interpolated directly into the SQL
    # string; safe only while dbTableName comes from trusted code, not
    # from user input.
    sqlQuery = u"""
        SELECT count(*)
        FROM information_schema.tables
        WHERE table_name = '%(dbTableName)s'
    """ % locals()
    tableExists = readquery(
        log=log,
        sqlQuery=sqlQuery,
        dbConn=dbConn,
        quiet=False
    )
    if tableExists[0]["count(*)"] == 0:
        tableExists = False
    else:
        tableExists = True
    log.debug('completed the ``table_exists`` function')
    return tableExists
def __handle_request(self, request, *args, **kw):
    """Intercept the request and response.

    This function lets `HttpStatusCodeError`s fall through. They
    are caught and transformed into HTTP responses by the caller.

    :return: ``HttpResponse``
    """
    # Pipeline: authenticate -> authorize -> resolve handler -> parse
    # and clean input -> execute -> post-process the response.
    self._authenticate(request)
    self._check_permission(request)
    method = self._get_method(request)
    data = self._get_input_data(request)
    data = self._clean_input_data(data, request)
    response = self._exec_method(method, request, data, *args, **kw)
    return self._process_response(response, request)
def _exec_method(self, method, request, data, *args, **kw):
if self._is_data_method(request):
return method(data, request, *args, **kw)
else:
return method(request, *args, **kw) | Execute appropriate request handler. |
def _process_response(self, response, request):
    """Process the response.

    If the response is ``HttpResponse``, does nothing. Otherwise,
    serializes, formats and validates the response.

    :param response: resource's response. This can be
        - ``None``,
        - django's ``HttpResponse``
        - devil's ``Response``
        - dictionary (or list of dictionaries)
        - object (or list of objects) that are first serialized into dict
          using ``self.factory``.
        - plaintext
    :returns: Django's ``HttpResponse``
    """

    def coerce_response():
        """ Coerce the response object into devil structure. """
        if not isinstance(response, Response):
            return Response(0, response)
        return response

    if isinstance(response, HttpResponse):
        # we don't do anything if resource returns django's http response
        return response
    devil_res = coerce_response()
    if devil_res.content and devil_res.get_code_num() in (0, 200, 201):
        # serialize, format and validate
        serialized_res = devil_res.content = self._serialize_object(devil_res.content, request)
        formatted_res = self._format_response(request, devil_res)
        self._validate_output_data(response, serialized_res, formatted_res, request)
    else:
        # no data -> format only
        formatted_res = self._format_response(request, devil_res)
    return formatted_res
def _format_response(self, request, response):
    """Format the devil response via the appropriate datamapper.

    Takes the devil response and turns it into a django response, ready
    to be returned to the client.

    :param request: the HTTP request (used to pick the mapper)
    :param response: devil's ``Response``
    :returns: django ``HttpResponse``
    """
    res = datamapper.format(request, response, self)
    # data is now formatted, let's check if the status_code is set.
    # BUG FIX: the original used ``res.status_code is 0`` -- identity
    # comparison against an int literal is implementation-dependent;
    # equality is the correct check.
    if res.status_code == 0:
        res.status_code = 200
    # apply headers
    self._add_resposne_headers(res, response)
    return res
def _add_resposne_headers(self, django_response, devil_response):
    """Copy HTTP headers from devil's response onto django's response.

    A devil response without a ``headers`` attribute is fine -- nothing
    is copied. (The typo in the method name is preserved because other
    code calls it by this name.)
    """
    try:
        headers = devil_response.headers
    except AttributeError:
        # ok, there was no devil_response
        return django_response
    for name, value in headers.items():
        django_response[name] = value
    return django_response
def _get_input_data(self, request):
    """Read and parse the request body, or return ``None``.

    Only data methods (PUT/POST) are expected to carry a body; other
    methods yield ``None``, as does an empty body.
    """
    # only PUT and POST should provide data
    if not self._is_data_method(request):
        return None
    # request.read() already returns the whole body; the original built a
    # list of single characters and re-joined them for no benefit.
    content = request.read()
    return self._parse_input_data(content, request) if content else None
def _clean_input_data(self, data, request):
# sanity check
if not self._is_data_method(request):
# this is not PUT or POST -> return
return data
# do cleaning
try:
if self.representation:
# representation defined -> perform validation
self._validate_input_data(data, request)
if self.factory:
# factory defined -> create object
return self._create_object(data, request)
else:
# no factory nor representation -> return the same data back
return data
except ValidationError, exc:
return self._input_validation_failed(exc, data, request) | Clean input data. |
def _get_input_validator(self, request):
    """Return the appropriate input validator.

    POST requests use ``self.post_representation`` when it is set;
    everything else (and POST without one) falls back to
    ``self.representation``.
    """
    if request.method.upper() == 'POST' and self.post_representation:
        return self.post_representation
    return self.representation
def _validate_input_data(self, data, request):
    """Validate parsed input data.

    :param request: the HTTP request
    :param data: the parsed data (single item, or a list/tuple of items)
    :return: if validation is performed and succeeds, the data converted
        into whatever format the validation uses (by default Django's
        Forms); otherwise the data unchanged.
    :raises: HttpStatusCodeError if data is not valid
    """
    validator = self._get_input_validator(request)
    if not isinstance(data, (list, tuple)):
        return validator.validate(data)
    return map(validator.validate, data)
def _validate_output_data(
self, original_res, serialized_res, formatted_res, request):
validator = self.representation
# when not to validate...
if not validator:
return
try:
if isinstance(serialized_res, (list, tuple)):
map(validator.validate, serialized_res)
else:
validator.validate(serialized_res)
except ValidationError, exc:
self._output_validation_failed(exc, serialized_res, request) | Validate the response data.
:param response: ``HttpResponse``
:param data: payload data. This implementation assumes
dict or list of dicts.
:raises: `HttpStatusCodeError` if data is not valid |
def _create_object(self, data, request):
    """Create python object(s) from the given data.

    Uses ``self.post_factory.create`` for POST requests when available,
    ``self.factory.create`` otherwise. Lists/tuples are mapped item by
    item.
    """
    use_post_factory = request.method.upper() == 'POST' and self.post_factory
    fac_func = self.post_factory.create if use_post_factory else self.factory.create
    if isinstance(data, (list, tuple)):
        return map(fac_func, data)
    return fac_func(data)
def _serialize_object(self, response_data, request):
    """Create a python datatype from the given python object.

    Uses ``self.factory.serialize()`` to convert the object (or each
    item of a list/tuple) into a dictionary. Without a factory the data
    is passed through untouched.

    :param response_data: data returned by the resource
    """
    if not self.factory:
        return response_data
    if isinstance(response_data, (list, tuple)):
        return map(
            lambda item: self.factory.serialize(item, request),
            response_data)
    return self.factory.serialize(response_data, request)
def _get_unknown_error_response(self, request, exc):
    """Generate an HTTP 500 response for unknown exceptions.

    In DEBUG mode the exception is re-raised instead so django can
    render it.

    todo: this should be more informative..
    """
    logging.getLogger('devil').error(
        'while doing %s on %s with [%s], devil caught: %s' % (
            request.method, request.path_info, str(request.GET), str(exc)), exc_info=True)
    if not settings.DEBUG:
        return HttpResponse(status=codes.INTERNAL_SERVER_ERROR[1])
    raise
def _get_error_response(self, exc):
    """Generate an HttpResponse based on the HttpStatusCodeError.

    Unauthorized errors become an auth challenge; everything else is
    logged (with a traceback for 500s) and returned with its code.
    """
    if exc.has_code(codes.UNAUTHORIZED):
        return self._get_auth_challenge(exc)
    include_trace = exc.has_code(codes.INTERNAL_SERVER_ERROR)
    logging.getLogger('devil').error(
        'devil caught http error: ' + str(exc), exc_info=include_trace)
    content = exc.content or ''
    return HttpResponse(content=content, status=exc.get_code_num())
def _get_auth_challenge(self, exc):
    """Build the response carrying a basic-auth challenge header."""
    challenge = HttpResponse(content=exc.content, status=exc.get_code_num())
    challenge['WWW-Authenticate'] = 'Basic realm="%s"' % REALM
    return challenge
def _get_method(self, request):
    """Figure out the requested method and return the callable.

    :raises: ``errors.MethodNotAllowed`` when no matching handler exists.
    """
    handler = getattr(self, request.method.lower(), None)
    if not handler or not callable(handler):
        raise errors.MethodNotAllowed()
    return handler
def _authenticate(self, request):
def ensure_user_obj():
""" Make sure that request object has user property.
If `request.user` is not present or is `None`, it is
created and initialized with `AnonymousUser`.
"""
try:
if request.user:
return
except AttributeError:
pass
request.user = AnonymousUser()
def anonymous_access(exc_obj):
""" Determine what to do with unauthenticated requests.
If the request has already been authenticated, does
nothing.
:param exc_obj: exception object to be thrown if anonymous
access is not permitted.
"""
if request.user and request.user.is_authenticated():
# request is already authenticated
pass
elif self.allow_anonymous:
request.user = AnonymousUser()
else:
raise exc_obj
# first, make sure that the request carries `user` attribute
ensure_user_obj()
if self.authentication:
# authentication handler is configured
try:
self.authentication.authenticate(request)
except errors.Unauthorized, exc:
# http request doesn't carry any authentication information
anonymous_access(exc)
else:
# no authentication configured
anonymous_access(errors.Forbidden()) | Perform authentication. |
def _check_permission(self, request):
    """Delegate permission checking to the configured access controller.

    :raises: Forbidden, if user doesn't have access to the resource.
    """
    controller = self.access_controller
    if controller:
        controller.check_perm(request, self)
def print_devices_change_callback(devices, key, new):
    """Print the reply from &devices() and highlight errors."""
    dev = devices[key]
    print('- ', new, ' ', dev)
    if dev['type'] == QSType.unknown:
        print(" ERR decoding")
    elif dev['value'] == -1:
        # BUG FIX: the original called ``dev(" ERR decoding: -1?")`` --
        # ``dev`` is a dict and not callable; this was clearly meant to
        # be a print.
        print(" ERR decoding: -1?")
    qcord = pyqwikswitch.decode_qwikcord(dev['data'][pyqwikswitch.QS_VALUE])
    if qcord is not None:
        print(' qwikcord (CTAVG, CTsum) = ' + str(qcord))
def print_item_callback(item):
    """Print an item callback, used by &listen."""
    cmd = item.get('cmd', '')
    item_id = item.get('id', '')
    data = item.get('data', '')
    print('&listen [{}, {}={}]'.format(cmd, item_id, data))
def main():
    """Quick test for QSUsb class."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--url', help='QSUSB URL [http://127.0.0.1:2020]',
                        default='http://127.0.0.1:2020')
    parser.add_argument('--file', help='a test file from /&devices')
    parser.add_argument('--test_ids', help='List of test IDs',
                        default='@0c2700,@0ac2f0')
    args = parser.parse_args()

    if args.file:
        # offline mode: feed a canned /&devices JSON dump through the parser
        with open(args.file) as data_file:
            data = json.load(data_file)
        qsusb = pyqwikswitch.QSDevices(
            print_devices_change_callback, print_devices_change_callback)
        print_bad_data(data)
        qsusb.set_qs_values(data)
        return

    print('Execute a basic test on server: {}\n'.format(args.url))

    def qs_to_value(key, new):
        print(" --> New value: {}={}".format(key, new))

    qsusb = QSUsb(args.url, 1, qs_to_value)
    print('Version: ' + qsusb.version())
    qsusb.set_qs_values()

    qsusb.listen(print_item_callback, timeout=5)
    print("Started listening")
    try:
        # Do some test while listening
        if args.test_ids and len(args.test_ids) > 0:
            test_devices_set(qsusb.devices, args.test_ids.split(','))
        print("\n\nListening for 60 seconds (test buttons now)\n")
        sleep(60)
    except KeyboardInterrupt:
        pass
    finally:
        qsusb.stop()  # Close all threads
        print("Stopped listening")
def get(self):
    """Return the currently valid value.

    ``self.futures`` holds ``(value, expires, effective)`` triples.
    Entries whose ``expires`` time has passed are evicted; entries whose
    ``effective`` time is still in the future are kept but ignored. Of
    the remaining (active) entries the most recently appended wins and
    all older active entries are evicted, because newer values are
    "more recent" via future.

    :raises ValueError: when no entry is currently active.
    """
    with self._lock:
        now = datetime.now()
        kept = []
        active = []
        for vef in self.futures:
            value, expires, effective = vef
            if (expires or datetime.max) <= now:
                # expired -> evict. BUG FIX: the original popped from the
                # list while iterating it with enumerate, which skips the
                # element following every removal.
                continue
            kept.append(vef)
            if (effective or datetime.min) < now:
                active.append(vef)
        if active:
            # Evict all active entries except the most recent one.
            # BUG FIX: the original popped ascending indices, shifting
            # the positions of the entries still to be popped.
            current = active[-1]
            stale = set(map(id, active[:-1]))
            self.futures[:] = [v for v in kept if id(v) not in stale]
            return current[0]
        self.futures[:] = kept
        raise ValueError("dicttime: no current value, however future has (%d) values" % len(self.futures))
def add_node(node, **kwds):
    """add_node from Sphinx

    Registers the node class with docutils and attaches the supplied
    ``(visit, depart)`` function pairs to the matching translator.
    Only the 'html' and 'latex' keys are honoured; other keys are
    silently ignored for compatibility.
    """
    nodes._add_node_class_names([node.__name__])
    for key, val in kwds.iteritems():
        try:
            visit, depart = val
        except ValueError:
            raise ValueError('Value for key %r must be a '
                             '(visit, depart) function tuple' % key)
        if key == 'html':
            from docutils.writers.html4css1 import HTMLTranslator as translator
        elif key == 'latex':
            from docutils.writers.latex2e import LaTeXTranslator as translator
        else:
            # ignore invalid keys for compatibility
            continue
        setattr(translator, 'visit_' + node.__name__, visit)
        if depart:
            setattr(translator, 'depart_' + node.__name__, depart)
def _lookup(self, bearer, target=None, permission=None):
    """Lookup the proper registry for this permission.

    Returns ``(registry, key)`` where registry is the proper lookup and
    key is the generated key to use for the permission, depending on
    which of target/permission were supplied.
    """
    if target is None:
        return self.bearer, (bearer, permission)
    if permission is None:
        return self.target, (bearer, target)
    return self, (bearer, target, permission)
def retrieve(self, *args, **kwargs):
    """Retrieve the permission function for the provided things."""
    registry, key = self._lookup(*args, **kwargs)
    return registry[key]
def call_proxy_with_paging(self, engine, payload, method, analyze_json_error_param, retry_request_substr_variants,
                           max_pages=MAX_PAGES):
    """Paged proxy request (generator).

    :param engine: target system
    :param payload: request payload (deep-copied; ``paging`` is refreshed
        between requests)
    :param method: string, one of native_call | tsv | json_newline
    :param analyze_json_error_param: whether to inspect the ``error``
        field of proxy responses
    :param retry_request_substr_variants: substrings whose presence in a
        response triggers a retry
    :param max_pages: hard cap on the number of pages requested
    :return: generator yielding one response per page
    """
    copy_payload = copy.deepcopy(payload)
    for _ in range(max_pages):
        resp = self.__api_proxy_call(engine, copy_payload, method, analyze_json_error_param,
                                     retry_request_substr_variants)
        yield resp
        paging_resp = resp.json().get("paging")
        if not paging_resp:
            break
        copy_payload["paging"] = paging_resp
    else:
        # BUG FIX: the original checked ``idx >= max_pages`` after a
        # ``range(max_pages)`` loop, which can never be true, so the
        # warning never fired. for/else runs exactly when the page cap
        # was exhausted without a break.
        self.__app.log.warning("Достигнут максимальный предел страниц", {"max_pages": max_pages})
def call_proxy(self, engine, payload, method, analyze_json_error_param, retry_request_substr_variants,
               stream=False):
    """Single proxy request.

    :param engine: target system
    :param payload: request payload
    :param method: string, one of native_call | tsv | json_newline
    :param analyze_json_error_param: whether to inspect the ``error``
        field of proxy responses
    :param retry_request_substr_variants: substrings whose presence in a
        response triggers a retry
    :param stream: pass-through streaming flag
    :return: proxy response
    """
    return self.__api_proxy_call(
        engine, payload, method, analyze_json_error_param,
        retry_request_substr_variants, stream)
def check_err(resp, analyze_json_error_param=False, retry_request_substr_variants=None):
    """Check a proxy response and raise the appropriate error.

    :param resp: HTTP response object
    :param analyze_json_error_param: also inspect the ``error`` field of
        the JSON body
    :param retry_request_substr_variants: list of substrings whose
        presence in an API error triggers a re-request
    :return: ``resp`` unchanged when no error is detected
    :raises RetryHttpRequestError: on 502/503/504 or retryable error text
    :raises RateLimitError: when the API reports a rate limit
    :raises UnexpectedError: on other HTTP errors >= 400
    :raises ApiProxyError: on other errors reported in the JSON body
    """
    # BUG FIX: work on a copy -- the original appended to the caller's
    # list, so the retry variants grew on every call (shared mutable
    # argument side effect).
    if retry_request_substr_variants is None:
        retry_request_substr_variants = []
    else:
        retry_request_substr_variants = list(retry_request_substr_variants)
    # RKN (Roskomnadzor) blocks surface as SSL errors
    retry_request_substr_variants.append("TLSV1_ALERT_ACCESS_DENIED")
    if resp.status_code in [502, 503, 504]:
        raise RetryHttpRequestError(resp.text)
    if resp.status_code >= 400:
        rtext = resp.text
        for v_ in retry_request_substr_variants:
            if v_ in rtext:
                raise RetryHttpRequestError(rtext)
        raise UnexpectedError("HTTP request failed: {} {}".format(resp.status_code, rtext))
    if analyze_json_error_param:
        data_ = resp.json()
        if 'error' in data_ and data_.get('error'):
            error = data_.get('error')
            full_err_ = json.dumps(error)
            if error.get("type") == "RateLimitError":
                raise RateLimitError(error.get("message"), waiting_time=error.get("waiting_time"))
            for v_ in retry_request_substr_variants:
                if v_ in full_err_:
                    raise RetryHttpRequestError(full_err_)
            raise ApiProxyError(full_err_)
    return resp
def has_rabf_motif(self):
    """Check if the sequence has enough RabF motifs within the G domain.

    If there exists more than one G domain in the sequence, enough RabF
    motifs in at least one of those domains is required to classify the
    sequence as a Rab.
    """
    if not self.rabf_motifs:
        return False
    for gdomain in self.gdomain_regions:
        beg, end = map(int, gdomain.split('-'))
        motifs = [m for m in self.rabf_motifs if m[1] >= beg and m[2] <= end]
        if motifs:
            aligned = pairwise2.align.globalxx('12345', ''.join(str(m[0]) for m in motifs))
            matches = int(aligned[0][2])
            if matches >= self.motif_number:
                return True
    return False
def summarize(self):
    """G protein annotation summary in a text format.

    :return: A string summary of the annotation
    :rtype: str
    """
    rows = [
        ['Sequence ID', self.seqrecord.id],
        ['G domain', ' '.join(self.gdomain_regions) if self.gdomain_regions else None],
        ['E-value vs rab db', self.evalue_bh_rabs],
        ['E-value vs non-rab db', self.evalue_bh_non_rabs],
        ['RabF motifs', ' '.join(map(str, self.rabf_motifs)) if self.rabf_motifs else None],
        ['Is Rab?', self.is_rab()],
    ]
    lines = ['{:25s}{}\n'.format(name, value) for name, value in rows]
    if self.is_rab():
        top5 = ', '.join('{:s} ({:.2g})'.format(name, score)
                         for name, score in self.rab_subfamily_top5)
        lines.append('{:25s}{}\n'.format('Top 5 subfamilies', top5))
    return ''.join(lines)
def write(self):
    """Write sequences predicted to be Rabs to a fasta file.

    :return: Number of written sequences
    :rtype: int
    """
    rab_records = [g.seqrecord for g in self.gproteins.values() if g.is_rab()]
    return SeqIO.write(rab_records, self.tmpfname + '.phase2', 'fasta')
def check(self):
    """Check that required data and third-party tools are available.

    :raises RuntimeError: when the 'superfamily' data directory or any
        required executable is missing.
    """
    pathfinder = Pathfinder(True)
    if pathfinder.add_path(pathfinder['superfamily']) is None:
        raise RuntimeError("'superfamily' data directory is missing")
    required_tools = ('hmmscan', 'phmmer', 'mast', 'blastp', 'ass3.pl', 'hmmscan.pl')
    for tool in required_tools:
        if not pathfinder.exists(tool):
            raise RuntimeError('Dependency {} is missing'.format(tool))
def create_brand(cls, brand, **kwargs):
    """Create Brand.

    Create a new Brand. Synchronous by default; pass ``async=True`` to
    get the request thread instead:

    >>> thread = api.create_brand(brand, async=True)
    >>> result = thread.get()

    :param async bool
    :param Brand brand: Attributes of brand to create (required)
    :return: Brand, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._create_brand_with_http_info(brand, **kwargs)
def delete_brand_by_id(cls, brand_id, **kwargs):
    """Delete Brand.

    Delete an instance of Brand by its ID. Synchronous by default; pass
    ``async=True`` to get the request thread instead:

    >>> thread = api.delete_brand_by_id(brand_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str brand_id: ID of brand to delete. (required)
    :return: None, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._delete_brand_by_id_with_http_info(brand_id, **kwargs)
def get_brand_by_id(cls, brand_id, **kwargs):
    """Find Brand.

    Return a single instance of Brand by its ID. Synchronous by default;
    pass ``async=True`` to get the request thread instead:

    >>> thread = api.get_brand_by_id(brand_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str brand_id: ID of brand to return (required)
    :return: Brand, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._get_brand_by_id_with_http_info(brand_id, **kwargs)
def list_all_brands(cls, **kwargs):
    """List Brands.

    Return a page of Brands. Synchronous by default; pass ``async=True``
    to get the request thread instead:

    >>> thread = api.list_all_brands(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Brand], or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._list_all_brands_with_http_info(**kwargs)
def replace_brand_by_id(cls, brand_id, brand, **kwargs):
    """Replace Brand.

    Replace all attributes of Brand. Synchronous by default; pass
    ``async=True`` to get the request thread instead:

    >>> thread = api.replace_brand_by_id(brand_id, brand, async=True)
    >>> result = thread.get()

    :param async bool
    :param str brand_id: ID of brand to replace (required)
    :param Brand brand: Attributes of brand to replace (required)
    :return: Brand, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._replace_brand_by_id_with_http_info(brand_id, brand, **kwargs)
def update_brand_by_id(cls, brand_id, brand, **kwargs):
    """Update Brand.

    Update attributes of Brand. Synchronous by default; pass
    ``async=True`` to get the request thread instead:

    >>> thread = api.update_brand_by_id(brand_id, brand, async=True)
    >>> result = thread.get()

    :param async bool
    :param str brand_id: ID of brand to update. (required)
    :param Brand brand: Attributes of brand to update. (required)
    :return: Brand, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._update_brand_by_id_with_http_info(brand_id, brand, **kwargs)
def filter_(*permissions, **kwargs):
    """Construct a clause to filter all bearers or targets for a given
    bearer or target.
    """
    bearer = kwargs['bearer']
    target = kwargs.get('target')
    bearer_cls = type_for(bearer)

    # A query object is required. It can be passed in directly, built from
    # a passed session, or built from the bearer's own session.
    if 'query' in kwargs:
        query = kwargs['query']
    elif 'session' in kwargs:
        query = kwargs['session'].query(target)
    else:
        query = object_session(bearer).query(target)

    getter = functools.partial(
        registry.retrieve,
        bearer=bearer_cls,
        target=target)

    try:
        # Map each rule function to the permission it guards so all rules
        # can be collected below.
        if len(permissions):
            rules = dict((getter(permission=perm), perm) for perm in permissions)
        else:
            rules = {getter(): None}
    except KeyError:
        # No rules defined. Default to no permission.
        return query.filter(sql.false())

    # Thread the query through every rule: each rule receives the query
    # produced by the previous one and returns an increasingly decorated
    # query.
    def apply_rule(q, rule_and_perm):
        rule, perm = rule_and_perm
        return rule(permission=perm, query=q, bearer=bearer)

    return reduce(apply_rule, six.iteritems(rules), query)
def has(*permissions, **kwargs):
    """Check if the passed bearer has the passed permissions (optionally
    on the passed target).
    """
    target = kwargs['target']
    kwargs = dict(kwargs, target=type_for(target))
    # TODO: Predicate evaluation?
    return target in filter_(*permissions, **kwargs)
def get_now_datetime_filestamp(longTime=False):
    """*A datetime stamp to be appended to the end of filenames:
    ``YYYYMMDDtHHMMSS``*

    **Key Arguments:**
        - ``longTime`` -- append microseconds (more chance of filenames
          being unique)

    **Return:**
        - ``now`` -- current time and date in filename format

    **Usage:**

    .. code-block:: python

        from fundamentals.download import get_now_datetime_filestamp
        get_now_datetime_filestamp(longTime=False)
        #Out: '20160316t154635'

        get_now_datetime_filestamp(longTime=True)
        #Out: '20160316t154644133638'
    """
    from datetime import datetime
    fmt = "%Y%m%dt%H%M%S%f" if longTime else "%Y%m%dt%H%M%S"
    return datetime.now().strftime(fmt)
def create_app(application, request_class=Request):
    """Create a WSGI application out of the given Minion app.

    Arguments:

        application (Application):

            a minion app

        request_class (callable):

            a class used to construct incoming requests out of the WSGI
            environment; it is passed a single arg, the environ. By
            default, this is :class:`minion.request.WSGIRequest` if
            unprovided.
    """
    def wsgi(environ, start_response):
        request = request_class(environ)
        response = application.serve(
            request=request,
            path=environ.get("PATH_INFO", ""),
        )
        headers = [
            (name, b",".join(values))
            for name, values in response.headers.canonicalized()
        ]
        start_response(response.status, headers)
        return [response.content]
    return wsgi
def get_store_profile_by_id(cls, store_profile_id, **kwargs):
    """Find StoreProfile.

    Return a single instance of StoreProfile by its ID. Synchronous by
    default; pass ``async=True`` to get the request thread instead:

    >>> thread = api.get_store_profile_by_id(store_profile_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_profile_id: ID of storeProfile to return (required)
    :return: StoreProfile, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._get_store_profile_by_id_with_http_info(store_profile_id, **kwargs)
def replace_store_profile_by_id(cls, store_profile_id, store_profile, **kwargs):
    """Replace StoreProfile.

    Replace all attributes of StoreProfile. Synchronous by default; pass
    ``async=True`` to get the request thread instead:

    >>> thread = api.replace_store_profile_by_id(store_profile_id, store_profile, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_profile_id: ID of storeProfile to replace (required)
    :param StoreProfile store_profile: Attributes of storeProfile to replace (required)
    :return: StoreProfile, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._replace_store_profile_by_id_with_http_info(store_profile_id, store_profile, **kwargs)
def update_store_profile_by_id(cls, store_profile_id, store_profile, **kwargs):
    """Update StoreProfile.

    Update attributes of StoreProfile. Synchronous by default; pass
    ``async=True`` to get the request thread instead:

    >>> thread = api.update_store_profile_by_id(store_profile_id, store_profile, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_profile_id: ID of storeProfile to update. (required)
    :param StoreProfile store_profile: Attributes of storeProfile to update. (required)
    :return: StoreProfile, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync branches of the original delegated to the
    # same helper; the 'async' flag in kwargs is consumed downstream.
    return cls._update_store_profile_by_id_with_http_info(store_profile_id, store_profile, **kwargs)
def get_documentation(self, request, *args, **kw):
    """Generate the resource's self-documentation dict."""
    return {
        'resource': self.name(),
        'urls': self._get_url_doc(),
        'description': self.__doc__,
        'representation': self._get_representation_doc(),
        'methods': self._get_method_doc(),
    }
def _serialize_object(self, response_data, request):
    """Skip serialization for documentation responses; defer otherwise."""
    if self._is_doc_request(request):
        return response_data
    return super(DocumentedResource, self)._serialize_object(
        response_data, request)
def _validate_output_data(
        self, original_res, serialized_res, formatted_res, request):
    """Skip output validation for documentation responses; defer otherwise."""
    if self._is_doc_request(request):
        return
    return super(DocumentedResource, self)._validate_output_data(
        original_res, serialized_res, formatted_res, request)
def _get_method(self, request):
    """Serve the documentation generator for doc requests; otherwise
    resolve the handler normally."""
    if self._is_doc_request(request):
        return self.get_documentation
    return super(DocumentedResource, self)._get_method(request)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.