code
stringlengths 52
7.75k
| docs
stringlengths 1
5.85k
|
---|---|
def delete_tax_rate_by_id(cls, tax_rate_id, **kwargs):
    """Delete TaxRate.

    Delete an instance of TaxRate by its ID.  This method makes a
    synchronous HTTP request by default; pass ``async=True`` (via kwargs)
    to make an asynchronous request, in which case the request thread is
    returned.

    >>> thread = api.delete_tax_rate_by_id(tax_rate_id, async=True)
    >>> result = thread.get()

    :param str tax_rate_id: ID of taxRate to delete. (required)
    :return: None
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper; when
    # `async` is set the helper returns the request thread instead of data.
    return cls._delete_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs)
def get_tax_rate_by_id(cls, tax_rate_id, **kwargs):
    """Find TaxRate.

    Return a single instance of TaxRate by its ID.  Synchronous by
    default; pass ``async=True`` (via kwargs) for an asynchronous request,
    in which case the request thread is returned.

    >>> thread = api.get_tax_rate_by_id(tax_rate_id, async=True)
    >>> result = thread.get()

    :param str tax_rate_id: ID of taxRate to return (required)
    :return: TaxRate
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async both delegate to the same helper.
    return cls._get_tax_rate_by_id_with_http_info(tax_rate_id, **kwargs)
def list_all_tax_rates(cls, **kwargs):
    """List TaxRates.

    Return a page of TaxRates.  Synchronous by default; pass
    ``async=True`` (via kwargs) for an asynchronous request, in which case
    the request thread is returned.

    >>> thread = api.list_all_tax_rates(async=True)
    >>> result = thread.get()

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[TaxRate]
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async both delegate to the same helper.
    return cls._list_all_tax_rates_with_http_info(**kwargs)
def replace_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs):
    """Replace TaxRate.

    Replace all attributes of a TaxRate.  Synchronous by default; pass
    ``async=True`` (via kwargs) for an asynchronous request, in which case
    the request thread is returned.

    >>> thread = api.replace_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
    >>> result = thread.get()

    :param str tax_rate_id: ID of taxRate to replace (required)
    :param TaxRate tax_rate: Attributes of taxRate to replace (required)
    :return: TaxRate
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async both delegate to the same helper.
    return cls._replace_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
def update_tax_rate_by_id(cls, tax_rate_id, tax_rate, **kwargs):
    """Update TaxRate.

    Update attributes of a TaxRate.  Synchronous by default; pass
    ``async=True`` (via kwargs) for an asynchronous request, in which case
    the request thread is returned.

    >>> thread = api.update_tax_rate_by_id(tax_rate_id, tax_rate, async=True)
    >>> result = thread.get()

    :param str tax_rate_id: ID of taxRate to update. (required)
    :param TaxRate tax_rate: Attributes of taxRate to update. (required)
    :return: TaxRate
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async both delegate to the same helper.
    return cls._update_tax_rate_by_id_with_http_info(tax_rate_id, tax_rate, **kwargs)
def build_append_file_task(urllocation, filelocation):
    """Build a shell task that watches a remote url and appends the data
    found there to the local file.

    Use this when you want to keep all of the information stored on the
    local machine, but also append the new information found at the url.
    For instance, if the local file contains ``foo`` and the remote file
    contains ``bar``, the resulting file will contain ``foo`` then ``bar``.

    :param urllocation: url of the remote file to download
    :param filelocation: path of the local file to append to
    :return: the full command string built from the configured tools
    """
    config = file_utils.get_celcius_config()
    basename = filelocation.split('/')[-1]
    # Temporary download target and staging file for the merged result,
    # placed alongside the original file.
    tmp_filelocation = filelocation.replace(basename, 'tmp_'+basename)
    new_filelocation = filelocation.replace(basename, 'new_'+basename)
    # Pick the download tool according to the user's configuration.
    if config['retrieve_command'] == 'curl':
        download_cmd = curl.build_download_file_command(urllocation, tmp_filelocation)
    elif config['retrieve_command'] == 'wget':
        download_cmd = wget.build_download_file_command(urllocation, tmp_filelocation)
    else:
        print("Invalid retrieve command!")
        sys.exit(1)
    # Download, diff the download against the current file, and redirect
    # the combined output into the staging file.
    diff_cmd = diff.build_append_file_command(filelocation, tmp_filelocation)
    compare_cmd = concat.build_and_concat_commands([download_cmd, diff_cmd])
    redirect_cmd = redirect.redirect_output(compare_cmd, new_filelocation)
    # touch the original (so it exists), build the staged file, remove the
    # temporary and original files, then move the staged file into place.
    full_cmd = concat.concat_commands([touch.touch(filelocation).build_command(), redirect_cmd, rm.build_force_rm_command(tmp_filelocation).build_command(), rm.build_force_rm_command(filelocation).build_command(), mv.mv(new_filelocation, filelocation).build_command()])
    return full_cmd
def get_authentic_node_name(self, node_name: str) -> Optional[str]:
    """Return the exact, authentic node name for the given node name if a
    corresponding node exists in the graph (maybe not locally yet), or
    `None` otherwise.

    The name is first looked up directly; failing that, it is interpreted
    as a vertex index.

    Arguments:
        node_name (str): The node name to return the authentic node name for.

    Returns:
        The authentic name of the node corresponding to the given node
        name or `None` if no such node exists.
    """
    vertex = None
    # First try to look the node up by name.
    try:
        vertex = self._wrapped_graph.vs.find(node_name)
    except ValueError:
        pass
    # Otherwise interpret node_name as a vertex index.
    if vertex is None:
        try:
            vertex = self._wrapped_graph.vs[int(node_name)]
        except (ValueError, IndexError):
            return None
    # Prefer the vertex's stored name; fall back to its index.
    try:
        return vertex["name"]
    except KeyError:
        return str(vertex.index)
def _create_memory_database_interface(self) -> GraphDatabaseInterface:
    """Create and return the in-memory SQLite database interface the graph
    will use."""
    Base = declarative_base()
    engine = sqlalchemy.create_engine("sqlite://", poolclass=StaticPool)
    session_factory = sessionmaker(bind=engine)
    interface: GraphDatabaseInterface = create_graph_database_interface(
        sqlalchemy, session_factory(), Base, sqlalchemy.orm.relationship
    )
    # Start from a clean schema every time.
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    return interface
def _load_neighbors_from_external_source(self) -> None:
    """Load the neighbors of the node from the igraph `Graph` instance that
    is wrapped by the graph owning this node, adding an edge for each."""
    graph = self._graph
    wrapped_vertex = graph.wrapped_graph.vs[self._igraph_index]
    for ig_neighbor in wrapped_vertex.neighbors():
        # Fall back to the vertex index when no name is stored.
        try:
            name = ig_neighbor["name"]
        except KeyError:
            name = str(ig_neighbor.index)
        # The external ID is optional.
        try:
            external_id = ig_neighbor["external_id"]
        except KeyError:
            external_id = None
        neighbor = graph.nodes.get_node_by_name(name,
                                                can_validate_and_load=True,
                                                external_id=external_id)
        graph.add_edge(self, neighbor)
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode:
    """Return a new `IGraphNode` instance with the given index and name.

    Arguments:
        index (int): The index of the node to create.
        name (str): The name of the node to create.
        external_id (Optional[str]): The external ID of the node.
    """
    node = IGraphNode(graph=self._graph, index=index,
                      name=name, external_id=external_id)
    return node
def parse(self):
    """Parse the table data string into records (tuples of autoconverted
    field values), stored on ``self.records``."""
    self.parse_fields()
    parsed = []
    for line in self.t['data'].split('\n'):
        # Skip separator/empty rows.
        if EMPTY_ROW.match(line):
            continue
        values = tuple(self.autoconvert(line[first:last + 1])
                       for first, last in self.fields)
        parsed.append(values)
    self.records = parsed
def recarray(self):
    """Return a numpy recarray from the (parsed) string.

    Parses the table first if it has not been parsed yet.  Falls back to
    an object-dtype construction when records contain lists, which
    ``numpy.rec.fromrecords`` cannot handle.

    NOTE(review): the fallback path relies on Python-2 semantics of
    ``map``/``zip`` (``numpy.dtype(zip(...))`` needs a list on Python 3)
    and is acknowledged below as not working properly yet.
    """
    if self.records is None:
        self.parse()
    try:
        # simple (should this also be subjected to convert.to_int64() ?)
        return numpy.rec.fromrecords(self.records, names=self.names)
    except ValueError:
        # complicated because fromrecords cannot deal with records of lists
        # Quick hack: use objects for lists etc (instead of building the proper
        # data types (see docs for numpy.dtype , eg dtype('coord', (float, 3)) )
        D = numpy.empty(len(self.records[0]), dtype=object)  # number of fields from first record
        types = numpy.array([map(type, r) for r in self.records])  # types of all fields
        for icol, isSame in enumerate([numpy.all(col) for col in types.T]):
            if isSame:
                D[icol] = types[0][icol]
            else:
                D[icol] = object
        dtype = numpy.dtype(zip(self.names, D))
        # from numpy.rec.records
        # TODO: this is not working properly yet; for instance, text fields
        # are reduced to length 0 (<U0) and the final convert.to_int64 dies
        # with '<U0'*** TypeError: TypeError('data type not understood',)
        retval = numpy.array(self.records, dtype=dtype)
        res = retval.view(numpy.recarray)
        ## res.dtype = numpy.dtype((numpy.rec.record, res.dtype)) # fails -- ARGH, this makes it a recarray
        return convert.to_int64(res)
def parse_fields(self):
    """Determine the start and end columns and names of the fields.

    Sets ``self.names`` (list of field names) and ``self.fields`` (list of
    inclusive ``(first, last)`` column tuples).

    :raises ParseError: if the three rules differ from each other or the
        number of field names does not match the number of fields.
    """
    rule = self.t['toprule'].rstrip()  # keep leading space for correct columns!!
    if not (rule == self.t['midrule'].rstrip() and rule == self.t['botrule'].rstrip()):
        raise ParseError("Table rules differ from each other (check white space).")
    names = self.t['fields'].split()
    nfields = len(rule.split())
    if nfields != len(names):
        # Fix: the %-args were swapped relative to the message text.
        raise ParseError("number of field names (%d) does not match number of fields (%d)"
                         % (len(names), nfields))
    fields = []  # list of tuples (first,last) column of the field
    ifield = 0
    is_field = rule.startswith('=')  # state
    len_rule = len(rule)
    start_field = 0
    end_field = 0
    # range() instead of the Python-2-only xrange() -- same behaviour.
    for c in range(len_rule):
        char = rule[c]
        if not is_field and char == '=':
            start_field = c
            is_field = True
        if is_field and (char == ' ' or c == len_rule - 1):
            # finished field
            fields.append((start_field, c))
            ifield += 1
            is_field = False
    self.names = names
    self.fields = fields
def email_quote_txt(text,
                    indent_txt='>>',
                    linebreak_input="\n",
                    linebreak_output="\n"):
    """Take a text and return it in a typical mail-quoted format.

    Each input line is prefixed with *indent_txt* and terminated with
    *linebreak_output*, so e.g. ``"Quoi?\\nUn cadeau."`` becomes
    ``">>Quoi?\\n>>Un cadeau.\\n"``.

    @param text: the string to quote
    @param indent_txt: the string used for quoting (default: '>>')
    @param linebreak_input: in the text param, string used for linebreaks
    @param linebreak_output: linebreak used for output
    @return: the text as a quoted string
    """
    if text == "":
        return ""
    # str.join avoids the quadratic cost of repeated string concatenation.
    lines = text.split(linebreak_input)
    return "".join(indent_txt + line + linebreak_output for line in lines)
def escape_email_quoted_text(text, indent_txt='>>', linebreak_txt='\n'):
    """Escape text using an email-like indenting rule.

    Each line is stripped, its leading quote markers (*indent_txt*) are
    counted and removed, the remainder is HTML-washed, and the markers are
    re-applied -- so quoting structure survives while markup is escaped.

    @param text: the string to escape
    @param indent_txt: the string used for quoting
    @param linebreak_txt: in the text param, string used for linebreaks
    """
    washer = HTMLWasher()
    output = ''
    for line in text.split(linebreak_txt):
        line = line.strip()
        # Strip and count the leading quote markers.
        depth = 0
        while line.startswith(indent_txt):
            depth += 1
            line = line[len(indent_txt):]
        washed = washer.wash(line, render_unallowed_tags=True)
        output += (depth * indent_txt) + washed + linebreak_txt
    # Drop the trailing linebreak added by the loop.
    return output[:-1]
def _print(self, text, color=None, **kwargs):
    """Print *text* to the terminal, optionally colored.

    :param text: the text to print
    :param color: one of 'red', 'green', 'yellow', 'cyan'; any other value
        (including the default ``None``) prints without coloring.  The
        previous implementation raised KeyError when *color* was omitted.
    """
    COLORS = {
        'red': '\033[91m{}\033[00m',
        'green': '\033[92m{}\033[00m',
        'yellow': '\033[93m{}\033[00m',
        'cyan': '\033[96m{}\033[00m'
    }
    # Fall back to an uncolored pass-through template for unknown colors.
    template = COLORS.get(color, '{}')
    six.print_(template.format(text), **kwargs)
def _is_unique(self, name, path):
    """Verify there is no project with the given name or path in the
    database.

    :return: True if no such project exists, False otherwise.
    """
    try:
        # [0] raises IndexError when the query matches nothing; catching
        # only IndexError (instead of the previous bare except) lets real
        # database errors surface instead of being silently swallowed.
        project = Project.select().where(
            (Project.name == name) |
            (Project.path == path)
        )[0]
    except IndexError:
        project = None
    return project is None
def _path_is_valid(self, path):
VALIDATORS = [
(os.path.isabs, self._ERROR_PATH_NOT_ABSOLUTE),
(os.path.exists, self._ERROR_PATH_DOESNT_EXISTS),
(os.path.isdir, self._ERROR_PATH_NOT_A_DIR),
]
for validator in VALIDATORS:
func, str_err = validator
if not func(path):
self._print(str_err.format(path), 'red')
return
return True | validates if a given path is:
- absolute,
- exists on current machine
- is a directory |
def add(self, name, path=None, **kwargs):
    """Add a new project with the given name and path to the database.

    When no path is given, ``default_path`` from kwargs (typically the
    current working directory) is used instead.
    """
    path = path or kwargs.pop('default_path', None)
    if not self._path_is_valid(path):
        return
    if not self._is_unique(name, path):
        existing = Project.select().where(
            (Project.name == name) |
            (Project.path == path)
        )[0]
        self._print(self._ERROR_PROJECT_EXISTS.format(name, existing.path), 'red')
        return
    Project.create(name=name, path=path)
    self._print(self._SUCCESS_PROJECT_ADDED.format(name), 'green')
def list(self, **kwargs):
    """Display all projects stored in the database, sorted by name."""
    projects = Project.select().order_by(Project.name)
    if len(projects) == 0:
        self._print('No projects available', 'yellow')
        return
    for project in projects:
        # (removed the unused `project_repr` local, which duplicated this
        # exact format call for no effect)
        row = '- {}'.format(self._PROJECT_ITEM.format(project.name, project.path))
        six.print_(row)
def parent_tags(self):
    """Provide tags of all parent HTML elements (excluding the synthetic
    root tag)."""
    found = set()
    for address in self._addresses:
        # A 'text' character sits inside its element, so the element's own
        # tag is one of its parents; ancestors are parents in every case.
        if address.attr == 'text':
            found.add(address.element.tag)
        found.update(ancestor.tag for ancestor in address.element.iterancestors())
    found.discard(HTMLFragment._root_tag)
    return frozenset(found)
def involved_tags(self):
    """Provide all HTML tags directly involved in this string.

    A tag counts as involved when a tag boundary lies inside the string,
    i.e. the characters do not all share the same set of parent elements.
    """
    if len(self._addresses) < 2:
        # there can't be a tag boundary if there's only 1 or 0 characters
        return frozenset()
    # creating 'parent_sets' mapping, where the first item in tuple
    # is the address of character and the second is set
    # of character's parent HTML elements
    parent_sets = []
    # meanwhile we are creating also a set of common parents so we can
    # put them away later on (we're not interested in them as
    # they're only some global wrappers)
    common_parents = set()
    for addr in self._addresses:
        parents = set()
        if addr.attr == 'text':
            parents.add(addr.element)
        parents.update(addr.element.iterancestors())
        parent_sets.append((addr, parents))
        if not common_parents:
            common_parents = parents
        else:
            common_parents &= parents
    # constructing final set of involved tags
    involved_tags = set()
    prev_addr = None
    for addr, parents in parent_sets:
        parents = parents - common_parents
        involved_tags.update(p.tag for p in parents)
        # hidden tags - sometimes there are tags without text which
        # can hide between characters, but they actually break textflow
        is_tail_of_hidden = (
            prev_addr and
            addr.attr == 'tail' and
            prev_addr.element != addr.element
        )
        if is_tail_of_hidden:
            # NOTE(review): this adds the element *object* while the rest
            # of the set holds tag names -- possibly should be
            # ``addr.element.tag``; confirm against callers.
            involved_tags.add(addr.element)
        prev_addr = addr
    return frozenset(involved_tags)
def _parse(self, html):
    """Parse the given string as HTML and return its etree representation,
    always rooted at ``self._root_tag`` so the tree has a stable shape."""
    if self._has_body_re.search(html):
        self.has_body = True
        tree = lxml.html.document_fromstring(html).find('.//body')
    else:
        tree = lxml.html.fragment_fromstring(html,
                                             create_parent=self._root_tag)
    if tree.tag == self._root_tag:
        return tree
    # ensure the root element exists even if not really needed,
    # so the tree has always the same structure
    root = lxml.html.HtmlElement()
    root.tag = self._root_tag
    root.append(tree)
    return root
def _iter_texts(self, tree):
    """Iterate over `Text` objects in the given HTML tree, depth-first,
    skipping comments and the tags listed in ``self.skipped_tags``."""
    is_element = isinstance(tree, lxml.html.HtmlElement)  # excludes comments, etc.
    if is_element and tree.tag not in self.skipped_tags:
        if tree.text:
            yield Text(tree.text, tree, 'text')
        for child in tree:
            for text in self._iter_texts(child):
                yield text
    # The tail belongs to the parent's textflow, so it is yielded even for
    # skipped elements.
    if tree.tail:
        yield Text(tree.tail, tree, 'tail')
def _analyze_tree(self, tree):
    """Analyze the given tree and create the mapping of indexes to
    character addresses, normalizing whitespace characters to single
    spaces and trimming leading/trailing spaces."""
    addresses = []
    for text in self._iter_texts(tree):
        for position, char in enumerate(text.content):
            normalized = ' ' if char in whitespace else char
            addresses.append(CharAddress(normalized, text.element, text.attr, position))
    # remove leading and trailing whitespace
    while addresses and addresses[0].char == ' ':
        del addresses[0]
    while addresses and addresses[-1].char == ' ':
        del addresses[-1]
    return addresses
def _validate_index(self, index):
if isinstance(index, slice):
if index.step and index.step != 1:
raise IndexError('Step is not allowed.')
indexes = (index.start, index.stop)
else:
indexes = (index,)
for index in indexes:
if index is not None and index < 0:
raise IndexError('Negative indexes are not allowed.') | Validates given index, eventually raises errors. |
def _find_pivot_addr(self, index):
    """Choose a pivot address for insertion by slicing.

    Inserting by slicing can lead to a situation where no addresses are
    selected; the pivot tells us where to add characters.
    """
    if not self.addresses or index.start == 0:
        # insert at the very beginning of the string
        return CharAddress('', self.tree, 'text', -1)
    if index.start > len(self.addresses):
        # past the end -- pivot on the last known character
        return self.addresses[-1]
    return self.addresses[index.start]
def apply_connectivity_changes(request, add_vlan_action, remove_vlan_action, logger=None):
    """Standard implementation for the apply_connectivity_changes operation.

    Decodes the JSON connectivity-changes request, dispatches each action
    to the supplied add/remove VLAN callables, and combines the results
    into a single response object.

    :param str request: json string sent from the CloudShell server describing the connectivity changes to perform
    :param add_vlan_action: called for each SET_VLAN action; returns a ConnectivityActionResult
    :param remove_vlan_action: called for each REMOVE_VLAN action; returns a ConnectivityActionResult
    :param logger: logger to use for the operation; a default Python logger is used when omitted
    :return: a driver action result object that can be returned to the CloudShell server as the command result
    :rtype: DriverResponseRoot
    """
    if not logger:
        logger = logging.getLogger("apply_connectivity_changes")
    if request is None or request == '':
        raise Exception('ConnectivityOperations', 'request is None or empty')
    holder = connectivity_request_from_json(request)
    driver_response = DriverResponse()
    results = []
    driver_response_root = DriverResponseRoot()
    for action in holder.actions:
        # Fix: the original passed the dict as a stray positional argument
        # ('Action: ', action.__dict__), which makes logging raise a
        # "not all arguments converted" formatting error; use %s instead.
        logger.info('Action: %s', action.__dict__)
        if action.type == ConnectivityActionRequest.SET_VLAN:
            action_result = add_vlan_action(action)
        elif action.type == ConnectivityActionRequest.REMOVE_VLAN:
            action_result = remove_vlan_action(action)
        else:
            continue
        results.append(action_result)
    driver_response.actionResults = results
    driver_response_root.driverResponse = driver_response
    return driver_response_root
def check_api_key(email, api_key):
    """Check the API key of the user.

    Looks the user up by email in the DynamoDB people table.

    :return: the user item when the key matches, False otherwise.
    """
    table = boto3.resource("dynamodb").Table(os.environ['people'])
    record = table.get_item(Key={'email': email})
    if not record:
        return False
    user = record.get("Item")
    if api_key == user.get('api_key', None):
        return user
    return False
def lambda_handler(event, context):
    """Main handler: authenticate the caller and batch-write the submitted
    indicators (as MD5 hex digests) to DynamoDB."""
    email = event.get('email', None)
    api_key = event.get('api_key', None)
    # Fix: both credentials are required. The original used `or`, so a
    # request missing only one of them slipped past this check.
    if not (api_key and email):
        msg = "Missing authentication parameters in your request"
        return {'success': False, 'message': msg}
    indicators = list(set(event.get('indicators', list())))
    if len(indicators) == 0:
        return {'success': False, 'message': "No indicators sent in"}
    user = check_api_key(email, api_key)
    if not user:
        return {'success': False, 'message': "Email or API key was invalid."}
    role = check_role(user)
    if not role:
        return {'success': False, 'message': "Account not approved to contribute."}
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    table = boto3.resource("dynamodb").Table(os.environ['database'])
    with table.batch_writer(overwrite_by_pkeys=['indicator']) as batch:
        for item in indicators:
            if item == "":
                continue
            # Anything that is not already a 32-char MD5 hex digest gets hashed.
            if len(item) != 32:
                item = hashlib.md5(item).hexdigest()
            try:
                batch.put_item(Item={'indicator': item,
                                     'creator': user.get('email'),
                                     'datetime': current_time})
            except Exception as e:
                logger.error(str(e))
    msg = "Wrote {} indicators".format(len(indicators))
    return {'success': True, 'message': msg, 'writeCount': len(indicators)}
def parse_email_url(url):
    """Parse an email URL into a dict of EMAIL_* settings."""
    parsed = urlparse.urlparse(url)
    # Remove query strings
    path = parsed.path[1:].split('?', 2)[0]
    conf = {
        'EMAIL_FILE_PATH': path,
        'EMAIL_HOST_USER': parsed.username,
        'EMAIL_HOST_PASSWORD': parsed.password,
        'EMAIL_HOST': parsed.hostname,
        'EMAIL_PORT': parsed.port,
    }
    if parsed.scheme in EMAIL_SCHEMES:
        conf['EMAIL_BACKEND'] = EMAIL_SCHEMES[parsed.scheme]
    # only the smtps scheme implies TLS
    conf['EMAIL_USE_TLS'] = parsed.scheme == 'smtps'
    return conf
def config(name='EMAIL_URL', default='console://'):
    """Return a dictionary with EMAIL_* settings parsed from the
    environment variable *name* (an empty dict when it is unset)."""
    url = env(name, default)
    return parse_email_url(url) if url else {}
def replace(html, replacements=None):
    """Perform the given replacements on an HTML string and return the
    resulting unicode string; with no replacements, return the input
    unchanged."""
    if not replacements:
        return html  # no replacements
    fragment = HTMLFragment(html)
    for replacement in replacements:
        replacement.replace(fragment)
    return unicode(fragment)
def _is_replacement_allowed(self, s):
if any(tag in s.parent_tags for tag in self.skipped_tags):
return False
if any(tag not in self.textflow_tags for tag in s.involved_tags):
return False
return True | Tests whether replacement is allowed on given piece of HTML text. |
def replace(self, html):
    """Perform replacements on the given HTML fragment, in place.

    Repeatedly applies ``self.pattern`` one match at a time, translating
    match positions in the plain text back into positions in the HTML
    fragment and replacing there only when allowed
    (see ``_is_replacement_allowed``).
    """
    self.html = html
    text = html.text()
    # end offsets of already-processed matches in the plain text; their
    # sum converts positions in the shrinking `text` back into positions
    # in the full fragment
    positions = []

    def perform_replacement(match):
        offset = sum(positions)
        start, stop = match.start() + offset, match.end() + offset
        s = self.html[start:stop]
        if self._is_replacement_allowed(s):
            repl = match.expand(self.replacement)
            self.html[start:stop] = repl
        else:
            repl = match.group()  # no replacement takes place
        positions.append(match.end())
        return repl

    while True:
        if positions:
            # continue scanning after the last processed match
            text = text[positions[-1]:]
        text, n = self.pattern.subn(perform_replacement, text, count=1)
        if not n:  # all is already replaced
            break
def read_relative_file(filename, relative_to=None):
    """Return the contents of the given file, whose path is supposed
    relative to this package.

    :param filename: name of the file to read
    :param relative_to: path the filename is resolved against; defaults to
        this module's directory.

    NOTE(review): the open path joins ``os.path.dirname(relative_to)``, so
    with the default (already a directory) the file is looked up in this
    package's *parent* directory -- confirm this double dirname is intended.
    """
    if relative_to is None:
        relative_to = os.path.dirname(__file__)
    with open(os.path.join(os.path.dirname(relative_to), filename)) as f:
        return f.read()
def get_events(self):
    """Fetch up to 50 events from the cloud node and render them into a
    single human-readable message."""
    response = self._send_data('POST', 'admin', 'get-events', {'limit': 50})
    lines = ("Source IP: {ip}",
             "Datetime: {time}",
             "Indicator: {match}",
             "Method: {method}",
             "URL: {url}",
             "Request Type: {type}",
             "User-Agent: {userAgent}",
             "Contact: {contact}")
    template = "\n".join(lines) + "\n\n"
    output = {'message': ""}
    for event in response['events']:
        output['message'] += template.format(**event)
    return output
def flush_events(self):
    """Flush events from the cloud node and report the outcome."""
    response = self._send_data('DELETE', 'admin', 'flush-events', {})
    if response['success']:
        return {'message': "Events flushed"}
    return {'message': "Flushing of events failed"}
def put(self):
    """Push the info represented by this ``Metric`` to CloudWatch; any
    failure is logged rather than raised."""
    try:
        self.cloudwatch.put_metric_data(
            Namespace=self.namespace,
            MetricData=[{'MetricName': self.name,
                         'Value': self.value,
                         'Timestamp': self.timestamp}])
    except Exception:
        logging.exception("Error pushing {0} to CloudWatch.".format(str(self)))
def log(self, message, level=logging.INFO, *args, **kwargs):
    """Send a log entry enriched with this task's identity and a serialized
    message in ``extra``.

    :param str message: log message
    :param int level: logging level
    :param list args: log record arguments
    :param dict kwargs: log record key arguments
    """
    prefix = "{}.{}: {}[{}]: ".format(
        self.__class__.__name__, self.status, self.__class__.path, self.uuid
    )
    extra = kwargs.pop("extra", dict())
    extra.update(dict(kmsg=Message(
        self.uuid, entrypoint=self.__class__.path, params=self.params,
        metadata=self.metadata
    ).dump()))
    return logger.log(
        level=level, msg=prefix + message, extra=extra, *args, **kwargs
    )
def setFocus(self, reason=None):
    """Set focus to the first field of the form.  The *reason* argument is
    accepted for API compatibility but ignored."""
    first_field = self.formLayout.itemAt(0, QFormLayout.FieldRole).widget()
    first_field.setFocus()
def _connect(self):
    """Connect to the first reachable RabbitMQ instance in ``self._urls``.

    :returns: True on success
    :rtype: bool
    :raises pika.exceptions.AMQPConnectionError: when every URL fails
    """
    logger.info("Connecting to rabbit")
    for url in self._urls:
        try:
            self._connection = pika.BlockingConnection(pika.URLParameters(url))
            self._channel = self._connection.channel()
            self._declare()
            if self._confirm_delivery:
                self._channel.confirm_delivery()
                logger.info("Enabled delivery confirmation")
            logger.debug("Connected to rabbit")
            return True
        except pika.exceptions.AMQPConnectionError:
            logger.exception("Unable to connect to rabbit")
        except Exception:
            logger.exception("Unexpected exception connecting to rabbit")
    # every URL failed
    raise pika.exceptions.AMQPConnectionError
def _disconnect(self):
    """Cleanly close the RabbitMQ connection, logging (not raising) any
    failure.

    :returns: None
    """
    try:
        self._connection.close()
        logger.debug("Disconnected from rabbit")
    except Exception:
        logger.exception("Unable to close connection")
def publish_message(self, message, content_type=None, headers=None, mandatory=False, immediate=False):
    """Publish a response message to a RabbitMQ instance.

    Connects first, then delegates the actual publish; every known failure
    mode is logged and re-raised as ``PublishMessageError``.

    :param message: Response message
    :param content_type: Pika BasicProperties content_type value
    :param headers: Message header properties
    :param mandatory: The mandatory flag
    :param immediate: The immediate flag
    :returns: Boolean corresponding to the success of publishing
    :rtype: bool
    :raises PublishMessageError: when the message could not be published
    """
    logger.debug("Publishing message")
    try:
        self._connect()
        return self._do_publish(mandatory=mandatory,
                                immediate=immediate,
                                content_type=content_type,
                                headers=headers,
                                message=message)
    except pika.exceptions.AMQPConnectionError:
        logger.error("AMQPConnectionError occurred. Message not published.")
        raise PublishMessageError
    except NackError:
        # raised when a message published in publisher-acknowledgments mode
        # is returned via `Basic.Return` followed by `Basic.Ack`.
        logger.error("NackError occurred. Message not published.")
        raise PublishMessageError
    except UnroutableError:
        # raised when a message published in publisher-acknowledgments
        # mode is returned via `Basic.Return` followed by `Basic.Ack`.
        logger.error("UnroutableError occurred. Message not published.")
        raise PublishMessageError
    except Exception:
        logger.exception("Unknown exception occurred. Message not published.")
        raise PublishMessageError
def on_execute__set_surface_alphas(self, request):
    '''
    Set the alpha (opacity) for each surface named in the request and
    queue a redraw.

    .. versionchanged:: 0.12
        Queue redraw after setting surface alphas.
    '''
    # Fix: the source contained two copies of this definition fused
    # together mid-line; this is the single clean definition.
    data = decode_content_data(request)
    logger.debug('[on_execute__set_surface_alphas] %s',
                 data['surface_alphas'])
    for name, alpha in data['surface_alphas'].iteritems():
        self.parent.canvas_slave.set_surface_alpha(name, alpha)
    self.parent.canvas_slave.render()
    gobject.idle_add(self.parent.canvas_slave.draw)
def on_execute__set_dynamic_electrode_states(self, request):
    '''
    Set dynamic electrode states from the request payload.

    .. versionadded:: 0.15
    '''
    # Fix: the source contained two copies of this definition fused
    # together mid-line; this is the single clean definition.
    data = decode_content_data(request)
    self.parent.on_dynamic_electrode_states_set(data['electrode_states'])
def on_connect_button__clicked(self, event):
    '''
    Connect to Zero MQ plugin hub (`zmq_plugin.hub.Hub`) using the settings
    from the text entry fields (e.g., hub URI, plugin name).

    Emit `plugin-connected` signal with the new plugin instance after hub
    connection has been established.
    '''
    # Fix: the source contained two copies of this definition fused
    # together mid-line; this is the single clean definition.
    hub_uri = self.plugin_uri.get_text()
    ui_plugin_name = self.ui_plugin_name.get_text()
    plugin = self.create_plugin(ui_plugin_name, hub_uri)
    self.init_plugin(plugin)
    # Prevent double-connects while this plugin instance is live.
    self.connect_button.set_sensitive(False)
    self.emit('plugin-connected', plugin)
def create_quali_api_instance(context, logger):
    """Create a QualiAPIHelper from the attributes found on *context*.

    :param context: driver context carrying connectivity and reservation info
    :param logger: logger passed through to the helper
    :return: configured QualiAPIHelper instance
    """
    # Prefer the (remote) reservation's domain when one is present.
    domain = None
    if getattr(context, 'reservation', None):
        domain = context.reservation.domain
    elif getattr(context, 'remote_reservation', None):
        domain = context.remote_reservation.domain
    address = context.connectivity.server_address
    token = context.connectivity.admin_auth_token
    if token:
        return QualiAPIHelper(address, logger, token=token, domain=domain)
    return QualiAPIHelper(address, logger, username='admin', password='admin', domain=domain)
def login(self):
    """Log in against the Quali API -- using the token when available,
    otherwise username/password -- and install the resulting authorization
    header on the session.

    :return:
    """
    uri = 'API/Auth/Login'
    if self._token:
        json_data = {'token': self._token, 'domain': self._domain}
    else:
        json_data = {'username': self._username, 'password': self._password, 'domain': self._domain}
    result = self.__rest_client.request_put(uri, json_data)
    # The API returns the token wrapped in quotes; strip them for the header.
    auth_header = "Basic {0}".format(result.replace('"', ''))
    self.__rest_client.session.headers.update(authorization=auth_header)
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine, though
    an Engine is acceptable here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.  Calls to
    context.execute() here emit the given string to the script output.
    """
    database_url = winchester_config['database']['url']
    context.configure(url=database_url)
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine and associate a
    connection with the context.
    """
    engine = engine_from_config(
        winchester_config['database'],
        prefix='',
        poolclass=pool.NullPool)
    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even if a migration fails.
        connection.close()
def follow_cf(save, Uspan, target_cf, nup, n_tot=5.0, slsp=None):
    """Calculate the quasiparticle weight in a single-site spin
    hamiltonian with N degenerate half-filled orbitals.

    Follows the given interaction span ``Uspan`` at a fixed target
    crystal field and stores z, lambda, mu, populations and mean fields
    into the ``save`` group under ``cf=<target_cf>``.
    """
    if slsp is None:  # 'is None', not '== None': identity, not equality
        slsp = Spinon(slaves=6, orbitals=3, avg_particles=n_tot,
                      hopping=[0.5]*6, populations=np.asarray([n_tot]*6)/6)
    zet, lam, mu, mean_f = [], [], [], []
    for co in Uspan:
        print('U=', co, 'del=', target_cf)
        res = root(targetpop, nup[-1], (co, target_cf, slsp, n_tot))
        print(res.x)
        # Stop following this branch once the upper density starts growing.
        if res.x > nup[-1]:
            break
        nup.append(res.x)
        slsp.param['populations'] = population_distri(nup[-1])
        mean_f.append(slsp.mean_field())
        zet.append(slsp.quasiparticle_weight())
        lam.append(slsp.param['lambda'])
        mu.append(orbital_energies(slsp.param, zet[-1]))
    case = save.createGroup('cf={}'.format(target_cf))
    varis = st.setgroup(case)
    st.storegroup(varis, Uspan[:len(zet)], zet, lam, mu, nup[1:], target_cf, mean_f)
def targetpop(upper_density, coul, target_cf, slsp, n_tot):
    """Restriction on finding the right populations that leave the
    crystal field the same.

    Returns the difference between the target and the obtained crystal
    field; a root of this function yields the sought population.
    """
    # Below this density the search is out of range; report "no error".
    if upper_density < 0.503:
        return 0.
    trypops = population_distri(upper_density, n_tot)
    slsp.set_filling(trypops)
    slsp.selfconsistency(coul, 0)
    efm_free = dos_bethe_find_crystalfield(trypops, slsp.param['hopping'])
    orb_ener = slsp.param['lambda'] + slsp.quasiparticle_weight()*efm_free
    # Crystal field = splitting between highest and lowest orbital.
    obtained_cf = orb_ener[5] - orb_ener[0]
    return target_cf - obtained_cf
def produce_pdf(rst_content=None, doctree_content=None, filename=None):
    """Produce a pdf file based on the given rst content.

    If ``filename`` is given, the result is stored under that name;
    otherwise a pdf with a random name is generated in the system
    temporary directory.

    :return: the path of the generated pdf file
    """
    if filename is None:
        # tempfile.mkstemp instead of a hand-rolled random name in /tmp:
        # avoids predictable-name races in the shared temp directory.
        import tempfile
        fd, filename = tempfile.mkstemp(suffix='.pdf')
        os.close(fd)
    r2p = RstToPdf(stylesheets=['pdf.style'],
                   style_path=[os.path.join(os.path.dirname(__file__),
                                            'styles')],
                   breaklevel=0,
                   splittables=True,
                   footer="""###Title### - ###Page###/###Total###""")
    r2p.createPdf(text=rst_content,
                  doctree=doctree_content,
                  output=filename)
    return filename
def load(self, filename=None):
    """Load the data file; overridden to set spectrum.filename as well."""
    DataFile.load(self, filename)
    self.spectrum.filename = filename
def _do_save_as(self, filename):
    """Save spectrum back to FITS file.

    :raise RuntimeError: if the spectrum has fewer than two points
    """
    if len(self.spectrum.x) < 2:
        raise RuntimeError("Spectrum must have at least two points")
    if os.path.isfile(filename):
        os.unlink(filename)  # PyFITS does not overwrite file
    hdu = self.spectrum.to_hdu()
    overwrite_fits(hdu, filename)
def alternator(*pipes):
    """A lot like zip, just instead of::

        (a,b),(a,b),(a,b)

    it works more like::

        a,b,a,b,a,b,a

    until one of the pipes ends.
    """
    try:
        # Round-robin over one iterator per pipe; the first exhausted
        # pipe raises StopIteration and terminates the whole generator.
        for p in cycle(map(iter, pipes)):
            yield next(p)
    except StopIteration:
        pass
def matches():
    """Return a list of the currently running WvW matches, with the
    participating worlds included in the result.  Further details about
    a match can be requested using the ``match_details`` function.

    Each match object contains the following properties:
    ``wvw_match_id`` (string), ``red_world_id``, ``blue_world_id`` and
    ``green_world_id`` (numbers), and ``start_time``/``end_time``
    (datetimes).
    """
    wvw_matches = get_cached("wvw/matches.json", False).get("wvw_matches")
    # Convert the ISO timestamp strings into datetime objects in place.
    for match in wvw_matches:
        match["start_time"] = parse_datetime(match["start_time"])
        match["end_time"] = parse_datetime(match["end_time"])
    return wvw_matches
def objective_names(lang="en"):
    """Return the localized WvW objective names for the specified language.

    *Note that these are not the names displayed in the game, but rather
    the abstract type.*

    :param lang: The language to query the names for.
    :return: A dictionary mapping the objective ids to the names.
    """
    params = {"lang": lang}
    cache_name = "objective_names.%(lang)s.json" % params
    data = get_cached("wvw/objective_names.json", cache_name, params=params)
    # Dict comprehension instead of dict([...]) — same result, clearer.
    return {objective["id"]: objective["name"] for objective in data}
def _parse_data(self, data, charset):
    """Parse the xml data into a dictionary."""
    builder = TreeBuilder(numbermode=self._numbermode)
    # Strings are parsed directly; anything else is treated as a
    # file-like object / stream.
    if isinstance(data, basestring):
        xml.sax.parseString(data, builder)
    else:
        xml.sax.parse(data, builder)
    return builder.root[self._root_element_name()]
def _format_data(self, data, charset):
    """Format data into an XML document string."""
    # Empty payload -> empty document.
    if data is None or data == '':
        return u''
    stream = StringIO.StringIO()
    xml = SimplerXMLGenerator(stream, charset)
    xml.startDocument()
    xml.startElement(self._root_element_name(), {})
    self._to_xml(xml, data)
    xml.endElement(self._root_element_name())
    xml.endDocument()
    return stream.getvalue()
def _to_xml(self, xml, data, key=None):
    """Recursively convert the data into xml.

    This function was originally copied from the
    `Piston project <https://bitbucket.org/jespern/django-piston/>`_
    and has been modified since.

    :param xml: the xml document
    :type xml: SimplerXMLGenerator
    :param data: data to be formatted
    :param key: name of the parent element (``None`` for the root)
    """
    if isinstance(data, (list, tuple)):
        # Sequences: wrap each item in a list-item element.
        for item in data:
            elemname = self._list_item_element_name(key)
            xml.startElement(elemname, {})
            self._to_xml(xml, item)
            xml.endElement(elemname)
    elif isinstance(data, dict):
        # Mappings: one child element per key.
        for key, value in data.iteritems():
            xml.startElement(key, {})
            self._to_xml(xml, value, key)
            xml.endElement(key)
    else:
        # Scalars become text content.
        xml.characters(smart_unicode(data))
def startElement(self, name, attrs):
    """Initialize a new node and push the current node onto the stack."""
    self.stack.append((self.current, self.chardata))
    self.current = {}
    self.chardata = []
def endElement(self, name):
    """End the current xml element; parse it and add it to the parent node."""
    if self.current:
        # we have nested elements
        obj = self.current
    else:
        # text only node
        text = ''.join(self.chardata).strip()
        obj = self._parse_node_data(text)
    # Pop the parent and attach the finished element to it.
    newcurrent, self.chardata = self.stack.pop()
    self.current = self._element_to_node(newcurrent, name, obj)
def _parse_node_data(self, data):
data = data or ''
if self.numbermode == 'basic':
return self._try_parse_basic_number(data)
elif self.numbermode == 'decimal':
return self._try_parse_decimal(data)
else:
return data | Parse the value of a node. Override to provide your own parsing. |
def _try_parse_basic_number(self, data):
# try int first
try:
return int(data)
except ValueError:
pass
# try float next
try:
return float(data)
except ValueError:
pass
# no luck, return data as it is
return data | Try to convert the data into ``int`` or ``float``.
:returns: ``Decimal`` or ``data`` if conversion fails. |
def _element_to_node(self, node, name, value):
# is the target node a list?
try:
node.append(value)
except AttributeError:
pass
else:
return node
# target node is a dict
if name in node:
# there's already an element with same name -> convert the node into list
node = node.values() + [value]
else:
# just add the value into the node
node[name] = value
return node | Insert the parsed element (``name``, ``value`` pair) into the node.
You should always use the returned node and forget the one
that was given in parameter.
:param node: the node where the is added to
:returns: the node. Note that this may be a new node instance. |
def _get_programs_dict(pkgname_only, flag_protected, flag_no_pfant=False):
    """Return dictionary ``{(package description): [ExeInfo0, ...], ...}``."""
    allinfo = f311.get_programs_dict(pkgname_only, flag_protected)
    # Optionally augment the pyfant entry with the PFANT executables.
    if not flag_no_pfant and "pyfant" in allinfo:
        _add_PFANT(allinfo)
    return allinfo
def apize_raw(url, method='GET'):
    """Decorator factory: turn the decorated function's returned dict
    into an HTTP request and return the response.

    The decorated function must return a dict which may carry the keys
    ``data``, ``args``, ``params``, ``headers``, ``cookies``,
    ``timeout``, ``is_json`` and ``verify_cert``.

    :raise BadReturnVarType: if the decorated function does not return
        a dict
    """
    import functools

    def decorator(func):
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def wrapper(*args, **kwargs):
            elem = func(*args, **kwargs)
            # isinstance instead of type(...) is not dict: also accepts
            # dict subclasses, which behave identically here.
            if not isinstance(elem, dict):
                raise BadReturnVarType(func.__name__)
            return send_request(url, method,
                                elem.get('data', {}),
                                elem.get('args', {}),
                                elem.get('params', {}),
                                elem.get('headers', {}),
                                elem.get('cookies', {}),
                                elem.get('timeout', 8),
                                elem.get('is_json', False),
                                elem.get('verify_cert', True))
        return wrapper
    return decorator
def extract_version(path):
    """Read the file at the specified path and return the version in it.

    This is meant for reading the ``__init__.py`` file inside a package,
    and so it expects a version field like::

        __version__ = '1.0.0'

    :param path: path to the package directory (including the trailing
        separator, since ``'__init__.py'`` is appended directly)
    :return: the version string, or ``None`` if no version was found
    """
    # Regular expression for the version
    _version_re = re.compile(r'__version__\s+=\s+(.*)')
    with open(path + '__init__.py', 'r', encoding='utf-8') as f:
        contents = f.read()
    # Guard clauses instead of nested if/else.
    if not contents:
        return None
    match = _version_re.search(contents)
    if match is None:
        return None
    # literal_eval strips the quoting around the version expression.
    return str(ast.literal_eval(match.group(1).rstrip()))
def _make_connect(module, args, kwargs):
# pylint: disable-msg=W0142
return functools.partial(module.connect, *args, **kwargs) | Returns a function capable of making connections with a particular
driver given the supplied credentials. |
def connect(module, *args, **kwargs):
    """Connect to a database using the given DB-API driver module.

    Returns a database context representing that connection.  Any
    arguments or keyword arguments are passed to the module's
    :py:func:`connect` function.
    """
    mdr = SingleConnectionMediator(
        module, _make_connect(module, args, kwargs))
    return Context(module, mdr)
def create_pool(module, max_conns, *args, **kwargs):
    """Create a connection pool appropriate to the driver module's
    capabilities.

    :raise NotSupported: if the driver's threadsafety cannot be determined
    :raise ValueError: if ``max_conns`` < 1 or the threadsafety level is 0
    """
    if not hasattr(module, 'threadsafety'):
        raise NotSupported("Cannot determine driver threadsafety.")
    if max_conns < 1:
        raise ValueError("Minimum number of connections is 1.")
    # Level >= 2: connections may be shared -> real pool.
    if module.threadsafety >= 2:
        return Pool(module, max_conns, *args, **kwargs)
    # Level 1: threads may share the module but not connections.
    if module.threadsafety >= 1:
        return DummyPool(module, *args, **kwargs)
    raise ValueError("Bad threadsafety level: %d" % module.threadsafety)
def transactional(wrapped):
    """A decorator to denote that the content of the decorated function
    or method is to be run in a transaction.

    Equivalent to wrapping the function body in
    ``with Context.current().transaction():`` — the transaction commits
    when the function returns and rolls back if it raises.
    """
    # pylint: disable-msg=C0111
    def wrapper(*args, **kwargs):
        with Context.current().transaction():
            return wrapped(*args, **kwargs)
    return functools.update_wrapper(wrapper, wrapped)
def execute(stmt, args=()):
    """Execute an SQL statement. Returns the number of affected rows."""
    ctx = Context.current()
    with ctx.mdr:
        cursor = ctx.execute(stmt, args)
        row_count = cursor.rowcount
        # Close eagerly: only the row count is needed.
        _safe_close(cursor)
    return row_count
def query(stmt, args=(), factory=None):
    """Execute a query. This returns an iterator over the result set."""
    ctx = Context.current()
    # Fall back to the context's default row factory.
    factory = ctx.default_factory if factory is None else factory
    with ctx.mdr:
        return factory(ctx.execute(stmt, args), ctx.mdr)
def query_row(stmt, args=(), factory=None):
    """Execute a query. Returns the first row of the result set,
    or `None` if the query returned no rows.
    """
    for row in query(stmt, args, factory):
        return row
    return None
def query_value(stmt, args=(), default=None):
    """Execute a query, returning the first value in the first row of the
    result set.  If the query returns no result set, ``default`` is
    returned, which is `None` by default.
    """
    for row in query(stmt, args, TupleFactory):
        return row[0]
    return default
def execute_proc(procname, args=()):
    """Execute a stored procedure. Returns the number of affected rows."""
    ctx = Context.current()
    with ctx.mdr:
        cursor = ctx.execute_proc(procname, args)
        row_count = cursor.rowcount
        # Close eagerly: only the row count is needed.
        _safe_close(cursor)
    return row_count
def query_proc(procname, args=(), factory=None):
    """Execute a stored procedure. This returns an iterator over the
    result set.
    """
    ctx = Context.current()
    factory = ctx.default_factory if factory is None else factory
    with ctx.mdr:
        return factory(ctx.execute_proc(procname, args), ctx.mdr)
def query_proc_row(procname, args=(), factory=None):
    """Execute a stored procedure. Returns the first row of the result
    set, or `None` if it returned no rows.
    """
    for row in query_proc(procname, args, factory):
        return row
    return None
def query_proc_value(procname, args=(), default=None):
    """Execute a stored procedure, returning the first value in the first
    row of the result set.  If it returns no result set, ``default`` is
    returned, which is `None` by default.
    """
    for row in query_proc(procname, args, TupleFactory):
        return row[0]
    return default
def make_placeholders(seq, start=1):
    """Generate placeholders for the given sequence.

    Dicts work with the 'named' and 'pyformat' parameter styles;
    lists/tuples work with 'numeric', 'qmark', 'format' and 'pyformat'.

    :raise ValueError: if the sequence is empty
    :raise NotSupported: if the current context's param style cannot
        represent the given sequence type
    """
    if len(seq) == 0:
        raise ValueError('Sequence must have at least one element.')
    param_style = Context.current().param_style
    placeholders = None
    if isinstance(seq, dict):
        if param_style in ('named', 'pyformat'):
            template = ':%s' if param_style == 'named' else '%%(%s)s'
            placeholders = (template % key
                            for key in six.iterkeys(seq))
    elif isinstance(seq, (list, tuple)):
        if param_style == 'numeric':
            placeholders = (':%d' % i
                            for i in xrange(start, start + len(seq)))
        elif param_style in ('qmark', 'format', 'pyformat'):
            placeholders = itertools.repeat(
                '?' if param_style == 'qmark' else '%s',
                len(seq))
    # Anything left unset is an unsupported style/type combination.
    if placeholders is None:
        raise NotSupported(
            "Param style '%s' does not support sequence type '%s'" % (
                param_style, seq.__class__.__name__))
    return ', '.join(placeholders)
def make_file_object_logger(fh):
    """Make a logger that logs to the given file object."""
    def logger_func(stmt, args, fh=fh):
        """Log the statement and its arguments to the bound file object."""
        now = datetime.datetime.now()
        six.print_("Executing (%s):" % now.isoformat(), file=fh)
        # Dedent so multi-line SQL literals read naturally in the log.
        six.print_(textwrap.dedent(stmt), file=fh)
        six.print_("Arguments:", file=fh)
        pprint.pprint(args, fh)
    return logger_func
def current(cls, with_exception=True):
if with_exception and len(cls.stack) == 0:
raise NoContext()
return cls.stack.top() | Returns the current database context. |
def transaction(self):
    """Set up a context where all the statements within it are run
    within a single database transaction. For internal use only.
    """
    # The idea here is to fake the nesting of transactions. Only when
    # we've gotten back to the topmost transaction context do we actually
    # commit or rollback.
    with self.mdr:
        try:
            self._depth += 1
            yield self
            self._depth -= 1
        except self.mdr.OperationalError:
            # We've lost the connection, so there's no sense in
            # attempting to roll back the transaction.
            self._depth -= 1
            raise
        except:
            self._depth -= 1
            if self._depth == 0:
                self.mdr.rollback()
            raise
        if self._depth == 0:
            self.mdr.commit()
def cursor(self):
    """Get a cursor for the current connection. For internal use only."""
    cursor = self.mdr.cursor()
    with self.transaction():
        try:
            yield cursor
            # Record bookkeeping info for the caller's convenience.
            if cursor.rowcount != -1:
                self.last_row_count = cursor.rowcount
            self.last_row_id = getattr(cursor, 'lastrowid', None)
        except:
            self.last_row_count = None
            self.last_row_id = None
            _safe_close(cursor)
            raise
def execute(self, stmt, args):
    """Execute a statement, returning a cursor. For internal use only."""
    self.logger(stmt, args)
    with self.cursor() as cursor:
        cursor.execute(stmt, args)
        return cursor
def execute_proc(self, procname, args):
    """Execute a stored procedure, returning a cursor. For internal use
    only.
    """
    self.logger(procname, args)
    with self.cursor() as cursor:
        cursor.callproc(procname, args)
        return cursor
def close(self):
    """Close the connection this context wraps."""
    self.logger = None
    # Drop the driver exception references attached to this context.
    for exc in _EXCEPTIONS:
        setattr(self, exc, None)
    try:
        self.mdr.close()
    finally:
        self.mdr = None
def connect(self):
    """Return a context that uses this pool as a connection source."""
    ctx = Context(self.module, self.create_mediator())
    # Propagate the pool's logging and row-factory configuration.
    ctx.logger = self.logger
    ctx.default_factory = self.default_factory
    return ctx
def close(self):
    """Release all resources associated with this factory.

    Closes the cursor and exits the mediator, collecting any exception
    raised along the way and re-raising it only after both resources
    have been released.
    """
    if self.mdr is None:
        return  # already closed
    exc = (None, None, None)
    try:
        self.cursor.close()
    except:
        exc = sys.exc_info()
    try:
        # Give the mediator a chance to suppress the pending exception.
        if self.mdr.__exit__(*exc):
            exc = (None, None, None)
    except:
        exc = sys.exc_info()
    self.mdr = None
    self.cursor = None
    if exc != (None, None, None):
        six.reraise(*exc)
def add_item(cls, item, **kwargs):
    """Add item.

    Add new item to the shopping cart.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.add_item(item, async=True)
    >>> result = thread.get()

    :param async bool
    :param LineItem item: Line item to add to cart (required)
    :return: ShoppingCart
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return whatever the helper yields
    # (deserialized data or the request thread, respectively).
    return cls._add_item_with_http_info(item, **kwargs)
def checkout(cls, order, **kwargs):
    """Checkout cart.

    Checkout cart, making an order.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.checkout(order, async=True)
    >>> result = thread.get()

    :param async bool
    :param Order order: Required order details. (required)
    :return: Order
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return the helper's result unchanged.
    return cls._checkout_with_http_info(order, **kwargs)
def delete_item(cls, item_id, **kwargs):
    """Remove item.

    Remove item from shopping cart.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_item(item_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str item_id: Item ID to delete. (required)
    :return: ShoppingCart
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return the helper's result unchanged.
    return cls._delete_item_with_http_info(item_id, **kwargs)
def empty(cls, **kwargs):
    """Empty cart.

    Empty the shopping cart.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.empty(async=True)
    >>> result = thread.get()

    :param async bool
    :return: ShoppingCart
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return the helper's result unchanged.
    return cls._empty_with_http_info(**kwargs)
def get(cls, **kwargs):
    """Get cart.

    Retrieve the shopping cart of the current session.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get(async=True)
    >>> result = thread.get()

    :param async bool
    :return: ShoppingCart
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return the helper's result unchanged.
    return cls._get_with_http_info(**kwargs)
def update_item(cls, item_id, item, **kwargs):
    """Update cart.

    Update cart item.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.update_item(item_id, item, async=True)
    >>> result = thread.get()

    :param async bool
    :param str item_id: Item ID to update. (required)
    :param LineItem item: Line item to update. (required)
    :return: ShoppingCart
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both return the helper's result unchanged.
    return cls._update_item_with_http_info(item_id, item, **kwargs)
def get_perm_names(cls, resource):
    """Return all permission names supported by the resource.

    This is used for auto-generating missing permission rows into the
    database in syncdb.
    """
    return [cls.get_perm_name(resource, method) for method in cls.METHODS]
def get_perm_name(cls, resource, method):
    """Compose the permission name.

    :param resource: the resource
    :param method: the request method (case doesn't matter)
    """
    return '%s_%s_%s' % (
        cls.PREFIX,
        cls._get_resource_name(resource),
        method.lower())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.