def forgot_password():
"""View function that handles a forgotten password request."""
form_class = _security.forgot_password_form
if request.is_json:
form = form_class(MultiDict(request.get_json()))
else:
form = form_class()
if form.validate_on_submit():
send_reset_password_instructions(form.user)
if not request.is_json:
do_flash(*get_message('PASSWORD_RESET_REQUEST',
email=form.user.email))
if request.is_json:
return _render_json(form, include_user=False)
return _security.render_template(config_value('FORGOT_PASSWORD_TEMPLATE'),
forgot_password_form=form,
**_ctx('forgot_password'))
def get_method_documentation(method):
"""
This function uses "inspect" to retrieve information about a method.
Also, if the method has a docstring, it can be documented with reStructuredText.
:param method: method to describe
:returns:
{
'name' : <string> - name of the method,
'friendly_name' : <string> - friendly name of the method,
'parameters' : {
'required' : [ 'param1', 'param2' ],
'optional' : {
'param3' : 'default_value3',
'param4' : 'default_value4',
},
},
'help' : {
'summary' : <string> - Summary - general description like in the comment,
'parameters' : {
'param1' : 'description',
'param2' : 'description',
},
'return' : <string> - Can be multiline,
}
}
"""
import re
from inspect import getargspec
result = {
'name': method.__name__,
'friendly_name': ' '.join([name.capitalize() for name in method.__name__.split('_')]),
}
arg_specs = getargspec(method)
arguments = {}
if not arg_specs.defaults:
if len(arg_specs.args[1:]) > 0:
arguments['required'] = list(arg_specs.args[1:])
else:
if len(arg_specs.args[1:-(len(arg_specs.defaults))]):
arguments['required'] = list(arg_specs.args[1:-(len(arg_specs.defaults))])
arguments['optional'] = {}
for i in range(len(arg_specs.defaults)):
arguments['optional'][arg_specs.args[-(len(arg_specs.defaults)) + i]] = arg_specs.defaults[i]
if arguments != {}:
result['parameters'] = arguments
doc = method.__doc__.strip() if method.__doc__ else ''
if ':' in doc:
doc = {'summary': doc[:doc.find(' :')].strip()}
params = re.findall(r":param ([^\s]*): (.*)\n", method.__doc__)
if len(params) > 0:
doc['parameters'] = {}
for param in params:
doc['parameters'][param[0]] = param[1].strip()
regex = re.compile(r":returns:(.*)", re.MULTILINE | re.DOTALL)
returns = regex.search(method.__doc__)
if returns and returns.group(0):
doc['return'] = returns.group(0).replace(':returns:', '').replace('\n ', '\n').strip()
if doc != '':
result['help'] = doc
return result
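# A minimal usage sketch for get_method_documentation (assumes the function above
# is in scope along with its use of the deprecated inspect.getargspec; the Greeter
# class below is a hypothetical toy input).
import pprint

class Greeter(object):
    def greet(self, name, punctuation='!'):
        """Builds a greeting.
        :param name: person to greet
        :param punctuation: trailing punctuation
        :returns: the greeting string
        """
        return 'Hello ' + name + punctuation

# The result would resemble:
# {'name': 'greet', 'friendly_name': 'Greet',
#  'parameters': {'required': ['name'], 'optional': {'punctuation': '!'}},
#  'help': {'summary': 'Builds a greeting.',
#           'parameters': {'name': 'person to greet',
#                          'punctuation': 'trailing punctuation'},
#           'return': 'the greeting string'}}
pprint.pprint(get_method_documentation(Greeter.greet))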
def add_state_editor(self, state_m):
"""Triggered whenever a state is selected.
:param state_m: The selected state model.
"""
state_identifier = self.get_state_identifier(state_m)
if state_identifier in self.closed_tabs:
state_editor_ctrl = self.closed_tabs[state_identifier]['controller']
state_editor_view = state_editor_ctrl.view
handler_id = self.closed_tabs[state_identifier]['source_code_changed_handler_id']
source_code_view_is_dirty = self.closed_tabs[state_identifier]['source_code_view_is_dirty']
del self.closed_tabs[state_identifier] # pages not in self.closed_tabs and self.tabs at the same time
else:
state_editor_view = StateEditorView()
if isinstance(state_m, LibraryStateModel):
state_editor_view['main_notebook_1'].set_current_page(
state_editor_view['main_notebook_1'].page_num(state_editor_view.page_dict["Data Linkage"]))
state_editor_ctrl = StateEditorController(state_m, state_editor_view)
self.add_controller(state_identifier, state_editor_ctrl)
if state_editor_ctrl.get_controller('source_ctrl') and state_m.state.get_next_upper_library_root_state() is None:
# observe changes to set the mark-dirty flag
handler_id = state_editor_view.source_view.get_buffer().connect('changed', self.script_text_changed,
state_m)
self.view.get_top_widget().connect('draw', state_editor_view.source_view.on_draw)
else:
handler_id = None
source_code_view_is_dirty = False
(tab, inner_label, sticky_button) = create_tab_header('', self.on_tab_close_clicked,
self.on_toggle_sticky_clicked, state_m)
set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty)
state_editor_view.get_top_widget().title_label = inner_label
state_editor_view.get_top_widget().sticky_button = sticky_button
page_content = state_editor_view.get_top_widget()
page_id = self.view.notebook.prepend_page(page_content, tab)
page = self.view.notebook.get_nth_page(page_id)
self.view.notebook.set_tab_reorderable(page, True)
page.show_all()
self.view.notebook.show()
self.tabs[state_identifier] = {'page': page, 'state_m': state_m,
'controller': state_editor_ctrl, 'sm_id': self.model.selected_state_machine_id,
'is_sticky': False,
'source_code_view_is_dirty': source_code_view_is_dirty,
'source_code_changed_handler_id': handler_id}
return page_id
def register_converter(operator_name, conversion_function, overwrite=False):
'''
:param operator_name: A unique operator ID. It is usually a string but you can use a type as well
:param conversion_function: A callable object
:param overwrite: By default, we raise an exception if the caller of this function is trying to assign an existing
key (i.e., operator_name) a new value (i.e., conversion_function). Set this flag to True to enable overwriting.
'''
if not overwrite and operator_name in _converter_pool:
raise ValueError('We do not overwrite a registered converter by default')
_converter_pool[operator_name] = conversion_function
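# A brief usage sketch for register_converter (assumes the module-level
# _converter_pool dict used above; 'my_op' and convert_my_op are hypothetical).
_converter_pool = {}

def convert_my_op(scope, operator, container):
    # hypothetical conversion callback
    pass

register_converter('my_op', convert_my_op)                    # first registration succeeds
register_converter('my_op', convert_my_op, overwrite=True)    # re-registering requires overwrite=True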
def get_all_apps():
"""Get a list of all applications in Spinnaker.
Returns:
list: JSON-decoded list of all applications from Gate.
"""
LOG.info('Retrieving list of all Spinnaker applications')
url = '{}/applications'.format(API_URL)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Could not retrieve application list'
pipelines = response.json()
LOG.debug('All Applications:\n%s', pipelines)
return pipelines
def filterAcceptsRow(self, row, parentindex):
"""Return True, if the filter accepts the given row of the parent
:param row: the row to filter
:type row: :class:`int`
:param parentindex: the parent index
:type parentindex: :class:`QtCore.QModelIndex`
:returns: True, if the filter accepts the row
:rtype: :class:`bool`
:raises: None
"""
if not super(ReftrackSortFilterModel, self).filterAcceptsRow(row, parentindex):
return False
if parentindex.isValid():
m = parentindex.model()
else:
m = self.sourceModel()
i = m.index(row, 18, parentindex)
reftrack = i.data(REFTRACK_OBJECT_ROLE)
if not reftrack:
return True
else:
return self.filter_accept_reftrack(reftrack)
def getmlsthelper(referencefilepath, start, organism, update):
"""Prepares to run the getmlst.py script provided in SRST2"""
from accessoryFunctions.accessoryFunctions import GenObject
# Initialise a set for the organism(s) for which new alleles and profiles are desired
organismset = set()
# Allow for Shigella to use the Escherichia MLST profile/alleles
organism = organism if organism != 'Shigella' else 'Escherichia'
# As there are multiple profiles for certain organisms, this dictionary has the schemes I use as values
organismdictionary = {'Escherichia': 'Escherichia coli#1',
'Shigella': 'Escherichia coli#1',
'Vibrio': 'Vibrio parahaemolyticus',
'Campylobacter': 'Campylobacter jejuni',
'Listeria': 'Listeria monocytogenes',
'Bacillus': 'Bacillus cereus',
'Klebsiella': 'Klebsiella pneumoniae'}
# Allow for a genus not in the dictionary being specified
try:
organismset.add(organismdictionary[organism])
except KeyError:
# Add the organism to the set
organismset.add(organism)
for scheme in organismset:
organismpath = os.path.join(referencefilepath, 'MLST', organism)
# Find all folders (with the trailing / in the glob search) and remove the trailing /
try:
lastfolder = sorted(glob('{}/*/'.format(organismpath)))[-1].rstrip('/')
except IndexError:
lastfolder = []
# Run the method to determine the most recent folder, and how recently it was updated
delta, foldersize, d1 = schemedate(lastfolder)
# Set the path/name of the folder to contain the new alleles and profile
newfolder = '{}/{}'.format(organismpath, d1)
if update:
if delta.days > 7 or foldersize < 100:
printtime('Downloading {} MLST scheme from pubmlst.org'.format(organism), start)
# Create the object to store the argument attributes to feed to getmlst
getmlstargs = GenObject()
getmlstargs.species = scheme
getmlstargs.repository_url = 'http://pubmlst.org/data/dbases.xml'
getmlstargs.force_scheme_name = False
getmlstargs.path = newfolder
# Create the path to store the downloaded files
make_path(getmlstargs.path)
getmlst.main(getmlstargs)
# Even if there is an issue contacting the database, files are created, however, they are populated
# with XML strings indicating that the download failed
# Read the first character in the file
try:
profilestart = open(glob('{}/*.txt'.format(newfolder))[0]).readline()
except IndexError:
profilestart = []
# If it is a <, then the download failed
if not profilestart or profilestart[0] == '<':
# Delete the folder, and use the previous definitions instead
shutil.rmtree(newfolder)
newfolder = lastfolder
# If the profile and alleles are up-to-date, set :newfolder to :lastfolder
else:
newfolder = lastfolder
# If update isn't specified, don't update
else:
newfolder = lastfolder
# Ensure that the profile/alleles updated successfully
# Calculate the size of the folder by adding the sizes of all the files within the folder together
try:
newfoldersize = sum(os.path.getsize('{}/{}'.format(newfolder, f)) for f in os.listdir(newfolder)
if os.path.isfile('{}/{}'.format(newfolder, f)))
except (OSError, TypeError):
newfoldersize = 100
# If the profile/allele failed, remove the folder, and use the most recent update
if newfoldersize < 100:
shutil.rmtree(newfolder)
try:
newfolder = sorted(glob('{}/*/'.format(organismpath)))[-1].rstrip('/')
except IndexError:
newfolder = organismpath
# Return the name/path of the allele-containing folder
return newfolder
def neighsol(addr, src, iface, timeout=1, chainCC=0):
"""Sends and receive an ICMPv6 Neighbor Solicitation message
This function sends an ICMPv6 Neighbor Solicitation message
to get the MAC address of the neighbor with specified IPv6 address address.
'src' address is used as source of the message. Message is sent on iface.
By default, timeout waiting for an answer is 1 second.
If no answer is gathered, None is returned. Else, the answer is
returned (ethernet frame).
"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm) / IPv6(dst=d, src=src, hlim=255)
p /= ICMPv6ND_NS(tgt=addr)
p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=timeout, verbose=0,
chainCC=chainCC)
return res
def load_yaml_config(self, conf):
"""Load a YAML configuration file and recursively update the overall configuration."""
with open(conf) as fd:
self.config = recursive_dict_update(self.config, yaml.load(fd, Loader=UnsafeLoader))
def list_policies(self, filters=None):
"""Retrieve installed trap, drop and bypass policies.
:param filters: retrieve only matching policies (optional)
:type filters: dict
:return: list of installed trap, drop and bypass policies
:rtype: list
"""
_, policy_list = self.handler.streamed_request("list-policies",
"list-policy", filters)
return policy_list
def add_extension_attribute(self, ext_name, key, value):
"""
Store a per-extension attribute, keyed by extension name.
"""
attributes = self.extension_attributes.pop(ext_name, {})
attributes[key] = value
self.extension_attributes[ext_name] = attributes
def watcher(self) -> Watcher:
"""
Gives access to the action's watcher.
:return: Action's watcher instance.
"""
if not hasattr(self, "_watcher"):
self._watcher = Watcher()
return self._watcher
def GetResources(filename, types=None, names=None, languages=None):
"""
Get resources from dll/exe file.
types = a list of resource types to search for (None = all)
names = a list of resource names to search for (None = all)
languages = a list of resource languages to search for (None = all)
Return a dict of the form {type_: {name: {language: data}}} which
might also be empty if no matching resources were found.
"""
hsrc = win32api.LoadLibraryEx(filename, 0, LOAD_LIBRARY_AS_DATAFILE)
res = _GetResources(hsrc, types, names, languages)
win32api.FreeLibrary(hsrc)
return res
def gen_anytext(*args):
"""
Convenience function to create bag of words for anytext property
"""
bag = []
for term in args:
if term is not None:
if isinstance(term, list):
for term2 in term:
if term2 is not None:
bag.append(term2)
else:
bag.append(term)
return ' '.join(bag)
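# Usage sketch for gen_anytext: nested lists are flattened and None values are
# dropped (the inputs below are made up).
print(gen_anytext('coastal', ['erosion', None, 'model'], None))
# -> 'coastal erosion model'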
def write(self, values):
"""
Write values to the targeted documents
Values need to be a dict as : {document_id: value}
"""
# Insert only for docs targeted by the target
filtered = {_id: value for _id, value in values.items() if _id in self._document_ids}
if not filtered:
return
bulk = self.get_collection().initialize_ordered_bulk_op()
for _id, value in filtered.items():
bulk.find({'_id': _id}).upsert() \
.update_one({'$set': {self._field: value}})
bulk.execute()
def _parse_values(self):
""" Get values
"""
data = []
if self.has_tabs:
def _parse_tab_text(tab):
# Annoying html in tabs
if tab.select_one(".visible_normal"):
return tab.select_one(".visible_normal").text
else:
return tab.text
sub_table_ids = [_parse_tab_text(x) for x in self.soup.select(".table_switch li")]
sub_tables = self.soup.select(".dataTables_wrapper")
assert len(sub_tables) == len(sub_table_ids)
assert len(sub_tables) > 0
for measure, table in zip(sub_table_ids, sub_tables):
if self.has_horizontal_scroll:
_data = self._parse_horizontal_scroll_table(table)
for region, col, value in _data:
data.append({
"region_or_unit": region,
"select_period": col, # Hardcode warning!
"measure": measure,
})
else:
if self.has_horizontal_scroll:
raise NotImplementedError()
if self.has_vertical_scroll:
table = self.soup.select_one("#DataTables_Table_0_wrapper")
_data = self._parse_vertical_scroll_table(table)
else:
table = self.soup.select(".chart.table.scrolling")[-1]
_data = self._parse_regular_table(table)
for region, measure, value in _data:
data.append({
"region_or_unit": region,
"measure": measure,
"value": value
})
return data
def complete_modules(text):
'''complete mavproxy module names'''
import MAVProxy.modules, pkgutil
modlist = [x[1] for x in pkgutil.iter_modules(MAVProxy.modules.__path__)]
ret = []
loaded = set(complete_loadedmodules(''))
for m in modlist:
if not m.startswith("mavproxy_"):
continue
name = m[9:]
if name not in loaded:
ret.append(name)
return ret
def create_sequence_sites(chain, seq_site_length):
"""Create sequence sites using sequence ids.
:param dict chain: Chain object that contains chemical shift values and assignment information.
:param int seq_site_length: Length of a single sequence site.
:return: List of sequence sites.
:rtype: :py:class:`list`
"""
seq_ids = sorted(list(chain.keys()), key=int) # make sure that sequence is sorted by sequence id
slices = [itertools.islice(seq_ids, i, None) for i in range(seq_site_length)]
seq_site_ids = list(zip(*slices))
sequence_sites = []
for seq_site_id in seq_site_ids:
seq_site = plsimulator.SequenceSite(chain[seq_id] for seq_id in seq_site_id)
if seq_site.is_sequential():
sequence_sites.append(seq_site)
else:
continue
return sequence_sites
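# Standalone sketch of the zip-of-islices idiom used above to build overlapping
# windows over the sorted sequence ids (toy data; no chain/SequenceSite dependency).
import itertools

seq_ids = ['1', '2', '3', '4', '5']
seq_site_length = 3
slices = [itertools.islice(seq_ids, i, None) for i in range(seq_site_length)]
print(list(zip(*slices)))
# -> [('1', '2', '3'), ('2', '3', '4'), ('3', '4', '5')]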
def from_dict(input_dict, data=None):
"""
Instantiate an object of a derived class using the information
in input_dict (built by the to_dict method of the derived class).
More specifically, after reading the derived class from input_dict,
it calls the method _build_from_input_dict of the derived class.
Note: This method should not be overridden in the derived class. In case
it is needed, please override _build_from_input_dict instead.
:param dict input_dict: Dictionary with all the information needed to
instantiate the object.
"""
import copy
input_dict = copy.deepcopy(input_dict)
model_class = input_dict.pop('class')
input_dict["name"] = str(input_dict["name"])
import GPy
model_class = eval(model_class)
return model_class._build_from_input_dict(input_dict, data)
def sha256_fingerprint_from_raw_ssh_pub_key(raw_key):
"""Encode a raw SSH key (string of bytes, as from
`str(paramiko.AgentKey)`) to a fingerprint in the SHA256 form:
SHA256:j2WoSeOWhFy69BQ39fuafFAySp9qCZTSCEyT2vRKcL+s
"""
digest = hashlib.sha256(raw_key).digest()
h = base64.b64encode(digest).decode('utf-8')
h = h.rstrip().rstrip('=') # drop newline and possible base64 padding
return 'SHA256:' + h
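# Self-contained sketch of the same fingerprint transformation applied to an
# arbitrary byte string (not a real SSH key blob).
import base64
import hashlib

raw_key = b'\x00\x00\x00\x07ssh-rsa' + b'\x01' * 32
digest = hashlib.sha256(raw_key).digest()
print('SHA256:' + base64.b64encode(digest).decode('utf-8').rstrip('='))
# -> 'SHA256:' followed by 43 base64 characters with the padding stripped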
def to_json(model, sort=False, **kwargs):
"""
Return the model as a JSON document.
``kwargs`` are passed on to ``json.dumps``.
Parameters
----------
model : cobra.Model
The cobra model to represent.
sort : bool, optional
Whether to sort the metabolites, reactions, and genes or maintain the
order defined in the model.
Returns
-------
str
String representation of the cobra model as a JSON document.
See Also
--------
save_json_model : Write directly to a file.
json.dumps : Base function.
"""
obj = model_to_dict(model, sort=sort)
obj[u"version"] = JSON_SPEC
return json.dumps(obj, allow_nan=False, **kwargs)
def update(self, index, id, doc_type="_doc", body=None, params=None):
"""
Update a document based on a script or partial data provided.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html>`_
:arg index: The name of the index
:arg id: Document ID
:arg body: The request definition using either `script` or partial `doc`
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg fields: A comma-separated list of fields to return in the response
:arg if_seq_no:
:arg if_primary_term:
:arg lang: The script language (default: painless)
:arg parent: ID of the parent document. It is only used for routing and
for the upsert request
:arg refresh: If `true` then refresh the affected shards to make this
operation visible to search, if `wait_for` then wait for a refresh
to make this operation visible to search, if `false` (the default)
then do nothing with refreshes. Valid choices are: 'true', 'false', 'wait_for'
:arg retry_on_conflict: Specify how many times should the operation be
retried when a conflict occurs (default: 0)
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'force'
:arg wait_for_active_shards: Sets the number of shard copies that must
be active before proceeding with the update operation. Defaults to
1, meaning the primary shard only. Set to `all` for all shard
copies, otherwise set to any non-negative value less than or equal
to the total number of copies for the shard (number of replicas + 1)
"""
for param in (index, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"POST", _make_path(index, doc_type, id, "_update"), params=params, body=body
)
def get_django_user(self, username, password=None):
"""
Get the Django user with the given username, or create one if it
doesn't already exist. If `password` is given, then set the user's
password to that (regardless of whether the user was created or not).
"""
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User(username=username)
if password is not None:
user.set_password(password)
user.save()
return user
def increase_volume(percentage):
'''Increase the volume.
Increase the volume by a given percentage.
Args:
percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by.
Raises:
ValueError: if the percentage is >100 or <0.
'''
if percentage > 100 or percentage < 0:
raise ValueError('percentage must be an integer between 0 and 100')
if system.get_name() == 'windows':
# TODO: Implement volume for Windows. Looks like WinAPI is the
# solution...
pass
elif system.get_name() == 'mac':
volume_int = percentage / 10
old_volume = get()
new_volume = old_volume + volume_int
if new_volume > 10:
new_volume = 10
set_volume(new_volume * 10)
else:
# Linux/Unix
formatted = '%d%%+' % percentage
# + or - increases/decreases in amixer
sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
def __init_configsvrs(self, params):
"""create and start config servers"""
self._configsvrs = []
for cfg in params:
# Remove flags that turn on auth.
cfg = self._strip_auth(cfg)
server_id = cfg.pop('server_id', None)
version = cfg.pop('version', self._version)
cfg.update({'configsvr': True})
if self.enable_ipv6:
common.enable_ipv6_single(cfg)
self._configsvrs.append(Servers().create(
'mongod', cfg, sslParams=self.sslParams, autostart=True,
version=version, server_id=server_id))
def _file_write(path, content):
'''
Write content to a file
'''
with salt.utils.files.fopen(path, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(content))
fp_.close()
def pager_fatality_rates():
"""USGS Pager fatality estimation model.
Fatality rate(MMI) = cum. standard normal dist(1/BETA * ln(MMI/THETA)).
Reference:
Jaiswal, K. S., Wald, D. J., and Hearne, M. (2009a).
Estimating casualties for large worldwide earthquakes using an empirical
approach. U.S. Geological Survey Open-File Report 2009-1136.
v1.0:
Theta: 14.05, Beta: 0.17, Zeta 2.15
Jaiswal, K, and Wald, D (2010)
An Empirical Model for Global Earthquake Fatality Estimation
Earthquake Spectra, Volume 26, No. 4, pages 1017–1037
v2.0:
Theta: 13.249, Beta: 0.151, Zeta: 1.641)
(http://pubs.usgs.gov/of/2009/1136/pdf/
PAGER%20Implementation%20of%20Empirical%20model.xls)
:returns: Fatality rate calculated as:
lognorm.cdf(mmi, shape=Beta, scale=Theta)
:rtype: dict
"""
# Model coefficients
theta = 13.249
beta = 0.151
mmi_range = list(range(2, 11))
fatality_rate = {mmi: 0 if mmi < 4 else log_normal_cdf(
mmi, median=theta, sigma=beta) for mmi in mmi_range}
return fatality_rate
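# Illustrative sketch of the v2.0 curve using scipy, assuming the original
# log_normal_cdf(x, median=theta, sigma=beta) corresponds to
# lognorm.cdf(x, s=beta, scale=theta), i.e. Phi((1/beta) * ln(mmi/theta)).
from scipy.stats import lognorm

theta, beta = 13.249, 0.151
rates = {mmi: 0 if mmi < 4 else lognorm.cdf(mmi, s=beta, scale=theta)
         for mmi in range(2, 11)}
for mmi in sorted(rates):
    print(mmi, round(rates[mmi], 6))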
def _find_players(self, year):
"""
Find all player IDs for the requested team.
For the requested team and year (if applicable), pull the roster table
and parse the player ID for all players on the roster and create an
instance of the Player class for the player. All player instances are
added to the 'players' property to get all stats for all players on a
team.
Parameters
----------
year : string
The 6-digit string representing the year to pull the team's roster
from.
"""
if not year:
year = utils._find_year_for_season('nhl')
url = self._create_url(year)
page = self._pull_team_page(url)
if not page:
output = ("Can't pull requested team page. Ensure the following "
"URL exists: %s" % url)
raise ValueError(output)
for player in page('table#roster tbody tr').items():
player_id = self._get_id(player)
if self._slim:
name = self._get_name(player)
self._players[player_id] = name
else:
player_instance = Player(player_id)
self._players.append(player_instance)
def tent_map(x, steps, mu=2):
"""
Generates a time series of the tent map.
Characteristics and Background:
The name of the tent map is derived from the fact that the plot of x_i vs
x_i+1 looks like a tent. For mu > 1 one application of the mapping function
can be viewed as stretching the surface on which the value is located and
then folding the area that is greater than one back towards the zero. This
corresponds nicely to the definition of chaos as expansion in one dimension
which is counteracted by a compression in another dimension.
Calculating the Lyapunov exponent:
The lyapunov exponent of the tent map can be easily calculated as due to
this stretching behavior a small difference delta between two neighboring
points will indeed grow exponentially by a factor of mu in each iteration.
We thus can assume that:
delta_n = delta_0 * mu^n
We now only have to change the basis to e to obtain the exact formula that
is used for the definition of the lyapunov exponent:
delta_n = delta_0 * e^(ln(mu) * n)
Therefore the lyapunov exponent of the tent map is:
lambda = ln(mu)
References:
.. [tm_1] https://en.wikipedia.org/wiki/Tent_map
Args:
x (float):
starting point
steps (int):
number of steps for which the generator should run
Kwargs:
mu (int):
parameter mu that controls the behavior of the map
Returns:
generator object:
the generator that creates the time series
"""
for _ in range(steps):
x = mu * x if x < 0.5 else mu * (1 - x)
yield x
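# Usage sketch for tent_map: two nearby trajectories separate roughly by a factor
# of mu per step, so the numerically estimated Lyapunov exponent approaches ln(mu).
import math

mu, x0, delta0 = 2, 0.3, 1e-10
a = list(tent_map(x0, 20, mu=mu))
b = list(tent_map(x0 + delta0, 20, mu=mu))
n = 10
estimate = math.log(abs(a[n - 1] - b[n - 1]) / delta0) / n
print(estimate, math.log(mu))  # both close to 0.693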
def nexec(statement, globals=None, locals=None, **kwargs):
"""Execute *statement* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *statement* is transformed using
:class:`.NapiTransformer`."""
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ast import parse
from napi.transformers import NapiTransformer
from ast import fix_missing_locations as fml
try:
node = parse(statement, '<string>', 'exec')
except ImportError:#KeyError:
exec(statement)
else:
if globals is None:
globals = builtins.globals()
if locals is None:
locals = {}
trans = NapiTransformer(globals=globals, locals=locals, **kwargs)
trans.visit(node)
code = compile(fml(node), '<string>', 'exec')
return builtins.eval(code, globals, locals)
def _draw_frame(self, framedata):
"""Reads, processes and draws the frames.
If needed for color maps, conversions to gray scale are performed. In
case the images are no color images and no custom color maps are
defined, the colormap `gray` is applied.
This function is called by TimedAnimation.
Args:
framedata: The frame data.
"""
original = self.read_frame()
if original is None:
self.update_info(self.info_string(message='Finished.',
frame=framedata))
return
if self.original is not None:
processed = self.process_frame(original.copy())
if self.cmap_original is not None:
original = to_gray(original)
elif not is_color_image(original):
self.original.set_cmap('gray')
self.original.set_data(original)
else:
processed = self.process_frame(original)
if self.cmap_processed is not None:
processed = to_gray(processed)
elif not is_color_image(processed):
self.processed.set_cmap('gray')
if self.annotations:
self.annotate(framedata)
self.processed.set_data(processed)
self.update_info(self.info_string(frame=framedata))
def match_any_learning_objective(self, match):
"""Matches an item with any objective.
arg: match (boolean): ``true`` to match items with any
learning objective, ``false`` to match items with no
learning objectives
*compliance: mandatory -- This method must be implemented.*
"""
match_key = 'learningObjectiveIds'
param = '$exists'
if match:
flag = 'true'
else:
flag = 'false'
if match_key in self._my_osid_query._query_terms:
self._my_osid_query._query_terms[match_key][param] = flag
else:
self._my_osid_query._query_terms[match_key] = {param: flag}
self._my_osid_query._query_terms[match_key]['$nin'] = [[], ['']]
def set_poll_func(self, func, func_err_handler=None):
'''Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that.'''
if not func_err_handler: func_err_handler = traceback.print_exception
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None)
def eeg_microstates_relabel(method, results, microstates_labels, reverse_microstates=None):
"""
Relabel the microstates.
"""
microstates = list(method['microstates'])
for index, microstate in enumerate(method['microstates']):
if microstate in list(reverse_microstates.keys()):
microstates[index] = reverse_microstates[microstate]
method["data"][index] = -1*method["data"][index]
if microstate in list(microstates_labels.keys()):
microstates[index] = microstates_labels[microstate]
method['microstates'] = np.array(microstates)
return(results, method)
def _(s: Influence) -> bool:
""" Check if an Influence statement is grounded """
return is_grounded(s.subj) and is_grounded(s.obj)
def channel(self):
"""If no channel exists, a new one is requested."""
if not self._channel:
self._channel_ref = weakref.ref(self.connection.get_channel())
return self._channel
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce()
def check(ctx):
""" Check built package is valid.
"""
check_command = f"twine check {ctx.directory!s}/dist/*"
report.info(ctx, "package.check", "checking package")
ctx.run(check_command)
def get_collections(self, data):
"""Return serialized list of collection objects on data that user has `view` permission on."""
collections = self._filter_queryset('view_collection', data.collection_set.all())
from .collection import CollectionSerializer
class CollectionWithoutDataSerializer(WithoutDataSerializerMixin, CollectionSerializer):
"""Collection without data field serializer."""
return self._serialize_items(CollectionWithoutDataSerializer, 'collections', collections)
def get_network_by_id(self, network_id: int) -> Network:
"""Get a network from the database by its identifier."""
return self.session.query(Network).get(network_id)
def set_active_vectors(self, name, preference='cell'):
"""Finds the vectors by name and appropriately sets it as active"""
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveVectors(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveVectors(name)
else:
raise RuntimeError('Data field ({}) not useable'.format(field))
self._active_vectors_info = [field, name]
def run(self):
"""Run.
:raises BuildFailed: extension build failed and need to skip cython part.
"""
try:
build_ext.build_ext.run(self)
# Copy __init__.py back to repair package.
build_dir = os.path.abspath(self.build_lib)
root_dir = os.path.abspath(os.path.join(__file__, ".."))
target_dir = build_dir if not self.inplace else root_dir
src_file = os.path.join("advanced_descriptors", "__init__.py")
src = os.path.join(root_dir, src_file)
dst = os.path.join(target_dir, src_file)
if src != dst:
shutil.copyfile(src, dst)
except (
distutils.errors.DistutilsPlatformError,
FileNotFoundError,
):
raise BuildFailed()
def iam(self):
"""Generate iam details."""
iam = {
'group': self.format['iam_group'].format(**self.data),
'lambda_role': self.format['iam_lambda_role'].format(**self.data),
'policy': self.format['iam_policy'].format(**self.data),
'profile': self.format['iam_profile'].format(**self.data),
'role': self.format['iam_role'].format(**self.data),
'user': self.format['iam_user'].format(**self.data),
'base': self.format['iam_base'].format(**self.data),
}
return iam
def update_loadbalancer(self, lbaas_loadbalancer, body=None):
"""Updates a load balancer."""
return self.put(self.lbaas_loadbalancer_path % (lbaas_loadbalancer),
body=body)
def get_default_config_help(self):
"""
Return help text for collector configuration.
"""
config_help = super(MemoryLxcCollector, self).get_default_config_help()
config_help.update({
"sys_path": "Defaults to '/sys/fs/cgroup/lxc'",
})
return config_help
def from_json(self, resource_root, data):
"""
Parses the given JSON value into an appropriate python object.
This means:
- a datetime.datetime if 'atype' is datetime.datetime
- a converted config dictionary or config list if 'atype' is ApiConfig
- if the attr is an API list, an ApiList with instances of 'atype'
- an instance of 'atype' if it has a 'from_json_dict' method
- a python list with decoded versions of the member objects if the input
is a python list.
- the raw value otherwise
"""
if data is None:
return None
if self._atype == datetime.datetime:
return datetime.datetime.strptime(data, self.DATE_FMT)
elif self._atype == ApiConfig:
# ApiConfig is special. We want a python dictionary for summary views,
# but an ApiList for full views. Try to detect each case from the JSON
# data.
if not data['items']:
return { }
first = data['items'][0]
return json_to_config(data, len(first) == 2)
elif self._is_api_list:
return ApiList.from_json_dict(data, resource_root, self._atype)
elif isinstance(data, list):
return [ self.from_json(resource_root, x) for x in data ]
elif hasattr(self._atype, 'from_json_dict'):
return self._atype.from_json_dict(data, resource_root)
else:
return data
def getKeywordsForText(self, retina_name, body):
"""Get a list of keywords from the text
Args:
retina_name, str: The retina name (required)
body, str: The text to be evaluated (required)
Returns: Array[str]
"""
resourcePath = '/text/keywords'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return response.json()
def pack_image(filename, max_size, form_field='image'):
"""Pack an image from file into multipart-formdata post body"""
try:
if os.path.getsize(filename) > (max_size * 1024):
raise IdeaScalyError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise IdeaScalyError('Unable to access file: %s' % e.strerror)
# build the multipart-formdata body
fp = open(filename, 'rb')
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)[0]
if file_type is None:
raise IdeaScalyError('Could not determine file type')
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise IdeaScalyError('Invalid file type for image: %s' % file_type)
if isinstance(filename, six.text_type):
filename = filename.encode('utf-8')
BOUNDARY = b'Id34Sc4ly'
body = list()
body.append(b'--' + BOUNDARY)
body.append('content-disposition: form-data; name="{0}";'
' filename="{1}"'.format(form_field, filename)
.encode('utf-8'))
body.append('content-type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
body_length = str(len(body))
# build headers
headers = {
'content-type': 'multipart/form-data; boundary={0}'.format(BOUNDARY),
'content-length': body_length
}
return headers, body
def _convert_types(schema, col_type_dict, row):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats. Binary type fields are encoded with base64,
as imported BYTES data must be base64-encoded according to Bigquery SQL
date type documentation: https://cloud.google.com/bigquery/data-types
"""
converted_row = []
for col_name, col_val in zip(schema, row):
if type(col_val) in (datetime, date):
col_val = time.mktime(col_val.timetuple())
elif isinstance(col_val, Decimal):
col_val = float(col_val)
elif col_type_dict.get(col_name) == "BYTES":
col_val = base64.standard_b64encode(col_val).decode('ascii')
else:
col_val = col_val
converted_row.append(col_val)
return converted_row
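# Toy illustration of the conversion rules (made-up schema and row; assumes the
# helper above is in scope together with its time/datetime/base64 imports).
from datetime import date
from decimal import Decimal

schema = ['created', 'price', 'blob', 'name']
col_type_dict = {'blob': 'BYTES'}
row = (date(2020, 1, 1), Decimal('9.99'), b'\x00\x01', 'widget')
# Dates become epoch seconds, Decimals become floats, BYTES columns become
# base64 text, everything else passes through unchanged.
print(_convert_types(schema, col_type_dict, row))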
def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes):
"Plot signal channels"
# Extend signal style if necessary
if len(sig_style) == 1:
sig_style = n_sig * sig_style
# Figure out time indices
if time_units == 'samples':
t = np.linspace(0, sig_len-1, sig_len)
else:
downsample_factor = {'seconds':fs, 'minutes':fs * 60,
'hours':fs * 3600}
t = np.linspace(0, sig_len-1, sig_len) / downsample_factor[time_units]
# Plot the signals
if signal.ndim == 1:
axes[0].plot(t, signal, sig_style[0], zorder=3)
else:
for ch in range(n_sig):
axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=3)
def is_scalar(value):
"""Test if the given value is a scalar.
This function also works with memory mapped array values, in contrast to the numpy is_scalar method.
Args:
value: the value to test for being a scalar value
Returns:
boolean: if the given value is a scalar or not
"""
return np.isscalar(value) or (isinstance(value, np.ndarray) and (len(np.squeeze(value).shape) == 0))
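# A few illustrative calls (assumes numpy as np and the helper above are in scope).
import numpy as np

print(is_scalar(3.0))                    # True
print(is_scalar(np.array(3.0)))          # True, 0-d array squeezes to shape ()
print(is_scalar(np.array([[3.0]])))      # True, also squeezes to shape ()
print(is_scalar(np.array([1.0, 2.0])))   # False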
def noisy_operation(self, operation: 'cirq.Operation') -> 'cirq.OP_TREE':
"""Adds noise to an individual operation.
Args:
operation: The operation to make noisy.
Returns:
An OP_TREE corresponding to the noisy operations implementing the
noisy version of the given operation.
"""
if not hasattr(self.noisy_moments, '_not_overridden'):
return self.noisy_moments([ops.Moment([operation])],
operation.qubits)
if not hasattr(self.noisy_moment, '_not_overridden'):
return self.noisy_moment(ops.Moment([operation]), operation.qubits)
assert False, 'Should be unreachable.'
def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache.
"""
if not vts:
return [], [], []
read_cache = self._cache_factory.get_read_cache()
items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None)
for vt in vts]
res = self.context.subproc_map(call_use_cached_files, items)
cached_vts = []
uncached_vts = []
uncached_causes = []
# Note that while the input vts may represent multiple targets (for tasks that override
# check_artifact_cache_for), the ones we return must represent single targets.
# Once flattened, cached/uncached vts are in separate lists. Each uncached vts is paired
# with why it is missed for stat reporting purpose.
for vt, was_in_cache in zip(vts, res):
if was_in_cache:
cached_vts.extend(vt.versioned_targets)
else:
uncached_vts.extend(vt.versioned_targets)
uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets)))
if isinstance(was_in_cache, UnreadableArtifact):
self._cache_key_errors.update(was_in_cache.key)
if post_process_cached_vts:
post_process_cached_vts(cached_vts)
for vt in cached_vts:
vt.update()
return cached_vts, uncached_vts, uncached_causes
def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
"""
dict_like_attrs = ("__getitem__", "keys", "__contains__")
return (all(hasattr(obj, attr) for attr in dict_like_attrs)
# [GH 25196] exclude classes
and not isinstance(obj, type))
def set_fixed_image(self, image):
"""
Set Fixed ANTsImage for metric
"""
if not isinstance(image, iio.ANTsImage):
raise ValueError('image must be ANTsImage type')
if image.dimension != self.dimension:
raise ValueError('image dim (%i) does not match metric dim (%i)' % (image.dimension, self.dimension))
self._metric.setFixedImage(image.pointer, False)
self.fixed_image = image
def _translate_key(key):
"""
if key is deprecated and a replacement key is defined, return the
replacement key; otherwise return `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _generate_processed_key_name(process_to, upload_name):
"""Returns a key name to use after processing based on timestamp and
upload key name."""
timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')
name, extension = os.path.splitext(upload_name)
digest = md5(''.join([timestamp, upload_name])).hexdigest()
return os.path.join(process_to, '{0}{1}'.format(digest, extension))
def get_relative_from_paths(self, filepath, paths):
"""
Find the relative filepath from the most relevant multiple paths.
This is somewhat like a ``os.path.relpath(path[, start])`` but where
``start`` is a list. The most relevant item from ``paths`` will be used
to apply the relative transform.
Args:
filepath (str): Path to transform to relative.
paths (list): List of absolute paths to use to find and remove the
start path from the ``filepath`` argument. If multiple paths start
with the same directories, the longest match wins.
Raises:
boussole.exception.FinderException: If no ``filepath`` start could
be found.
Returns:
str: Relative filepath where the start coming from ``paths`` is
removed.
"""
for systempath in paths_by_depth(paths):
if filepath.startswith(systempath):
return os.path.relpath(filepath, systempath)
raise FinderException("'Finder.get_relative_from_paths()' could not "
"find filepath start from '{}'".format(filepath)) | Find the relative filepath from the most relevant multiple paths.
This is somewhat like a ``os.path.relpath(path[, start])`` but where
``start`` is a list. The most relevant item from ``paths`` will be used
to apply the relative transform.
Args:
filepath (str): Path to transform to relative.
paths (list): List of absolute paths to use to find and remove the
start path from ``filepath`` argument. If there is multiple
path starting with the same directories, the biggest will
match.
Raises:
boussole.exception.FinderException: If no ``filepath`` start could
be finded.
Returns:
str: Relative filepath where the start coming from ``paths`` is
removed. |
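# Standalone sketch of the "most specific base path wins" idea; paths_by_depth
# from the original is approximated here by sorting candidates on path depth,
# and the helper name below is hypothetical.
import os

def relative_from_most_specific(filepath, paths):
    # Deepest base paths are tried first so '/project/scss/vendor' beats '/project/scss'.
    for base in sorted(paths, key=lambda p: p.count(os.sep), reverse=True):
        if filepath.startswith(base):
            return os.path.relpath(filepath, base)
    raise ValueError("no base path matches {!r}".format(filepath))

print(relative_from_most_specific('/project/scss/vendor/_grid.scss',
                                  ['/project/scss', '/project/scss/vendor']))
# -> '_grid.scss'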
def find_dangerous_changes(
old_schema: GraphQLSchema, new_schema: GraphQLSchema
) -> List[DangerousChange]:
"""Find dangerous changes.
Given two schemas, returns a list containing descriptions of all the types of
potentially dangerous changes covered by the other functions down below.
"""
return (
find_arg_changes(old_schema, new_schema).dangerous_changes
+ find_values_added_to_enums(old_schema, new_schema)
+ find_interfaces_added_to_object_types(old_schema, new_schema)
+ find_types_added_to_unions(old_schema, new_schema)
+ find_fields_that_changed_type_on_input_object_types(
old_schema, new_schema
).dangerous_changes
)
def desc(self) -> str:
"""A helper property to describe a token as a string for debugging"""
kind, value = self.kind.value, self.value
return f"{kind} {value!r}" if value else kind | A helper property to describe a token as a string for debugging |
def _get_content_type(self, filename):
""" gets the content type of a file """
mntype = mimetypes.guess_type(filename)[0]
filename, fileExtension = os.path.splitext(filename)
if mntype is None and\
fileExtension.lower() == ".csv":
mntype = "text/csv"
elif mntype is None and \
fileExtension.lower() == ".sd":
mntype = "File/sd"
elif mntype is None:
#mntype = 'application/octet-stream'
mntype= "File/%s" % fileExtension.replace('.', '')
return mntype
def set(self, project, date, data, data_ts):
"""
Set the cache data for a specified project for the specified date.
:param project: project name to set data for
:type project: str
:param date: date to set data for
:type date: datetime.datetime
:param data: data to cache
:type data: dict
:param data_ts: maximum timestamp in the BigQuery data table
:type data_ts: int
"""
data['cache_metadata'] = {
'project': project,
'date': date.strftime('%Y%m%d'),
'updated': time.time(),
'version': VERSION,
'data_ts': data_ts
}
fpath = self._path_for_file(project, date)
logger.debug('Cache SET project=%s date=%s - path=%s',
project, date.strftime('%Y-%m-%d'), fpath)
with open(fpath, 'w') as fh:
fh.write(json.dumps(data)) | Set the cache data for a specified project for the specified date.
:param project: project name to set data for
:type project: str
:param date: date to set data for
:type date: datetime.datetime
:param data: data to cache
:type data: dict
:param data_ts: maximum timestamp in the BigQuery data table
:type data_ts: int |
def _button_plus_clicked(self, n):
"""
Create a new colorpoint.
"""
self._button_save.setEnabled(True)
self.insert_colorpoint(self._colorpoint_list[n][0],
self._colorpoint_list[n][1],
self._colorpoint_list[n][2])
self._build_gui() | Create a new colorpoint. |
def Unlock(fd, path):
"""Release the lock on the file.
Args:
fd: int, the file descriptor of the file to unlock.
path: string, the name of the file to lock.
Raises:
IOError, raised from flock while attempting to release a file lock.
"""
try:
fcntl.flock(fd, fcntl.LOCK_UN | fcntl.LOCK_NB)
except IOError as e:
if e.errno == errno.EWOULDBLOCK:
raise IOError('Exception unlocking %s. Locked by another process.' % path)
else:
raise IOError('Exception unlocking %s. %s.' % (path, str(e))) | Release the lock on the file.
Args:
fd: int, the file descriptor of the file to unlock.
path: string, the name of the file to lock.
Raises:
IOError, raised from flock while attempting to release a file lock. |
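For context, a hedged sketch of how this pairs with a flock()-based lock (the matching Lock helper is not shown in this excerpt):
import fcntl
with open('/tmp/example.lock', 'w') as lock_file:
    # Take a non-blocking exclusive lock, do work, then release via Unlock().
    fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    try:
        pass  # critical section
    finally:
        Unlock(lock_file.fileno(), '/tmp/example.lock')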
def _set_linkinfo_domain_reachable(self, v, load=False):
"""
Setter method for linkinfo_domain_reachable, mapped from YANG variable /brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_domain_reachable (linkinfo-domain-reachable-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_linkinfo_domain_reachable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_linkinfo_domain_reachable() directly.
YANG Description: Indicates whether the RBridge is reachable
or not.
Yes - Indicates RBridge is reachable
No - Indicates RBridge is not reachable.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'Yes|No|yes|no', 'length': [u'0..3']}), is_leaf=True, yang_name="linkinfo-domain-reachable", rest_name="linkinfo-domain-reachable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Indicates whether the RBridge is reachable\nor not'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='linkinfo-domain-reachable-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """linkinfo_domain_reachable must be of a type compatible with linkinfo-domain-reachable-type""",
'defined-type': "brocade-fabric-service:linkinfo-domain-reachable-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'Yes|No|yes|no', 'length': [u'0..3']}), is_leaf=True, yang_name="linkinfo-domain-reachable", rest_name="linkinfo-domain-reachable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Indicates whether the RBridge is reachable\nor not'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='linkinfo-domain-reachable-type', is_config=True)""",
})
self.__linkinfo_domain_reachable = t
if hasattr(self, '_set'):
self._set() | Setter method for linkinfo_domain_reachable, mapped from YANG variable /brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_domain_reachable (linkinfo-domain-reachable-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_linkinfo_domain_reachable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_linkinfo_domain_reachable() directly.
YANG Description: Indicates whether the RBridge is reachable
or not.
Yes - Indicates RBridge is reachable
No - Indicates RBridge is not reachable. |
def linear_exprs(A, x, b=None, rref=False, Matrix=None):
""" Returns Ax - b
Parameters
----------
A : matrix_like of numbers
Of shape (len(b), len(x)).
x : iterable of symbols
b : array_like of numbers (default: None)
When ``None``, assume zeros of length ``len(x)``.
Matrix : class
When ``rref == True``: A matrix class which supports slicing,
and methods ``__mul__`` and ``rref``. Defaults to ``sympy.Matrix``.
rref : bool
Calculate the reduced row echelon form of (A | -b).
Returns
-------
A list of the elements in the resulting column vector.
"""
if b is None:
b = [0]*len(x)
if rref:
rA, rb = linear_rref(A, b, Matrix)
if Matrix is None:
from sympy import Matrix
return [lhs - rhs for lhs, rhs in zip(rA * Matrix(len(x), 1, x), rb)]
else:
return [sum([x0*x1 for x0, x1 in zip(row, x)]) - v
for row, v in zip(A, b)] | Returns Ax - b
Parameters
----------
A : matrix_like of numbers
Of shape (len(b), len(x)).
x : iterable of symbols
b : array_like of numbers (default: None)
When ``None``, assume zeros of length ``len(x)``.
Matrix : class
When ``rref == True``: A matrix class which supports slicing,
and methods ``__mul__`` and ``rref``. Defaults to ``sympy.Matrix``.
rref : bool
Calculate the reduced row echelon form of (A | -b).
Returns
-------
A list of the elements in the resulting column vector. |
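A small worked example of the default (non-rref) branch, assuming SymPy symbols:
from sympy import symbols
x, y = symbols('x y')
linear_exprs([[1, 2], [3, 4]], [x, y], [5, 6])
# -> [x + 2*y - 5, 3*x + 4*y - 6]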
def decompressBuffer(buffer):
"complements the compressBuffer function in CacheClient"
zbuf = cStringIO.StringIO(buffer)
zfile = gzip.GzipFile(fileobj=zbuf)
deflated = zfile.read()
zfile.close()
return deflated | complements the compressBuffer function in CacheClient |
def login(self, username, json_document):
"""Send user identity information to the identity manager.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@param json_document The JSON payload for login.
"""
url = '{}u/{}'.format(self.url, username)
make_request(
url, method='PUT', body=json_document, timeout=self.timeout) | Send user identity information to the identity manager.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@param json_document The JSON payload for login. |
def extract_largest(self, inplace=False):
"""
Extract largest connected set in mesh.
Can be used to reduce residues obtained when generating an isosurface.
Works only if residues are not connected to (i.e., do not share at least one
point with) the main component of the image.
Parameters
----------
inplace : bool, optional
Updates mesh in-place while returning nothing.
Returns
-------
mesh : vtki.PolyData
Largest connected set in mesh
"""
mesh = self.connectivity(largest=True)
if inplace:
self.overwrite(mesh)
else:
return mesh | Extract largest connected set in mesh.
Can be used to reduce residues obtained when generating an isosurface.
Works only if residues are not connected to (i.e., do not share at least one
point with) the main component of the image.
Parameters
----------
inplace : bool, optional
Updates mesh in-place while returning nothing.
Returns
-------
mesh : vtki.PolyData
Largest connected set in mesh |
def to_image_list(tensors, size_divisible=0):
"""
tensors can be an ImageList, a torch.Tensor or
an iterable of Tensors. It can't be a numpy array.
When tensors is an iterable of Tensors, it pads
the Tensors with zeros so that they have the same
shape
"""
if isinstance(tensors, torch.Tensor) and size_divisible > 0:
tensors = [tensors]
if isinstance(tensors, ImageList):
return tensors
elif isinstance(tensors, torch.Tensor):
# single tensor shape can be inferred
assert tensors.dim() == 4
image_sizes = [tensor.shape[-2:] for tensor in tensors]
return ImageList(tensors, image_sizes)
elif isinstance(tensors, (tuple, list)):
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))
# TODO Ideally, just remove this and let the model handle arbitrary
# input sizes
if size_divisible > 0:
import math
stride = size_divisible
max_size = list(max_size)
max_size[1] = int(math.ceil(max_size[1] / stride) * stride)
max_size[2] = int(math.ceil(max_size[2] / stride) * stride)
max_size = tuple(max_size)
batch_shape = (len(tensors),) + max_size
batched_imgs = tensors[0].new(*batch_shape).zero_()
for img, pad_img in zip(tensors, batched_imgs):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
image_sizes = [im.shape[-2:] for im in tensors]
return ImageList(batched_imgs, image_sizes)
else:
raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors))) | tensors can be an ImageList, a torch.Tensor or
an iterable of Tensors. It can't be a numpy array.
When tensors is an iterable of Tensors, it pads
the Tensors with zeros so that they have the same
shape |
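A minimal usage sketch, assuming the ImageList attribute names tensors and image_sizes:
import torch
imgs = [torch.rand(3, 32, 48), torch.rand(3, 40, 50)]
batched = to_image_list(imgs)
print(batched.tensors.shape)   # torch.Size([2, 3, 40, 50]) - zero-padded to the max size
print(batched.image_sizes)     # [torch.Size([32, 48]), torch.Size([40, 50])]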
def getSolutionIter(self):
"""
Return an iterator to the solutions of the problem
Example:
>>> problem = Problem()
>>> list(problem.getSolutionIter()) == []
True
>>> problem.addVariables(["a"], [42])
>>> iter = problem.getSolutionIter()
>>> next(iter)
{'a': 42}
>>> next(iter)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"""
domains, constraints, vconstraints = self._getArgs()
if not domains:
return iter(())
return self._solver.getSolutionIter(domains, constraints, vconstraints) | Return an iterator to the solutions of the problem
Example:
>>> problem = Problem()
>>> list(problem.getSolutionIter()) == []
True
>>> problem.addVariables(["a"], [42])
>>> iter = problem.getSolutionIter()
>>> next(iter)
{'a': 42}
>>> next(iter)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration |
def find_multiplex_by_name(self, multiplex_name: str) -> Multiplex:
"""
Find and return a multiplex in the influence graph with the given name.
Raise an AttributeError if there is no multiplex in the graph with the given name.
"""
for multiplex in self.multiplexes:
if multiplex.name == multiplex_name:
return multiplex
raise AttributeError(f'multiplex "{multiplex_name}" does not exist') | Find and return a multiplex in the influence graph with the given name.
Raise an AttributeError if there is no multiplex in the graph with the given name. |
def get_suppliers_per_page(self, per_page=1000, page=1, params=None):
"""
Get suppliers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=SUPPLIERS, per_page=per_page, page=page, params=params) | Get suppliers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list |
def key_exists(self, section, key):
"""
Checks if given key exists.
:param section: Current section to check key in.
:type section: unicode
:param key: Current key to check.
:type key: unicode
:return: Key existence.
:rtype: bool
"""
LOGGER.debug("> Checking '{0}' key existence in '{1}' section.".format(key, section))
self.__settings.beginGroup(section)
exists = self.__settings.contains(key)
self.__settings.endGroup()
return exists | Checks if given key exists.
:param section: Current section to check key in.
:type section: unicode
:param key: Current key to check.
:type key: unicode
:return: Key existence.
:rtype: bool |
def _decode_embedded_dict(src):
'''
Convert embedded bytes to strings if possible.
Dict helper.
'''
output = {}
for key, val in six.iteritems(src):
if isinstance(val, dict):
val = _decode_embedded_dict(val)
elif isinstance(val, list):
val = _decode_embedded_list(val) # pylint: disable=redefined-variable-type
elif isinstance(val, bytes):
try:
val = val.decode()
except UnicodeError:
pass
if isinstance(key, bytes):
try:
key = key.decode()
except UnicodeError:
pass
output[key] = val
return output | Convert enbedded bytes to strings if possible.
Dict helper. |
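An illustration of the behaviour, assuming the companion _decode_embedded_list helper (not shown) handles lists the same way:
src = {b'key': [b'a', b'b'], 'plain': {b'inner': b'\xff\xfe'}}
_decode_embedded_dict(src)
# -> {'key': ['a', 'b'], 'plain': {'inner': b'\xff\xfe'}}  (undecodable bytes are left as-is)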
def _filter_headers(self):
"""
Add headers designed for filtering messages based on objects.
Returns:
dict: Filter-related headers to be combined with the existing headers
"""
headers = {}
for user in self.usernames:
headers["fedora_messaging_user_{}".format(user)] = True
for package in self.packages:
headers["fedora_messaging_rpm_{}".format(package)] = True
for container in self.containers:
headers["fedora_messaging_container_{}".format(container)] = True
for module in self.modules:
headers["fedora_messaging_module_{}".format(module)] = True
for flatpak in self.flatpaks:
headers["fedora_messaging_flatpak_{}".format(flatpak)] = True
return headers | Add headers designed for filtering messages based on objects.
Returns:
dict: Filter-related headers to be combined with the existing headers |
def import_app_module(app_name, module_name):
"""Returns a module from a given app by its name.
:param str app_name:
:param str module_name:
:rtype: module or None
"""
name_split = app_name.split('.')
if name_split[-1][0].isupper(): # Seems that we have app config class path here.
app_name = '.'.join(name_split[:-2])
module = import_module(app_name)
try:
sub_module = import_module('%s.%s' % (app_name, module_name))
return sub_module
except:
# The same bubbling strategy as in autodiscover_modules().
if module_has_submodule(module, module_name): # Module is in a package.
raise
return None | Returns a module from a given app by its name.
:param str app_name:
:param str module_name:
:rtype: module or None |
def dig(host):
'''
Performs a DNS lookup with dig
CLI Example:
.. code-block:: bash
salt '*' network.dig archlinux.org
'''
cmd = 'dig {0}'.format(salt.utils.network.sanitize_host(host))
return __salt__['cmd.run'](cmd) | Performs a DNS lookup with dig
CLI Example:
.. code-block:: bash
salt '*' network.dig archlinux.org |
def create_filter(self):
"""Get an instance of filter services facade."""
return Filter(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | Get an instance of filter services facade. |
def _check_connections(self):
"""Checks if all configured redis servers are reachable"""
for server in self._servers:
if self._is_reachable(server):
server['down_until'] = 0
else:
server['down_until'] = time.time() + 5 | Checks if all configured redis servers are reachable |
def clean(self):
"""
Empties the cache
"""
self._table.clear()
for item in self._usage_recency:
self._usage_recency.remove(item) | Empties the cache |
def fit(self, X, y, **fit_params):
"""Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator.
"""
return default_client().sync(self._fit, X, y, **fit_params) | Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator. |
def json_data(self):
"""Returns data as JSON
Returns:
json_data (str): JSON representation of data, as created in make_data
"""
def stringify_keys(d):
if not isinstance(d, dict):
return d
return dict((str(k), stringify_keys(v)) for k, v in d.items())
data = self.make_data()
json_data = json.dumps(stringify_keys(data))
return json_data | Returns data as JSON
Returns:
json_data (str): JSON representation of data, as created in make_data |
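A standalone sketch of why keys are stringified before dumping: json.dumps rejects non-string keys such as tuples, so they are converted with str() first.
import json
def stringify_keys(d):
    if not isinstance(d, dict):
        return d
    return dict((str(k), stringify_keys(v)) for k, v in d.items())
print(json.dumps(stringify_keys({1: {'a': 2}, (3, 4): 'x'})))
# -> {"1": {"a": 2}, "(3, 4)": "x"}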
def fetch_assets(self):
""" download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping.
"""
# allow overwrites from the commandline
packages = set(
env.instance.config.get('bootstrap-packages', '').split())
packages.update(['python27'])
cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"')
items = sorted(self.bootstrap_files.items())
for filename, asset in items:
if asset.url:
if not exists(dirname(asset.local)):
os.makedirs(dirname(asset.local))
local(cmd.format(asset))
if filename == 'packagesite.txz':
# add packages to download
items.extend(self._fetch_packages(asset.local, packages)) | download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping. |
def _serialize_value_for_xml(self, value):
"""See base class."""
if value is not None:
value_serialized = self.serializer.serialize(value)
else:
value_serialized = ''
return value_serialized | See base class. |
def applyIndex(self, lst, right):
"""Apply a list to something else."""
if len(right) != 1:
raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, int):
return lst[right]
raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, right)) | Apply a list to something else. |
def indication(self, pdu):
"""Requests are queued for delivery."""
if _debug: TCPServer._debug("indication %r", pdu)
self.request += pdu.pduData | Requests are queued for delivery. |
def get_partition_dciId(self, org_name, part_name, part_info=None):
"""get DCI ID for the partition.
:param org_name: name of organization
:param part_name: name of partition
"""
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info("query result from dcnm for partition info is %s",
part_info)
if part_info is not None and "dciId" in part_info:
return part_info.get("dciId") | get DCI ID for the partition.
:param org_name: name of organization
:param part_name: name of partition |
def load_vectors(self, vectors, **kwargs):
"""
Arguments:
vectors: one of or a list containing instantiations of the
GloVe, CharNGram, or Vectors classes. Alternatively, one
of or a list of available pretrained vectors:
charngram.100d
fasttext.en.300d
fasttext.simple.300d
glove.42B.300d
glove.840B.300d
glove.twitter.27B.25d
glove.twitter.27B.50d
glove.twitter.27B.100d
glove.twitter.27B.200d
glove.6B.50d
glove.6B.100d
glove.6B.200d
glove.6B.300d
Remaining keyword arguments: Passed to the constructor of Vectors classes.
"""
if not isinstance(vectors, list):
vectors = [vectors]
for idx, vector in enumerate(vectors):
if six.PY2 and isinstance(vector, str):
vector = six.text_type(vector)
if isinstance(vector, six.string_types):
# Convert the string pretrained vector identifier
# to a Vectors object
if vector not in pretrained_aliases:
raise ValueError(
"Got string input vector {}, but allowed pretrained "
"vectors are {}".format(
vector, list(pretrained_aliases.keys())))
vectors[idx] = pretrained_aliases[vector](**kwargs)
elif not isinstance(vector, Vectors):
raise ValueError(
"Got input vectors of type {}, expected str or "
"Vectors object".format(type(vector)))
tot_dim = sum(v.dim for v in vectors)
self.vectors = torch.Tensor(len(self), tot_dim)
for i, token in enumerate(self.itos):
start_dim = 0
for v in vectors:
end_dim = start_dim + v.dim
self.vectors[i][start_dim:end_dim] = v[token.strip()]
start_dim = end_dim
assert(start_dim == tot_dim) | Arguments:
vectors: one of or a list containing instantiations of the
GloVe, CharNGram, or Vectors classes. Alternatively, one
of or a list of available pretrained vectors:
charngram.100d
fasttext.en.300d
fasttext.simple.300d
glove.42B.300d
glove.840B.300d
glove.twitter.27B.25d
glove.twitter.27B.50d
glove.twitter.27B.100d
glove.twitter.27B.200d
glove.6B.50d
glove.6B.100d
glove.6B.200d
glove.6B.300d
Remaining keyword arguments: Passed to the constructor of Vectors classes. |
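A hedged usage sketch in the torchtext style, assuming vocab is a built Vocab instance and the pretrained alias is cached locally:
vocab.load_vectors('glove.6B.50d')
print(vocab.vectors.shape)   # (len(vocab), 50), one concatenated row per token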
def detach(self, ids=None, touch=True):
"""
Detach models from the relationship.
"""
if isinstance(ids, orator.orm.model.Model):
ids = ids.get_key()
if ids is None:
ids = []
query = self._new_pivot_query()
if not isinstance(ids, list):
ids = [ids]
if len(ids) > 0:
query.where_in(self._other_key, ids)
if touch:
self.touch_if_touching()
results = query.delete()
return results | Detach models from the relationship. |
def tValueForPoint(self, point):
"""
get a t value for a given point
required:
the point must be a point on the curve.
in an overlap case the point will be an intersection point, which is always a point on the curve
"""
if self.segmentType == "curve":
on1 = self.previousOnCurve
off1 = self.points[0].coordinates
off2 = self.points[1].coordinates
on2 = self.points[2].coordinates
return _tValueForPointOnCubicCurve(point, (on1, off1, off2, on2))
elif self.segmentType == "line":
return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))
elif self.segmentType == "qcurve":
raise NotImplementedError
else:
raise NotImplementedError | get a t values for a given point
required:
the point must be a point on the curve.
in an overlap case the point will be an intersection point, which is always a point on the curve
def delete_messages(self, messages):
"""
Delete existing messages.
http://dev.wheniwork.com/#delete-existing-message
"""
url = "/2/messages/?%s" % urlencode([('ids', ",".join(messages))])
data = self._delete_resource(url)
return data | Delete existing messages.
http://dev.wheniwork.com/#delete-existing-message |
def getTypeName(data_type_oid, type_modifier):
"""Returns the base type name according to data_type_oid and type_modifier"""
if data_type_oid == VerticaType.BOOL:
return "Boolean"
elif data_type_oid == VerticaType.INT8:
return "Integer"
elif data_type_oid == VerticaType.FLOAT8:
return "Float"
elif data_type_oid == VerticaType.CHAR:
return "Char"
elif data_type_oid in (VerticaType.VARCHAR, VerticaType.UNKNOWN):
return "Varchar"
elif data_type_oid == VerticaType.LONGVARCHAR:
return "Long Varchar"
elif data_type_oid == VerticaType.DATE:
return "Date"
elif data_type_oid == VerticaType.TIME:
return "Time"
elif data_type_oid == VerticaType.TIMETZ:
return "TimeTz"
elif data_type_oid == VerticaType.TIMESTAMP:
return "Timestamp"
elif data_type_oid == VerticaType.TIMESTAMPTZ:
return "TimestampTz"
elif data_type_oid in (VerticaType.INTERVAL, VerticaType.INTERVALYM):
return "Interval " + getIntervalRange(data_type_oid, type_modifier)
elif data_type_oid == VerticaType.BINARY:
return "Binary"
elif data_type_oid == VerticaType.VARBINARY:
return "Varbinary"
elif data_type_oid == VerticaType.LONGVARBINARY:
return "Long Varbinary"
elif data_type_oid == VerticaType.NUMERIC:
return "Numeric"
elif data_type_oid == VerticaType.UUID:
return "Uuid"
else:
return "Unknown" | Returns the base type name according to data_type_oid and type_modifier |
def parse(self, args=None):
'''Parse a list of arguments, returning a dict.
Flags are only boolean if they are not followed by a non-flag argument.
All positional arguments not associable with a flag will be added to the return dictionary's `['_']` field.
'''
opts = dict()
if args is None:
import sys
# skip over the program name with the [1:] slice
args = sys.argv[1:]
# arglist is a tuple of (is_flag, name) pairs
arglist = peekable(parse_tokens(args))
for is_flag, name in arglist:
if is_flag is True:
# .peek will return the default argument iff there are no more entries
next_is_flag, next_name = arglist.peek(default=(None, None))
# next_is_flag will be None if there are no more items, but True/False if there is a next item
# if this argument looks for a subsequent (is set as boolean),
# and the subsequent is not a flag, consume it
if next_is_flag is False:
opts[name] = next_name
# finally, advance our iterator, but since we already have the next values, just discard it
arglist.next()
else:
# if there is no next thing, or the next thing is a flag,
# all the boolean=False's in the world can't save you then
opts[name] = True
else:
# add positional argument
opts.setdefault('_', []).append(name)
return opts | Parse a list of arguments, returning a dict.
Flags are only boolean if they are not followed by a non-flag argument.
All positional arguments not associable with a flag will be added to the return dictionary's `['_']` field. |
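A hedged usage sketch, assuming parse_tokens yields (is_flag, name) pairs with leading dashes stripped from flag names (the class owning parse is not shown, so Parser is a hypothetical stand-in):
opts = Parser().parse(['--verbose', '--name', 'bob', 'input.txt'])
# -> {'verbose': True, 'name': 'bob', '_': ['input.txt']}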
def readline(self):
"""Get the next line including the newline or '' on EOF."""
self.lineno += 1
if self._buffer:
return self._buffer.pop()
else:
return self.input.readline() | Get the next line including the newline or '' on EOF. |
def max_intensity(item_a, time_a, item_b, time_b, max_value):
"""
RMS difference in maximum intensity
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
intensity_a = item_a.max_intensity(time_a)
intensity_b = item_b.max_intensity(time_b)
diff = np.sqrt((intensity_a - intensity_b) ** 2)
return np.minimum(diff, max_value) / float(max_value) | RMS difference in maximum intensity
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1. |
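The distance reduces to a capped, scaled absolute difference; a standalone check:
import numpy as np
np.minimum(np.sqrt((55 - 40) ** 2), 20) / float(20)   # -> 0.75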
def parser(self):
"""Returns the appropriate parser to use for adding arguments to your command."""
if self._command_parser is None:
parents = []
if self.need_verbose:
parents.append(_verbose_parser)
if self.need_settings:
parents.append(_settings_parser)
self._command_parser = self._main_parser.add_parser(self.name,
help=self.help,
parents=parents,
formatter_class=argparse.RawDescriptionHelpFormatter)
return self._command_parser | Returns the appropriate parser to use for adding arguments to your command. |
def read(self, filename):
"""
Read a tribe of templates from a tar formatted file.
:type filename: str
:param filename: File to read templates from.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates
>>> tribe_back = Tribe().read('test_tribe.tgz')
>>> tribe_back == tribe
True
"""
with tarfile.open(filename, "r:*") as arc:
temp_dir = tempfile.mkdtemp()
arc.extractall(path=temp_dir, members=_safemembers(arc))
tribe_dir = glob.glob(temp_dir + os.sep + '*')[0]
self._read_from_folder(dirname=tribe_dir)
shutil.rmtree(temp_dir)
return self | Read a tribe of templates from a tar formatted file.
:type filename: str
:param filename: File to read templates from.
.. rubric:: Example
>>> tribe = Tribe(templates=[Template(name='c', st=read())])
>>> tribe.write('test_tribe')
Tribe of 1 templates
>>> tribe_back = Tribe().read('test_tribe.tgz')
>>> tribe_back == tribe
True |
def build(path, query=None, fragment=''):
"""
Generates a URL based on the inputted path and given query options and
fragment. The query should be a dictionary of terms that will be
generated into the URL, while the fragment is the anchor point within the
target path that will be navigated to. If there are any wildcards within
the path that are found within the query, they will be inserted into the
path itself and removed from the query string.
:example |>>> import skyline.gui
|>>> skyline.gui.build_url('sky://projects/%(project)s',
| {'project': 'Test', 'asset': 'Bob'})
|'sky://projects/Test/?asset=Bob'
:param path | <str>
query | <dict> || None
fragment | <str> || None
:return <str> | url
"""
url = nstr(path)
# replace the optional arguments in the url
keys = projex.text.findkeys(path)
if keys:
if query is None:
query = {}
opts = {}
for key in keys:
opts[key] = query.pop(key, '%({})s'.format(key))
url %= opts
# add the query
if query:
if type(query) is dict:
mapped_query = {}
for key, value in query.items():
mapped_query[nstr(key)] = nstr(value)
query_str = urllib.urlencode(mapped_query)
else:
query_str = nstr(query)
url += '?' + query_str
# include the fragment
if fragment:
url += '#' + fragment
return url | Generates a URL based on the inputted path and given query options and
fragment. The query should be a dictionary of terms that will be
generated into the URL, while the fragment is the anchor point within the
target path that will be navigated to. If there are any wildcards within
the path that are found within the query, they will be inserted into the
path itself and removed from the query string.
:example |>>> import skyline.gui
|>>> skyline.gui.build_url('sky://projects/%(project)s',
| {'project': 'Test', 'asset': 'Bob'})
|'sky://projects/Test/?asset=Bob'
:param path | <str>
query | <dict> || None
fragment | <str> || None
:return <str> | url |
def unpack(self, buff, offset=0):
"""Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
After unpacking, the absence of a `tpid` value causes the assignment
of None to the field values to indicate that there is no VLAN
information.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
super().unpack(buff, offset)
if self.tpid.value:
self._validate()
self.tpid = self.tpid.value
self.pcp = self._tci.value >> 13
self.cfi = (self._tci.value >> 12) & 1
self.vid = self._tci.value & 4095
else:
self.tpid = EtherType.VLAN
self.pcp = None
self.cfi = None
self.vid = None | Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
After unpacking, the absence of a `tpid` value causes the assignment
of None to the field values to indicate that there is no VLAN
information.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails. |
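A standalone illustration of the TCI bit layout used above (PCP in the top 3 bits, CFI in bit 12, VID in the low 12 bits):
tci = 0b1010000000101010   # PCP=5, CFI=0, VID=42
pcp = tci >> 13            # -> 5
cfi = (tci >> 12) & 1      # -> 0
vid = tci & 4095           # -> 42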