code (string, lengths 51–2.38k) | docstring (string, lengths 4–15.2k)
---|---
def maybeDeferred(f, *args, **kw):
try:
result = f(*args, **kw)
except Exception:
return fail(failure.Failure())
if IFiber.providedBy(result):
import traceback
frames = traceback.extract_stack()
msg = "%s returned a fiber instead of a deferred" % (f, )
if len(frames) > 1:
msg += "; called from %s" % (frames[-2], )
raise RuntimeError(msg)
if isinstance(result, Deferred):
return result
elif isinstance(result, failure.Failure):
return fail(result)
else:
return succeed(result)
|
Copied from twisted.internet.defer, with an added check to detect fibers.
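A minimal usage sketch, assuming Twisted's ``Deferred`` (and the ``succeed``/``fail`` helpers used above) are in scope:
>>> from twisted.internet.defer import Deferred
>>> isinstance(maybeDeferred(lambda: 42), Deferred)     # plain results are wrapped via succeed()
True
>>> isinstance(maybeDeferred(lambda: 1 / 0), Deferred)  # exceptions become failing Deferreds via fail()
True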
|
def update_ddl(self, ddl_statements, operation_id=""):
client = self._instance._client
api = client.database_admin_api
metadata = _metadata_with_prefix(self.name)
future = api.update_database_ddl(
self.name, ddl_statements, operation_id=operation_id, metadata=metadata
)
return future
|
Update DDL for this database.
Apply any configured schema from :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase
:type ddl_statements: Sequence[str]
:param ddl_statements: a list of DDL statements to use on this database
:type operation_id: str
:param operation_id: (optional) a string ID for the long-running operation
:rtype: :class:`google.api_core.operation.Operation`
:returns: an operation instance
:raises NotFound: if the database does not exist
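A hedged usage sketch (the instance and database IDs below are hypothetical); the returned future follows the ``google.api_core`` long-running operation interface:
from google.cloud import spanner

spanner_client = spanner.Client()
database = spanner_client.instance('my-instance').database('my-database')  # hypothetical IDs
operation = database.update_ddl(['ALTER TABLE users ADD COLUMN age INT64'])
operation.result()  # block until the schema change completes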
|
def fail_api(channel):
gui = ui_embed.UI(
channel,
"Couldn't get stats off RLTrackerNetwork.",
"Maybe the API changed, please tell Infraxion.",
modulename=modulename,
colour=0x0088FF
)
return gui
|
Creates an embed UI for when the API call didn't work
Args:
channel (discord.Channel): The Discord channel to bind the embed to
Returns:
ui (ui_embed.UI): The embed UI object
|
def filtered(self):
if not is_tagged(self.tags, self.opt.tags):
LOG.info("Skipping %s as it does not have requested tags",
self.path)
return False
if not specific_path_check(self.path, self.opt):
LOG.info("Skipping %s as it does not match specified paths",
self.path)
return False
return True
|
Determines whether or not the resource is filtered.
Resources may be filtered if their tags do not match
or if the user has specified explicit paths to include
or exclude via command line options
|
def get_job_ids(self):
if not self.parsed_response:
return None
try:
job_ids = self.parsed_response["files"]["results.xml"]["job-ids"]
except KeyError:
return None
if not job_ids or job_ids == [0]:
return None
return job_ids
|
Returns job IDs of the import.
|
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None):
possibles = self.topology if only is None else {k: self[k] for k in only}
_ = lambda key: [key] if key in possibles else []
if 'RoW' not in self and key == 'RoW':
answer = [] + _('RoW') + _('GLO')
return list(reversed(answer)) if biggest_first else answer
faces = self[key]
lst = [
(k, len(v))
for k, v in possibles.items()
if faces.issubset(v)
]
return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
|
Get all locations that completely contain this location.
If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
|
def do_resource(self,args):
parser = CommandArgumentParser("resource")
parser.add_argument('-i','--logical-id',dest='logical-id',help='logical id of the child resource');
args = vars(parser.parse_args(args))
stackName = self.wrappedStack['rawStack'].name
logicalId = args['logical-id']
self.stackResource(stackName,logicalId)
|
Go to the specified resource. resource -h for detailed help
|
def curse_add_line(self, msg, decoration="DEFAULT",
optional=False, additional=False,
splittable=False):
return {'msg': msg, 'decoration': decoration, 'optional': optional, 'additional': additional, 'splittable': splittable}
|
Return a dict describing the line to display.
Where:
msg: string
decoration:
DEFAULT: no decoration
UNDERLINE: underline
BOLD: bold
TITLE: for stat title
PROCESS: for process name
STATUS: for process status
NICE: for process niceness
CPU_TIME: for process cpu time
OK: Value is OK and non logged
OK_LOG: Value is OK and logged
CAREFUL: Value is CAREFUL and non logged
CAREFUL_LOG: Value is CAREFUL and logged
WARNING: Value is WARNING and non logged
WARNING_LOG: Value is WARNING and logged
CRITICAL: Value is CRITICAL and non logged
CRITICAL_LOG: Value is CRITICAL and logged
optional: True if the stat is optional (display only if space is available)
additional: True if the stat is additional (display only if space is available after optional)
splittable: Line can be split to fit on the screen (default is not splittable)
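For illustration, calling it on a plugin instance (``plugin`` here is a hypothetical plugin object) returns:
>>> plugin.curse_add_line('CPU', decoration='TITLE')
{'msg': 'CPU', 'decoration': 'TITLE', 'optional': False, 'additional': False, 'splittable': False}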
|
def login(self) -> bool:
response = self.get(self.LOGIN_URL)
login_url = get_base_url(response.text)
login_data = {'email': self._login, 'pass': self._password}
login_response = self.post(login_url, login_data)
url_params = get_url_params(login_response.url)
self.check_for_additional_actions(url_params,
login_response.text,
login_data)
if 'remixsid' in self.cookies or 'remixsid6' in self.cookies:
return True
return False
|
Authorizes a user and returns a bool value of the result
|
def load_completions(self):
try:
index_str = self.load_index(utils.AWSCLI_VERSION)
except IndexLoadError:
return
index_data = json.loads(index_str)
index_root = index_data['aws']
self.commands = index_root['commands']
self.global_opts = index_root['arguments']
for command in self.commands:
subcommands_current = index_root['children'] \
.get(command)['commands']
self.subcommands.extend(subcommands_current)
for subcommand_current in subcommands_current:
args_opts_current = index_root['children'] \
.get(command)['children'] \
.get(subcommand_current)['arguments']
self.args_opts.update(args_opts_current)
|
Load completions from the completion index.
Updates the following attributes:
* commands
* subcommands
* global_opts
* args_opts
|
def tweet_list_handler(request, tweet_list_builder, msg_prefix=""):
tweets = tweet_list_builder(request.access_token())
print (len(tweets), 'tweets found')
if tweets:
twitter_cache.initialize_user_queue(user_id=request.access_token(),
queue=tweets)
text_to_read_out = twitter_cache.user_queue(request.access_token()).read_out_next(MAX_RESPONSE_TWEETS)
message = msg_prefix + text_to_read_out + ", say 'next' to hear more, or reply to a tweet by number."
return alexa.create_response(message=message,
end_session=False)
else:
return alexa.create_response(message="Sorry, no tweets found, please try something else",
end_session=False)
|
This is a generic function to handle any intent that reads out a list of tweets
|
async def get_supported_playback_functions(
self, uri=""
) -> List[SupportedFunctions]:
return [
SupportedFunctions.make(**x)
for x in await self.services["avContent"]["getSupportedPlaybackFunction"](
uri=uri
)
]
|
Return list of inputs and their supported functions.
|
def print_event_count():
for source in archive.list_event_sources():
event_count = 0
for group in archive.list_event_histogram(source):
for rec in group.records:
event_count += rec.count
print(' {: <40} {: >20}'.format(source, event_count))
|
Print the number of events grouped by source.
|
def is_generator(self, node):
if not isinstance(node.body, list):
return False
for item in node.body:
if isinstance(item, (ast.Assign, ast.Expr)):
if isinstance(item.value, ast.Yield):
return True
elif not isinstance(item, ast.FunctionDef) and hasattr(item, 'body'):
if self.is_generator(item):
return True
return False
|
Checks whether a function is a generator by looking for a yield
statement or expression.
|
def tokenize_text(string):
string = six.text_type(string)
rez = []
for part in string.split('\n'):
par = []
for sent in tokenize_sents(part):
par.append(tokenize_words(sent))
if par:
rez.append(par)
return rez
|
Tokenize input text to paragraphs, sentences and words.
Tokenization into paragraphs is done using a simple newline algorithm.
For sentences and words the tokenizers above are used
:param string: Text to tokenize
:type string: str or unicode
:return: text, tokenized into paragraphs, sentences and words
:rtype: list of list of list of words
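An illustrative sketch of the returned shape (exact token boundaries depend on the sentence and word tokenizers used above):
>>> paragraphs = tokenize_text('First sentence. Second one.\nNew paragraph.')
>>> len(paragraphs)   # paragraphs are split on newlines
2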
|
def set_sound_mode(self, sound_mode):
if sound_mode == ALL_ZONE_STEREO:
if self._set_all_zone_stereo(True):
self._sound_mode_raw = ALL_ZONE_STEREO
return True
else:
return False
if self._sound_mode_raw == ALL_ZONE_STEREO:
if not self._set_all_zone_stereo(False):
return False
command_url = self._urls.command_sel_sound_mode + sound_mode
try:
if self.send_get_command(command_url):
self._sound_mode_raw = self._sound_mode_dict[sound_mode][0]
return True
else:
return False
except requests.exceptions.RequestException:
_LOGGER.error("Connection error: sound mode function %s not set.",
sound_mode)
return False
|
Set sound_mode of device.
Valid values depend on the device and should be taken from
"sound_mode_list".
Return "True" on success and "False" on fail.
|
def i2c_read_data(self, address):
task = asyncio.ensure_future(self.core.i2c_read_data(address))
value = self.loop.run_until_complete(task)
return value
|
Retrieve result of last data read from i2c device.
i2c_read_request should be called before trying to retrieve data.
It is intended for use by a polling application.
:param address: i2c device address
:returns: last data read or None if no data is present.
|
def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool:
return any(q in qubits for q in self.qubits)
|
Determines if the moment has operations touching the given qubits.
Args:
qubits: The qubits that may or may not be touched by operations.
Returns:
Whether this moment has operations involving the qubits.
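A short sketch, assuming the standard cirq qubit and Moment types:
>>> import cirq
>>> a, b = cirq.LineQubit.range(2)
>>> moment = cirq.Moment([cirq.X(a)])
>>> moment.operates_on([a]), moment.operates_on([b])
(True, False)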
|
def sudo(self, command, **kwargs):
runner = self.config.runners.remote(self)
return self._sudo(runner, command, **kwargs)
|
Execute a shell command, via ``sudo``, on the remote end.
This method is identical to `invoke.context.Context.sudo` in every way,
except in that -- like `run` -- it honors per-host/per-connection
configuration overrides in addition to the generic/global ones. Thus,
for example, per-host sudo passwords may be configured.
.. versionadded:: 2.0
|
def fmt_val(val, shorten=True):
val = repr(val)
max = 50
if shorten:
if len(val) > max:
close = val[-1]
val = val[0:max-4] + "..."
if close in (">", "'", '"', ']', '}', ')'):
val = val + close
return val
|
Format a value for inclusion in an
informative text string.
|
def add_list_opt(self, opt, values):
self.add_opt(opt)
for val in values:
self.add_opt(val)
|
Add an option with a list of non-file parameters.
|
def _standard_params(klass, ids, metric_groups, **kwargs):
end_time = kwargs.get('end_time', datetime.utcnow())
start_time = kwargs.get('start_time', end_time - timedelta(seconds=604800))
granularity = kwargs.get('granularity', GRANULARITY.HOUR)
placement = kwargs.get('placement', PLACEMENT.ALL_ON_TWITTER)
params = {
'metric_groups': ','.join(metric_groups),
'start_time': to_time(start_time, granularity),
'end_time': to_time(end_time, granularity),
'granularity': granularity.upper(),
'entity': klass.ANALYTICS_MAP[klass.__name__],
'placement': placement
}
params['entity_ids'] = ','.join(ids)
return params
|
Sets the standard params for a stats request
|
def derivative(self, point):
if self.pad_mode == 'constant' and self.pad_const != 0:
return ResizingOperator(
domain=self.domain, range=self.range, pad_mode='constant',
pad_const=0.0)
else:
return self
|
Derivative of this operator at ``point``.
For the particular case of constant padding with non-zero
constant, the derivative is the corresponding zero-padding
variant. In all other cases, this operator is linear, i.e.
the derivative is equal to ``self``.
|
def score_segmentation(segmentation, table):
stroke_nr = sum(1 for symbol in segmentation for stroke in symbol)
score = 1
for i in range(stroke_nr):
for j in range(i+1, stroke_nr):
qval = q(segmentation, i, j)
if qval:
score *= table[i][j]
else:
score *= table[j][i]
return score
|
Get the score of a segmentation.
|
def add_css_class(css_classes, css_class, prepend=False):
classes_list = split_css_classes(css_classes)
classes_to_add = [c for c in split_css_classes(css_class) if c not in classes_list]
if prepend:
classes_list = classes_to_add + classes_list
else:
classes_list += classes_to_add
return " ".join(classes_list)
|
Add a CSS class to a string of CSS classes
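For example (assuming ``split_css_classes`` splits on whitespace):
>>> add_css_class("btn btn-default", "active")
'btn btn-default active'
>>> add_css_class("btn active", "active")          # duplicates are not re-added
'btn active'
>>> add_css_class("btn", "active", prepend=True)
'active btn'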
|
def format_datetime(dt, usegmt=False):
now = dt.timetuple()
if usegmt:
if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
raise ValueError("usegmt option requires a UTC datetime")
zone = 'GMT'
elif dt.tzinfo is None:
zone = '-0000'
else:
zone = dt.strftime("%z")
return _format_timetuple_and_zone(now, zone)
|
Turn a datetime into a date string as specified in RFC 2822.
If usegmt is True, dt must be an aware datetime with an offset of zero. In
this case 'GMT' will be rendered instead of the normal +0000 required by
RFC2822. This is to support HTTP headers involving date stamps.
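For instance, mirroring the standard-library behaviour this helper is modelled on:
>>> import datetime
>>> format_datetime(datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc), usegmt=True)
'Wed, 01 Jan 2020 00:00:00 GMT'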
|
def _instantiate_players(self, player_dict):
home_players = []
away_players = []
for player_id, details in player_dict.items():
player = BoxscorePlayer(player_id,
details['name'],
details['data'])
if details['team'] == HOME:
home_players.append(player)
else:
away_players.append(player)
return away_players, home_players
|
Create a list of player instances for both the home and away teams.
For every player listed on the boxscores page, create an instance of
the BoxscorePlayer class for that player and add them to a list of
players for their respective team.
Parameters
----------
player_dict : dictionary
A dictionary containing information for every player on the
boxscores page. Each key is a string containing the player's ID
and each value is a dictionary with the player's full name, a
string representation of their HTML stats, and a string constant
denoting which team they play for as the values.
Returns
-------
tuple
Returns a ``tuple`` in the format (away_players, home_players)
where each element is a list of player instances for the away and
home teams, respectively.
|
def attach_session(self):
assert self.session is None
root = self.find_root()
session = self.Session(root)
root.inject_context(session=session)
return session
|
Create a session and inject it as context for this command and any
subcommands.
|
def _get_api_events(self, function):
if not (function.valid() and
isinstance(function.properties, dict) and
isinstance(function.properties.get("Events"), dict)
):
return {}
api_events = {}
for event_id, event in function.properties["Events"].items():
if event and isinstance(event, dict) and event.get("Type") == "Api":
api_events[event_id] = event
return api_events
|
Method to return a dictionary of API Events on the function
:param SamResource function: Function Resource object
:return dict: Dictionary of API events along with any other configuration passed to it.
Example: {
FooEvent: {Path: "/foo", Method: "post", RestApiId: blah, MethodSettings: {<something>},
Cors: {<something>}, Auth: {<something>}},
BarEvent: {Path: "/bar", Method: "any", MethodSettings: {<something>}, Cors: {<something>},
Auth: {<something>}}
}
|
def dict(self):
return {
'title': self.title,
'description': self.description,
'time': self.time.isoformat(),
'data': self.data()
}
|
the dict representation.
:return: the dict
:rtype: dict
|
def virtual_interface_create(name, net_name, **kwargs):
conn = get_conn()
return conn.virtual_interface_create(name, net_name)
|
Create private networks
|
def get_var(var, default='""'):
ret = os.environ.get('NBCONVERT_' + var)
if ret is None:
return default
return json.loads(ret)
|
get var inside notebook: reads the ``NBCONVERT_``-prefixed environment variable, JSON-decoded, or returns ``default`` if it is unset
|
def intern(self, text):
if self.table_type.is_shared:
raise TypeError('Cannot intern on shared symbol table')
if not isinstance(text, six.text_type):
raise TypeError('Cannot intern non-Unicode sequence into symbol table: %r' % text)
token = self.get(text)
if token is None:
token = self.__add_text(text)
return token
|
Interns the given Unicode sequence into the symbol table.
Note:
This operation is only valid on local symbol tables.
Args:
text (unicode): The target to intern.
Returns:
SymbolToken: The mapped symbol token which may already exist in the table.
|
def create_pred2common(self):
self.pred2common = {}
for common_name, ext_preds in self.common2preds.items():
for pred in ext_preds:
pred = pred.lower().strip()
self.pred2common[pred] = common_name
|
Takes the lists of accepted predicates (and their respective suffixes) linked to each common name
and maps each predicate, lowercased and stripped, back to its common name to decrease sensitivity.
|
def mapping_get(uri, mapping):
ln = localname(uri)
for k, v in mapping.items():
if k == uri:
return v
for k, v in mapping.items():
if k == ln:
return v
l = list(mapping.items())
l.sort(key=lambda i: len(i[0]), reverse=True)
for k, v in l:
if k[0] == '*' and ln.endswith(k[1:]):
return v
raise KeyError(uri)
|
Look up the URI in the given mapping and return the result.
Throws KeyError if no matching mapping was found.
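A short illustration of the lookup order, assuming ``localname`` strips the namespace part of the URI: exact URI first, then local name, then the longest matching ``*``-suffix pattern.
>>> mapping = {'http://example.org/title': 'exact', 'label': 'by-localname', '*name': 'by-suffix'}
>>> mapping_get('http://example.org/title', mapping)
'exact'
>>> mapping_get('http://example.org/label', mapping)
'by-localname'
>>> mapping_get('http://example.org/nickname', mapping)
'by-suffix'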
|
def download_quad(self, quad, callback=None):
download_url = quad['_links']['download']
return self._get(download_url, models.Body, callback=callback)
|
Download the specified mosaic quad. If provided, the callback will
be invoked asynchronously. Otherwise it is up to the caller to handle
the response Body.
:param quad: A mosaic quad representation (dict) from the API
:param callback: An optional function to asynchronously handle the
download. See :py:func:`planet.api.write_to_file`
:returns: :py:Class:`planet.api.models.Response` containing a
:py:Class:`planet.api.models.Body` of the asset.
:raises planet.api.exceptions.APIException: On API error.
|
def getName(self):
name=self.findattr('name')
if not name:
name="_directinput_"
if self.classname().lower()=="line":
name+="."+str(self).replace(" ","_").lower()
else:
name=name.replace('.txt','')
while name.startswith("."):
name=name[1:]
return name
|
Return a Name string for this object.
|
def combine_columns(columns):
columns_zipped = itertools.zip_longest(*columns)
return ''.join(x for zipped in columns_zipped for x in zipped if x)
|
Combine ``columns`` into a single string.
Example:
>>> combine_columns(['eape', 'xml'])
'example'
Args:
columns (iterable): ordered columns to combine
Returns:
String of combined columns
|
def find_arg(self, name):
name = self.normalize_name(name)
return self.args.get(name)
|
Find arg by normalized arg name or parameter name.
|
def wait_until_element_stops(self, element, times=1000, timeout=None):
return self._wait_until(self._expected_condition_find_element_stopped, (element, times), timeout)
|
Search element and wait until it has stopped moving
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param times: number of iterations checking the element's location that must be the same for all of them
in order to consider that the element has stopped
:returns: the web element if the element is stopped
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element does not stop after the timeout
|
def _get_weekly_date_range(self, metric_date, delta):
dates = [metric_date]
end_date = metric_date + delta
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
|
Gets the range of years that we need to use as keys to get metrics from redis.
|
def _send(self, message):
params = {
'from': message.from_phone,
'to': ",".join(message.to),
'text': message.body,
'api_key': self.get_api_key(),
'api_secret': self.get_api_secret(),
}
logger.debug("POST to %r with body: %r", NEXMO_API_URL, params)
return self.parse(NEXMO_API_URL, requests.post(NEXMO_API_URL, data=params))
|
A helper method that does the actual sending
:param SmsMessage message: SmsMessage class instance.
:returns: True if message is sent else False
:rtype: bool
|
def serror(message, *args, **kwargs):
if args or kwargs:
message = message.format(*args, **kwargs)
return secho(message, fg='white', bg='red', bold=True)
|
Print a styled error message, while using any arguments to format the message.
|
def write_file(self, filename='HEADER'):
with open(filename, "w") as f:
f.write(str(self) + "\n")
|
Writes Header into filename on disk.
Args:
filename: Filename and path for file to be written to disk
|
def find_all(self, name=None, **attrs):
for descendant in self.__descendants():
if hasattr(descendant, '__match__') and \
descendant.__match__(name, attrs):
yield descendant
|
r"""Return all descendant nodes matching criteria.
:param Union[None,str] name: name of LaTeX expression
:param attrs: LaTeX expression attributes, such as item text.
:return: All descendant nodes matching criteria
:rtype: Iterator[TexNode]
>>> from TexSoup import TexSoup
>>> soup = TexSoup(r'''
... \section{Ooo}
... \textit{eee}
... \textit{ooo}''')
>>> gen = soup.find_all('textit')
>>> next(gen)
\textit{eee}
>>> next(gen)
\textit{ooo}
>>> next(soup.find_all('textbf'))
Traceback (most recent call last):
...
StopIteration
|
def read(self):
self.found_visible = False
is_multi_quote_header = self.MULTI_QUOTE_HDR_REGEX_MULTILINE.search(self.text)
if is_multi_quote_header:
self.text = self.MULTI_QUOTE_HDR_REGEX.sub(is_multi_quote_header.groups()[0].replace('\n', ''), self.text)
self.text = re.sub('([^\n])(?=\n ?[_-]{7,})', '\\1\n', self.text, re.MULTILINE)
self.lines = self.text.split('\n')
self.lines.reverse()
for line in self.lines:
self._scan_line(line)
self._finish_fragment()
self.fragments.reverse()
return self
|
Creates a new fragment for each line
and labels it as a signature, quote, or hidden.
Returns the EmailMessage instance
|
def _profiles_index(self):
prof_ind_name = self.prof_ind_name
f = open(self.sldir+'/'+prof_ind_name,'r')
line = f.readline()
numlines=int(line.split()[0])
print(str(numlines)+' in profiles.index file ...')
model=[]
log_file_num=[]
for line in f:
model.append(int(line.split()[0]))
log_file_num.append(int(line.split()[2]))
log_ind={}
for a,b in zip(model,log_file_num):
log_ind[a] = b
self.log_ind=log_ind
self.model=model
|
read profiles.index and make hash array
Notes
-----
sets the attributes.
log_ind : hash array that returns profile.data or log.data
file number from model number.
model : the models for which profile.data or log.data is
available
|
def to_utc(some_time):
if some_time.tzinfo and some_time.utcoffset():
some_time = some_time.astimezone(tzutc())
return some_time.replace(tzinfo=None)
|
Convert the given date to UTC, if the date contains a timezone.
Parameters
----------
some_time : datetime.datetime
datetime object to convert to UTC
Returns
-------
datetime.datetime
Converted datetime object
|
def get_article_properties_per_page(self, per_page=1000, page=1, params=None):
return self._get_resource_per_page(resource=ARTICLE_PROPERTIES, per_page=per_page, page=page, params=params)
|
Get article properties per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
|
def _expected_condition_find_element_stopped(self, element_times):
element, times = element_times
web_element = self._expected_condition_find_element(element)
try:
locations_list = [tuple(web_element.location.values()) for i in range(int(times)) if not time.sleep(0.001)]
return web_element if set(locations_list) == set(locations_list[-1:]) else False
except StaleElementReferenceException:
return False
|
Tries to find the element and checks that it has stopped moving, but does not throw an exception if the element
is not found
:param element_times: Tuple with 2 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] times: number of iterations checking the element's location that must be the same for all of them
in order to considering the element has stopped
:returns: the web element if it has stopped moving, or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
|
def _print_duration(self):
duration = int(time.time() - self._start_time)
self._print(datetime.timedelta(seconds=duration))
|
Print the elapsed download time.
|
def check_used(self, pkg):
used = []
dep_path = self.meta.log_path + "dep/"
logs = find_package("", dep_path)
for log in logs:
deps = Utils().read_file(dep_path + log)
for dep in deps.splitlines():
if pkg == dep:
used.append(log)
return used
|
Check whether the package is used as a dependency by other installed packages
|
def _parse_peer_link(self, config):
match = re.search(r'peer-link (\S+)', config)
value = match.group(1) if match else None
return dict(peer_link=value)
|
Scans the config block and parses the peer-link value
Args:
config (str): The config block to scan
Returns:
dict: A dict object that is intended to be merged into the
resource dict
|
def runway_config(self):
if not self._runway_config:
self._runway_config = self.parse_runway_config()
return self._runway_config
|
Return parsed runway.yml.
|
def _init_notes(self):
self.cached_json = {
'ver': self.schema,
'users': {},
'constants': {
'users': [x.name for x in self.subreddit.moderator()],
'warnings': Note.warnings
}
}
self.set_json('Initializing JSON via puni', True)
|
Set up the UserNotes page with the initial JSON schema.
|
def _do_api_call(self, method, data):
data.update({
"key": self.api_key,
"token": self.api_auth_token,
})
url = "%s%s" % (self.endpoint, method)
response = requests.get(url, params=data)
root = etree.fromstring(response.content)
status_code = root.find("header/status/code").text
exc_class = _get_exception_class_from_status_code(status_code)
if exc_class:
error_message = root.find("header/status/message").text
raise exc_class(error_message)
return root
|
Convenience method to carry out a standard API call against the
Petfinder API.
:param basestring method: The API method name to call.
:param dict data: Key/value parameters to send to the API method.
This varies based on the method.
:raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError``
sub-classes, depending on what went wrong.
:rtype: lxml.etree._Element
:returns: The parsed document.
|
def count_emails(self, conditions={}):
url = self.EMAILS_COUNT_URL + "?"
for key, value in conditions.items():
if key == 'ids':
value = ",".join(value)
url += '&%s=%s' % (key, value)
connection = Connection(self.token)
connection.set_url(self.production, url)
return connection.get_request()
|
Count all certified emails
|
def from_serializable(cls, object_dict):
key_class = cls._from_visible(cls.STARTS_WITH + 'class' + cls.ENDS_WITH)
key_module = cls._from_visible(cls.STARTS_WITH + 'module' + cls.ENDS_WITH)
obj_class = object_dict.pop(key_class)
obj_module = object_dict.pop(key_module) if key_module in object_dict else None
obj = cls._from_class(obj_class, obj_module)
obj.modify_object(object_dict)
return obj
|
core class method to create visible objects from a dictionary
|
def unassign_assessment_from_bank(self, assessment_id, bank_id):
mgr = self._get_provider_manager('ASSESSMENT', local=True)
lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
lookup_session.get_bank(bank_id)
self._unassign_object_from_catalog(assessment_id, bank_id)
|
Removes an ``Assessment`` from a ``Bank``.
arg: assessment_id (osid.id.Id): the ``Id`` of the
``Assessment``
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
raise: NotFound - ``assessment_id`` or ``bank_id`` not found or
``assessment_id`` not assigned to ``bank_id``
raise: NullArgument - ``assessment_id`` or ``bank_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
|
def parent(self, parent):
self._parent = parent
if parent:
pctx = dict((x, getattr(parent, x)) for x in parent.context_keys)
self.inject_context(pctx)
self.depth = parent.depth + 1
for command in self.subcommands.values():
command.parent = self
else:
self.depth = 0
|
Copy context from the parent into this instance as well as
adjusting our depth value to indicate where we exist in a command
tree.
|
def append_child(self, name, child):
temp = ArTree(name, child)
self._array.append(temp)
return temp
|
Append new child and return it.
|
def execute(self, *args, **kwargs):
self.walk(*args, **kwargs)
failed_steps = [step for step in self.steps if step.status == FAILED]
if failed_steps:
raise PlanFailed(failed_steps)
|
Walks each step in the underlying graph, and raises an exception if
any of the steps fail.
Raises:
PlanFailed: Raised if any of the steps fail.
|
def numberOfYTilesAtZoom(self, zoom):
"Retruns the number of tiles over y at a given zoom level"
[minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom)
return maxRow - minRow + 1
|
Returns the number of tiles over y at a given zoom level
|
def fetch(self, webfonts):
sorted_keys = sorted(webfonts.keys())
for webfont_name in sorted_keys:
self.get(webfont_name, webfonts[webfont_name])
|
Store every defined webfont.
Webfonts are stored sorted by name.
Args:
webfonts (dict): Dictionary of webfont settings from
``settings.ICOMOON_WEBFONTS``.
|
def transformation_matrix(x_vector, y_vector, translation, spacing):
matrix = numpy.zeros((4, 4), dtype=float)
matrix[:3, 0] = x_vector
matrix[:3, 1] = y_vector
z_vector = numpy.cross(x_vector, y_vector)
matrix[:3, 2] = z_vector
matrix[:3, 3] = numpy.array(translation)
matrix[3, 3] = 1.0
spacing = list(spacing)
while len(spacing) < 4:
spacing.append(1.0)
for i in range(4):
for j in range(4):
matrix[i, j] *= spacing[j]
return matrix
|
Creates a transformation matrix which will convert from a specified
coordinate system to the scanner frame of reference.
:param x_vector: The unit vector along the space X axis in scanner coordinates
:param y_vector: The unit vector along the space Y axis in scanner coordinates
:param translation: The origin of the space in scanner coordinates
:param spacing: The size of a space unit in scanner units
:return: A 4x4 transformation matrix as a numpy array
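As a sanity check, unit axis vectors with zero translation and unit spacing yield the identity matrix:
>>> import numpy
>>> m = transformation_matrix([1, 0, 0], [0, 1, 0], [0, 0, 0], [1, 1, 1])
>>> numpy.array_equal(m, numpy.eye(4))
True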
|
def _dup_samples_by_variantcaller(samples, require_bam=True):
samples = [utils.to_single_data(x) for x in samples]
samples = germline.split_somatic(samples)
to_process = []
extras = []
for data in samples:
added = False
for i, add in enumerate(handle_multiple_callers(data, "variantcaller", require_bam=require_bam)):
added = True
add = dd.set_variantcaller_order(add, i)
to_process.append([add])
if not added:
data = _handle_precalled(data)
data = dd.set_variantcaller_order(data, 0)
extras.append([data])
return to_process, extras
|
Prepare samples by variant callers, duplicating any with multiple callers.
|
def error(message, *args, **kwargs):
if 'end' in kwargs:
end = kwargs['end']
else:
end = '\n'
if len(args) == 0:
sys.stderr.write(message)
else:
sys.stderr.write(message % args)
sys.stderr.write(end)
sys.stderr.flush()
|
write a message to stderr
|
def abbrev_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
func = self.cmd_func(data.statement.command)
if func is None:
possible_cmds = [cmd for cmd in self.get_all_commands() if cmd.startswith(data.statement.command)]
if len(possible_cmds) == 1:
raw = data.statement.raw.replace(data.statement.command, possible_cmds[0], 1)
data.statement = self.statement_parser.parse(raw)
return data
|
Accept unique abbreviated commands
|
def image_corner(self, corner):
if corner not in self.corner_types():
raise GeoRaster2Error('corner %s invalid, expected: %s' % (corner, self.corner_types()))
x = 0 if corner[1] == 'l' else self.width
y = 0 if corner[0] == 'u' else self.height
return Point(x, y)
|
Return image corner in pixels, as shapely.Point.
|
def pack(chunks, r=32):
if r < 1:
raise ValueError('pack needs r > 0')
n = shift = 0
for c in chunks:
n += c << shift
shift += r
return n
|
Return integer concatenating integer chunks of r > 0 bit-length.
>>> pack([0, 1, 0, 1, 0, 1], 1)
42
>>> pack([0, 1], 8)
256
>>> pack([0, 1], 0)
Traceback (most recent call last):
...
ValueError: pack needs r > 0
|
def get_create_foreign_key_sql(self, foreign_key, table):
if isinstance(table, Table):
table = table.get_quoted_name(self)
query = "ALTER TABLE %s ADD %s" % (
table,
self.get_foreign_key_declaration_sql(foreign_key),
)
return query
|
Returns the SQL to create a new foreign key.
:rtype: sql
|
def libvlc_video_get_track_description(p_mi):
f = _Cfunctions.get('libvlc_video_get_track_description', None) or \
_Cfunction('libvlc_video_get_track_description', ((1,),), None,
ctypes.POINTER(TrackDescription), MediaPlayer)
return f(p_mi)
|
Get the description of available video tracks.
@param p_mi: media player.
@return: list with description of available video tracks, or NULL on error.
|
def get_source_files(target, build_context) -> list:
all_sources = list(target.props.sources)
for proto_dep_name in target.props.protos:
proto_dep = build_context.targets[proto_dep_name]
all_sources.extend(proto_dep.artifacts.get(AT.gen_cc).keys())
return all_sources
|
Return list of source files for `target`.
|
def mk_complex_format_func(fmt):
fmt = fmt + u"+i" + fmt
def complex_format_func(z):
return fmt % (z.real, z.imag)
return complex_format_func
|
Function used internally to generate functions to format complex
valued data.
|
def inbound_message_filter(f):
if asyncio.iscoroutinefunction(f):
raise TypeError(
"inbound_message_filter must not be a coroutine function"
)
add_handler_spec(
f,
HandlerSpec(
(_apply_inbound_message_filter, ())
),
)
return f
|
Register the decorated function as a service-level inbound message filter.
:raise TypeError: if the decorated object is a coroutine function
.. seealso::
:class:`StanzaStream`
for important remarks regarding the use of stanza filters.
|
def filter(self, value, table=None):
if table is not None:
filterable = self.filterable_func(value, table)
else:
filterable = self.filterable_func(value)
return filterable
|
Return True if the value should be pruned; False otherwise.
If a `table` argument was provided, pass it to filterable_func.
|
def to_attrs(args, nocreate_if_none=['id', 'for', 'class']):
if not args:
return ''
s = ['']
for k, v in sorted(args.items()):
k = u_str(k)
v = u_str(v)
if k.startswith('_'):
k = k[1:]
if v is None:
if k not in nocreate_if_none:
s.append(k)
else:
if k.lower() in __noescape_attrs__:
t = u_str(v)
else:
t = cgi.escape(u_str(v))
t = '"%s"' % t.replace('"', '"')
s.append('%s=%s' % (k, t))
return ' '.join(s)
|
Convert a python dict to k="v" attribute format
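For example, assuming ``u_str`` returns the text unchanged:
>>> to_attrs({'class': 'btn primary'})
' class="btn primary"'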
|
def get_gradebook_column_lookup_session(self, proxy):
if not self.supports_gradebook_column_lookup():
raise errors.Unimplemented()
return sessions.GradebookColumnLookupSession(proxy=proxy, runtime=self._runtime)
|
Gets the ``OsidSession`` associated with the gradebook column lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnLookupSession) - a
``GradebookColumnLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_column_lookup()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_lookup()`` is ``true``.*
|
def addOntology(self):
self._openRepo()
name = self._args.name
filePath = self._getFilePath(self._args.filePath,
self._args.relativePath)
if name is None:
name = getNameFromPath(filePath)
ontology = ontologies.Ontology(name)
ontology.populateFromFile(filePath)
self._updateRepo(self._repo.insertOntology, ontology)
|
Adds a new Ontology to this repo.
|
def get_gemini_files(data):
try:
from gemini import annotations, config
except ImportError:
return {}
return {"base": config.read_gemini_config()["annotation_dir"],
"files": annotations.get_anno_files().values()}
|
Enumerate available gemini data files in a standard installation.
|
def network_sub_create_notif(self, tenant_id, tenant_name, cidr):
if not self.fw_init:
return
self.network_create_notif(tenant_id, tenant_name, cidr)
|
Network create notification.
|
def set_subresource(self, subresource, value, key_name = '', headers=None,
version_id=None):
if not subresource:
raise TypeError('set_subresource called with subresource=None')
query_args = subresource
if version_id:
query_args += '&versionId=%s' % version_id
response = self.connection.make_request('PUT', self.name, key_name,
data=value.encode('UTF-8'),
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
|
Set a subresource for a bucket or key.
:type subresource: string
:param subresource: The subresource to set.
:type value: string
:param value: The value of the subresource.
:type key_name: string
:param key_name: The key to operate on, or None to operate on the
bucket.
:type headers: dict
:param headers: Additional HTTP headers to include in the request.
:type version_id: string
:param version_id: Optional. The version id of the key to operate
on. If not specified, operate on the newest
version.
|
def crypto_withdraw(self, amount, currency, crypto_address):
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
|
Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
|
def get_date_bounds(self):
start = end = None
date_gt = '>='
date_lt = '<='
if self:
if self.start:
start = self.start
if self.end:
end = self.end
if self.startopen:
date_gt = '>'
if self.endopen:
date_lt = '<'
return date_gt, start, date_lt, end
|
Return the upper and lower bounds along
with operators that are needed to do an 'in range' test.
Useful for SQL commands.
Returns
-------
tuple: (`str`, `date`, `str`, `date`)
(date_gt, start, date_lt, end)
e.g.:
('>=', start_date, '<', end_date)
|
def GetMetricMetadata():
return [
stats_utils.CreateCounterMetadata("grr_client_unknown"),
stats_utils.CreateCounterMetadata("grr_decoding_error"),
stats_utils.CreateCounterMetadata("grr_decryption_error"),
stats_utils.CreateCounterMetadata("grr_authenticated_messages"),
stats_utils.CreateCounterMetadata("grr_unauthenticated_messages"),
stats_utils.CreateCounterMetadata("grr_rsa_operations"),
stats_utils.CreateCounterMetadata(
"grr_encrypted_cipher_cache", fields=[("type", str)]),
]
|
Returns a list of MetricMetadata for communicator-related metrics.
|
def delete(self):
self._pre_action_check('delete')
self.cypher("MATCH (self) WHERE id(self)={self} "
"OPTIONAL MATCH (self)-[r]-()"
" DELETE r, self")
delattr(self, 'id')
self.deleted = True
return True
|
Delete a node and its relationships
:return: True
|
def _validate_metadata(metadata_props):
if len(CaseInsensitiveDict(metadata_props)) != len(metadata_props):
raise RuntimeError('Duplicate metadata props found')
for key, value in metadata_props.items():
valid_values = KNOWN_METADATA_PROPS.get(key)
if valid_values and value.lower() not in valid_values:
warnings.warn('Key {} has invalid value {}. Valid values are {}'.format(key, value, valid_values))
|
Validate metadata properties and possibly show warnings or throw exceptions.
:param metadata_props: A dictionary of metadata properties, with property names and values (see :func:`~onnxmltools.utils.metadata_props.add_metadata_props` for examples)
|
def is_binary_address(value: Any) -> bool:
if not is_bytes(value):
return False
elif len(value) != 20:
return False
else:
return True
|
Checks if the given string is an address in raw bytes form.
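For example:
>>> is_binary_address(b'\x00' * 20)
True
>>> is_binary_address('0x' + '00' * 20)   # a hex string is not raw bytes
False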
|
def _confirm_pos(self, pos):
candidate = None
if self._get_node(self._treelist, pos) is not None:
candidate = pos
return candidate
|
look up widget for pos and default to None
|
def push(self, metric_type, metric_id, value, timestamp=None):
if type(timestamp) is datetime:
timestamp = datetime_to_time_millis(timestamp)
item = create_metric(metric_type, metric_id, create_datapoint(value, timestamp))
self.put(item)
|
Pushes a single metric_id, datapoint combination to the server.
This method is an assistant method for the put method by removing the need to
create data structures first.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id
:param value: Datapoint value (depending on the MetricType)
:param timestamp: Timestamp of the datapoint. If left empty, uses current client time. Can be milliseconds since epoch or datetime instance
|
def toggle_logo_path(self):
is_checked = self.custom_organisation_logo_check_box.isChecked()
if is_checked:
path = setting(
key='organisation_logo_path',
default=supporters_logo_path(),
expected_type=str,
qsettings=self.settings)
else:
path = supporters_logo_path()
self.organisation_logo_path_line_edit.setText(path)
self.organisation_logo_path_line_edit.setEnabled(is_checked)
self.open_organisation_logo_path_button.setEnabled(is_checked)
|
Set state of logo path line edit and button.
|
def list_files(path, extension=".cpp", exclude="S.cpp"):
return ["%s/%s" % (path, f) for f in listdir(path) if f.endswith(extension) and (not f.endswith(exclude))]
|
List paths to all files that end with a given extension and do not end with the excluded suffix
|
def rabin_miller(p):
if p < 2:
return False
if p != 2 and p & 1 == 0:
return False
s = p - 1
while s & 1 == 0:
s >>= 1
for x in range(10):
a = random.randrange(p - 1) + 1
temp = s
mod = pow(a, temp, p)
while temp != p - 1 and mod != 1 and mod != p - 1:
mod = (mod * mod) % p
temp = temp * 2
if mod != p - 1 and temp % 2 == 0:
return False
return True
|
Performs a Rabin-Miller probabilistic primality test
:param p: Number to test
:return: Bool of whether p is (probably) prime
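For example (97 is prime, so every random witness passes; even and sub-2 inputs are rejected outright):
>>> rabin_miller(97)
True
>>> rabin_miller(100)
False
>>> rabin_miller(1)
False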
|
def floatize(self):
self.x = float(self.x)
self.y = float(self.y)
|
Convert co-ordinate values to floats.
|
def find_all(root, path):
path = parse_path(path)
if len(path) == 1:
yield from get_children(root, path[0])
else:
for child in get_children(root, path[0]):
yield from find_all(child, path[1:])
|
Get all children that satisfy the path.
|
def _patch(self, doc, source, patches, setter=None):
old = self._saved_copy()
for name, patch in patches.items():
for ind, value in patch:
if isinstance(ind, (int, slice)):
self[name][ind] = value
else:
shape = self[name][ind[0]][tuple(ind[1:])].shape
self[name][ind[0]][tuple(ind[1:])] = np.array(value, copy=False).reshape(shape)
from ...document.events import ColumnsPatchedEvent
self._notify_owners(old,
hint=ColumnsPatchedEvent(doc, source, patches, setter))
|
Internal implementation to handle special-casing patch events
on ``ColumnDataSource`` columns.
Normally any changes to the ``.data`` dict attribute on a
``ColumnDataSource`` triggers a notification, causing all of the data
to be synchronized between server and clients.
The ``.patch`` method on column data sources exists to provide a
more efficient way to perform patching (i.e. random access) updates
to a data source, without having to perform a full synchronization,
which would needlessly re-send all the data.
To accomplish this, this function bypasses the wrapped methods on
``PropertyValueDict`` and uses the unwrapped versions on the dict
superclass directly. It then explicitly makes a notification, adding
a special ``ColumnsPatchedEvent`` hint to the message containing
only the small patched data that BokehJS needs in order to efficiently
synchronize.
.. warning::
This function assumes the integrity of ``patches`` has already
been verified.
|
def assert_list(obj, expected_type=string_types, can_be_none=True, default=(), key_arg=None,
allowable=(list, Fileset, OrderedSet, set, tuple), raise_type=ValueError):
def get_key_msg(key=None):
if key is None:
return ''
else:
return "In key '{}': ".format(key)
allowable = tuple(allowable)
key_msg = get_key_msg(key_arg)
val = obj
if val is None:
if can_be_none:
val = list(default)
else:
raise raise_type(
'{}Expected an object of acceptable type {}, received None and can_be_none is False'
.format(key_msg, allowable))
if isinstance(val, allowable):
lst = list(val)
for e in lst:
if not isinstance(e, expected_type):
raise raise_type(
'{}Expected a list containing values of type {}, instead got a value {} of {}'
.format(key_msg, expected_type, e, e.__class__))
return lst
else:
raise raise_type(
'{}Expected an object of acceptable type {}, received {} instead'
.format(key_msg, allowable, val))
|
This function is used to ensure that parameters set by users in BUILD files are of acceptable types.
:API: public
:param obj : the object that may be a list. It will pass if it is of type in allowable.
:param expected_type : this is the expected type of the returned list contents.
:param can_be_none : this defines whether or not the obj can be None. If True, return default.
:param default : this is the default to return if can_be_none is True and obj is None.
:param key_arg : this is the name of the key to which obj belongs to
:param allowable : the acceptable types for obj. We do not want to allow any iterable (eg string).
:param raise_type : the error to throw if the type is not correct.
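For example:
>>> assert_list(['a', 'b'])
['a', 'b']
>>> assert_list(None)        # can_be_none defaults to True, so list(default) == [] is returned
[]
>>> assert_list('a string')  # strings are deliberately not an allowable container type
Traceback (most recent call last):
...
ValueError: Expected an object of acceptable type ...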
|
def restart_required(self):
response = self.get("messages").body.read()
messages = data.load(response)['feed']
if 'entry' not in messages:
result = False
else:
if isinstance(messages['entry'], dict):
titles = [messages['entry']['title']]
else:
titles = [x['title'] for x in messages['entry']]
result = 'restart_required' in titles
return result
|
Indicates whether splunkd is in a state that requires a restart.
:return: A ``boolean`` that indicates whether a restart is required.
|
def transpose(self, *dims) -> 'DataArray':
variable = self.variable.transpose(*dims)
return self._replace(variable)
|
Return a new DataArray object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : DataArray
The returned DataArray's array is transposed.
Notes
-----
This operation returns a view of this array's data. It is
lazy for dask-backed DataArrays but not for numpy-backed DataArrays
-- the data will be fully loaded.
See Also
--------
numpy.transpose
Dataset.transpose
|
def newAction(parent, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, enabled=True):
a = QAction(text, parent)
if icon is not None:
a.setIcon(newIcon(icon))
if shortcut is not None:
if isinstance(shortcut, (list, tuple)):
a.setShortcuts(shortcut)
else:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setCheckable(True)
a.setEnabled(enabled)
return a
|
Create a new action and assign callbacks, shortcuts, etc.
|