max_stars_repo_path (stringlengths 4..245) | max_stars_repo_name (stringlengths 7..115) | max_stars_count (int64 101..368k) | id (stringlengths 2..8) | content (stringlengths 6..1.03M)
---|---|---|---|---|
pyglet/canvas/base.py | AnantTiwari-Naman/pyglet | 1,160 | 11065001 | <gh_stars>1000+
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 <NAME>
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from pyglet import gl
from pyglet import app
from pyglet import window
from pyglet import canvas
class Display:
"""A display device supporting one or more screens.
.. versionadded:: 1.2
"""
name = None
"""Name of this display, if applicable.
:type: str
"""
x_screen = None
"""The X11 screen number of this display, if applicable.
:type: int
"""
def __init__(self, name=None, x_screen=None):
"""Create a display connection for the given name and screen.
On X11, :attr:`name` is of the form ``"hostname:display"``, where the
default is usually ``":1"``. On X11, :attr:`x_screen` gives the X
screen number to use with this display. A pyglet display can only be
used with one X screen; open multiple display connections to access
multiple X screens.
Note that TwinView, Xinerama, xrandr and other extensions present
multiple monitors on a single X screen; this is usually the preferred
mechanism for working with multiple monitors under X11 and allows each
screen to be accessed through a single pyglet :class:`~pyglet.canvas.Display`.
On platforms other than X11, :attr:`name` and :attr:`x_screen` are
ignored; there is only a single display device on these systems.
:Parameters:
name : str
The name of the display to connect to.
x_screen : int
The X11 screen number to use.
"""
canvas._displays.add(self)
def get_screens(self):
"""Get the available screens.
A typical multi-monitor workstation comprises one :class:`Display`
with multiple :class:`Screen`\ s. This method returns a list of
screens which can be enumerated to select one for full-screen display.
For the purposes of creating an OpenGL config, the default screen
will suffice.
:rtype: list of :class:`Screen`
"""
raise NotImplementedError('abstract')
def get_default_screen(self):
"""Get the default screen as specified by the user's operating system
preferences.
:rtype: :class:`Screen`
"""
return self.get_screens()[0]
def get_windows(self):
"""Get the windows currently attached to this display.
:rtype: sequence of :class:`~pyglet.window.Window`
"""
return [window for window in app.windows if window.display is self]
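# A minimal usage sketch (illustrative only; assumes a concrete platform
# Display obtained via pyglet.canvas.get_display()):
#
#   display = pyglet.canvas.get_display()
#   screen = display.get_default_screen()
#   print(screen.width, screen.height)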
class Screen:
"""A virtual monitor that supports fullscreen windows.
Screens typically map onto a physical display such as a
monitor, television or projector. Selecting a screen for a window
has no effect unless the window is made fullscreen, in which case
the window will fill only that particular virtual screen.
The :attr:`width` and :attr:`height` attributes of a screen give the
current resolution of the screen. The :attr:`x` and :attr:`y` attributes
give the global location of the top-left corner of the screen. This is
useful for determining if screens are arranged above or next to one
another.
Use :func:`~Display.get_screens` or :func:`~Display.get_default_screen`
to obtain an instance of this class.
"""
def __init__(self, display, x, y, width, height):
"""
:parameters:
`display` : `~pyglet.canvas.Display`
:attr:`display`
`x` : int
Left edge :attr:`x`
`y` : int
Top edge :attr:`y`
`width` : int
:attr:`width`
`height` : int
:attr:`height`
"""
self.display = display
"""Display this screen belongs to."""
self.x = x
"""Left edge of the screen on the virtual desktop."""
self.y = y
"""Top edge of the screen on the virtual desktop."""
self.width = width
"""Width of the screen, in pixels."""
self.height = height
"""Height of the screen, in pixels."""
def __repr__(self):
return '{}(x={}, y={}, width={}, height={})'.format(self.__class__.__name__, self.x, self.y, self.width, self.height)
def get_best_config(self, template=None):
"""Get the best available GL config.
Any required attributes can be specified in `template`. If
no configuration matches the template,
:class:`~pyglet.window.NoSuchConfigException` will be raised.
:deprecated: Use :meth:`pyglet.gl.Config.match`.
:Parameters:
`template` : `pyglet.gl.Config`
A configuration with desired attributes filled in.
:rtype: :class:`~pyglet.gl.Config`
:return: A configuration supported by the platform that best
fulfils the needs described by the template.
"""
configs = None
if template is None:
for template_config in [gl.Config(double_buffer=True, depth_size=24, major_version=3, minor_version=3),
gl.Config(double_buffer=True, depth_size=16, major_version=3, minor_version=3),
None]:
try:
configs = self.get_matching_configs(template_config)
break
except window.NoSuchConfigException:
pass
else:
configs = self.get_matching_configs(template)
if not configs:
raise window.NoSuchConfigException()
return configs[0]
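# Usage sketch: on a concrete Screen subclass this falls back through the
# GL 3.3 double-buffered templates above, e.g. (illustrative only):
#
#   config = screen.get_best_config()
#   window = pyglet.window.Window(config=config, fullscreen=True, screen=screen)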
def get_matching_configs(self, template):
"""Get a list of configs that match a specification.
Any attributes specified in `template` will have values equal
to or greater in each returned config. If no configs satisfy
the template, an empty list is returned.
:deprecated: Use :meth:`pyglet.gl.Config.match`.
:Parameters:
`template` : `pyglet.gl.Config`
A configuration with desired attributes filled in.
:rtype: list of :class:`~pyglet.gl.Config`
:return: A list of matching configs.
"""
raise NotImplementedError('abstract')
def get_modes(self):
"""Get a list of screen modes supported by this screen.
:rtype: list of :class:`ScreenMode`
.. versionadded:: 1.2
"""
raise NotImplementedError('abstract')
def get_mode(self):
"""Get the current display mode for this screen.
:rtype: :class:`ScreenMode`
.. versionadded:: 1.2
"""
raise NotImplementedError('abstract')
def get_closest_mode(self, width, height):
"""Get the screen mode that best matches a given size.
If no supported mode exactly equals the requested size, a larger one
is returned; or ``None`` if no mode is large enough.
:Parameters:
`width` : int
Requested screen width.
`height` : int
Requested screen height.
:rtype: :class:`ScreenMode`
.. versionadded:: 1.2
"""
# Best mode is one with smallest resolution larger than width/height,
# with depth and refresh rate equal to current mode.
current = self.get_mode()
best = None
for mode in self.get_modes():
# Reject resolutions that are too small
if mode.width < width or mode.height < height:
continue
if best is None:
best = mode
# Prefer a mode that is no larger in either dimension and strictly smaller in at least one
if (mode.width <= best.width and mode.height <= best.height and
(mode.width < best.width or mode.height < best.height)):
best = mode
# Preferably match rate, then depth.
if mode.width == best.width and mode.height == best.height:
points = 0
if mode.rate == current.rate:
points += 2
if best.rate == current.rate:
points -= 2
if mode.depth == current.depth:
points += 1
if best.depth == current.depth:
points -= 1
if points > 0:
best = mode
return best
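# Worked example: given modes 800x600, 1024x768 and 1280x1024, a request
# for (1000, 700) rejects 800x600 (too small) and prefers 1024x768 over
# 1280x1024, since it fits while being smaller in both dimensions; ties on
# size are broken by matching the current refresh rate, then depth.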
def set_mode(self, mode):
"""Set the display mode for this screen.
The mode must be one previously returned by :meth:`get_mode` or
:meth:`get_modes`.
:Parameters:
`mode` : `ScreenMode`
Screen mode to switch this screen to.
"""
raise NotImplementedError('abstract')
def restore_mode(self):
"""Restore the screen mode to the user's default.
"""
raise NotImplementedError('abstract')
class ScreenMode:
"""Screen resolution and display settings.
Applications should not construct `ScreenMode` instances themselves; see
:meth:`Screen.get_modes`.
The :attr:`depth` and :attr:`rate` variables may be ``None`` if the
operating system does not provide relevant data.
.. versionadded:: 1.2
"""
width = None
"""Width of screen, in pixels.
:type: int
"""
height = None
"""Height of screen, in pixels.
:type: int
"""
depth = None
"""Pixel color depth, in bits per pixel.
:type: int
"""
rate = None
"""Screen refresh rate in Hz.
:type: int
"""
def __init__(self, screen):
"""
:parameters:
`screen` : `Screen`
"""
self.screen = screen
def __repr__(self):
return '%s(width=%r, height=%r, depth=%r, rate=%r)' % (
self.__class__.__name__,
self.width, self.height, self.depth, self.rate)
class Canvas:
"""Abstract drawing area.
Canvases are used internally by pyglet to represent drawing areas --
either within a window or full-screen.
.. versionadded:: 1.2
"""
def __init__(self, display):
"""
:parameters:
`display` : `Display`
:attr:`display`
"""
self.display = display
"""Display this canvas was created on."""
|
venv/lib/python3.8/site-packages/pip/_internal/vcs/mercurial.py | Joshua-Barawa/My-Photos | 102 | 11065020 | <gh_stars>100-1000
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import BadCommand, InstallationError
from pip._internal.utils.misc import display_path
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs.versioncontrol import (
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if MYPY_CHECK_RUNNING:
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import RevOptions
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = (
'hg', 'hg+file', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http',
)
@staticmethod
def get_base_rev_args(rev):
return [rev]
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the Hg repository at the url to the destination location"""
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path, url=url)
self.run_command(
['archive', location], show_stdout=False, cwd=temp_dir.path
)
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(make_command('clone', '--noupdate', '-q', url, dest))
self.run_command(
make_command('update', '-q', rev_options.to_args()),
cwd=dest,
)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.RawConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url.secret)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
self.run_command(['pull', '-q'], cwd=dest)
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
def get_remote_url(cls, location):
url = cls.run_command(
['showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if cls._is_local_repository(url):
url = path_to_url(url)
return url.strip()
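# e.g. if `hg showconfig paths.default` prints a local path such as
# /srv/repo, it is normalised to a file:// URL via path_to_url.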
@classmethod
def get_revision(cls, location):
"""
Return the repository-local changeset revision number (hg's ``{rev}``), as a string.
"""
current_revision = cls.run_command(
['parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
@classmethod
def get_requirement_revision(cls, location):
"""
Return the changeset identification hash, as a 40-character
hexadecimal string.
"""
current_rev_hash = cls.run_command(
['parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
@classmethod
def is_commit_id_equal(cls, dest, name):
"""Always assume the versions don't match"""
return False
@classmethod
def get_subdirectory(cls, location):
"""
Return the path to setup.py, relative to the repo root.
Return None if setup.py is in the repo root.
"""
# find the repo root
repo_root = cls.run_command(
['root'], show_stdout=False, cwd=location).strip()
if not os.path.isabs(repo_root):
repo_root = os.path.abspath(os.path.join(location, repo_root))
return find_path_to_setup_from_repo_root(location, repo_root)
@classmethod
def controls_location(cls, location):
if super(Mercurial, cls).controls_location(location):
return True
try:
cls.run_command(
['identify'],
cwd=location,
show_stdout=False,
on_returncode='raise',
log_failed_cmd=False)
return True
except (BadCommand, InstallationError):
return False
vcs.register(Mercurial)
|
onmt/keyphrase/utils.py | memray/OpenNMT-kpg-release | 152 | 11065041 | <filename>onmt/keyphrase/utils.py
import re
import string
from os.path import join, dirname
import numpy as np
import time
import sys,logging
import matplotlib
SEP_token = "<sep>"
DIGIT_token = "<digit>"
from nltk.stem.porter import *
stemmer = PorterStemmer()
# matplotlib.use('agg')
# import matplotlib.pyplot as plt
def stem_word_list(word_list):
return [stemmer.stem(w.strip()) for w in word_list]
def validate_phrases(pred_seqs, unk_token):
'''
Check which predicted phrases are valid: a phrase is kept only if it is
non-empty, contains no unknown token, and contains no '.' or ','.
:param pred_seqs: a list of predicted phrases, each a list of tokens
:param unk_token: the token marking out-of-vocabulary words
:return: a numpy boolean array, one flag per phrase
'''
valid_flags = []
for seq in pred_seqs:
keep_flag = True
if len(seq) == 0:
keep_flag = False
if keep_flag and any([w == unk_token for w in seq]):
keep_flag = False
if keep_flag and any([w == '.' or w == ',' for w in seq]):
keep_flag = False
valid_flags.append(keep_flag)
return np.asarray(valid_flags)
def if_present_duplicate_phrases(src_seq, tgt_seqs, stemming=True, lowercase=True):
"""
Check if each given target sequence verbatim appears in the source sequence
:param src_seq:
:param tgt_seqs:
:param stemming:
:param lowercase:
:param check_duplicate:
:return:
"""
if lowercase:
src_seq = [w.lower() for w in src_seq]
if stemming:
src_seq = stem_word_list(src_seq)
present_indices = []
present_flags = []
duplicate_flags = []
phrase_set = set()  # phrases can collide after stemming (e.g. "model" and "models" both stem to "model"), so later duplicates are ignored
for tgt_seq in tgt_seqs:
if lowercase:
tgt_seq = [w.lower() for w in tgt_seq]
if stemming:
tgt_seq = stem_word_list(tgt_seq)
# check if the phrase appears in source text
# iterate each word in source
match_flag, match_pos_idx = if_present_phrase(src_seq, tgt_seq)
# if it reaches the end of source and no match, means it doesn't appear in the source
present_flags.append(match_flag)
present_indices.append(match_pos_idx)
# check if it is duplicate
if '_'.join(tgt_seq) in phrase_set:
duplicate_flags.append(True)
else:
duplicate_flags.append(False)
phrase_set.add('_'.join(tgt_seq))
assert len(present_flags) == len(present_indices)
return np.asarray(present_flags), \
np.asarray(present_indices), \
np.asarray(duplicate_flags)
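# e.g. with stemming enabled, a target phrase ["models"] is found in a
# source containing the token "model": both stem to "model", so the
# corresponding present_flags entry is True.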
def if_present_phrase(src_str_tokens, phrase_str_tokens):
"""
:param src_str_tokens: a list of strings (words) of source text
:param phrase_str_tokens: a list of strings (words) of a phrase
:return:
"""
match_flag = False
match_pos_idx = -1
for src_start_idx in range(len(src_str_tokens) - len(phrase_str_tokens) + 1):
match_flag = True
# iterate each word in target, if one word does not match, set match=False and break
for seq_idx, seq_w in enumerate(phrase_str_tokens):
src_w = src_str_tokens[src_start_idx + seq_idx]
if src_w != seq_w:
match_flag = False
break
if match_flag:
match_pos_idx = src_start_idx
break
return match_flag, match_pos_idx
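# e.g. if_present_phrase(['a', 'neural', 'network', 'model'],
#                        ['network', 'model']) returns (True, 2);
# a phrase with no verbatim occurrence returns (False, -1).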
def gather_scores(gathered_scores, results_names, results_dicts):
for result_name, result_dict in zip(results_names, results_dicts):
for metric_name, score in result_dict.items():
if metric_name.endswith('_num'):
# if it's 'present_tgt_num' or 'absent_tgt_num', leave as is
field_name = result_name
else:
# otherwise a score like 'precision@5' is prefixed with the result name, e.g. 'present_exact_precision@5'
field_name = result_name + '_' + metric_name
if field_name not in gathered_scores:
gathered_scores[field_name] = []
gathered_scores[field_name].append(score)
return gathered_scores
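# e.g. a result named 'present_exact' with metric 'precision@5' is stored
# under 'present_exact_precision@5'; metrics whose names end in '_num'
# (e.g. 'present_tgt_num') are stored under the bare result name instead.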
def print_predeval_result(i, src_text, tgt_seqs, present_tgt_flags,
pred_seqs, pred_scores, pred_idxs, copied_flags,
present_pred_flags, valid_pred_flags,
valid_and_present_flags, valid_and_absent_flags,
match_scores_exact, match_scores_partial,
results_names, results_list, score_dict):
'''
Print and export predictions
'''
# src, src_str, tgt, tgt_str_seqs, tgt_copy, pred_seq, oov
print_out = '====================== %d =========================' % (i)
print_out += '\n[Source]: %s \n' % (src_text) if src_text is not None else ''
# print_out += '[Abstract]: %s \n' % (src_dict["abstract"])
# print_out += '[Source tokenized][%d]: %s \n' % (len(src_seq), ' '.join(src_seq))
# print_out += 'Real Target [%d] \n\t\t%s \n' % (len(tgt_seqs), str(tgt_seqs))
print_out += '[GROUND-TRUTH] #(all)=%d, #(present)=%d, #(absent)=%d\n' % \
(len(present_tgt_flags), sum(present_tgt_flags), len(present_tgt_flags)-sum(present_tgt_flags))
print_out += '\n'.join(
['\t\t[%s]' % ' '.join(phrase) if is_present else '\t\t%s' % ' '.join(phrase) for phrase, is_present in
zip(tgt_seqs, present_tgt_flags)])
print_out += '\n[PREDICTION] #(all)=%d, #(valid)=%d, #(present)=%d, ' \
'#(valid&present)=%d, #(valid&absent)=%d\n' % (
len(pred_seqs), sum(valid_pred_flags), sum(present_pred_flags),
sum(valid_and_present_flags), sum(valid_and_absent_flags))
print_out += ''
preds_out = ''
for p_id, (word, match, match_soft,
is_valid, is_present) in enumerate(
zip(pred_seqs, match_scores_exact, match_scores_partial,
valid_pred_flags, present_pred_flags)):
score = pred_scores[p_id] if pred_scores else "Score N/A"
pred_idx = pred_idxs[p_id] if pred_idxs else "Index N/A"
copied_flag = copied_flags[p_id] if copied_flags else "CopyFlag N/A"
preds_out += '%s\n' % (' '.join(word))
if is_present:
print_phrase = '[%s]' % ' '.join(word)
else:
print_phrase = ' '.join(word)
if match == 1.0:
correct_str = '[correct!]'
else:
correct_str = ''
if any(copied_flag):
copy_str = '[copied!]'
else:
copy_str = ''
pred_str = '\t\t%s\t%s \t %s %s%s\n' % ('[%.4f]' % (-score) if pred_scores else "Score N/A",
print_phrase, str(pred_idx),
correct_str, copy_str)
if not is_valid:
pred_str = '\t%s' % pred_str
print_out += pred_str
print_out += "\n ======================================================= \n"
print_out += '[GROUND-TRUTH] #(all)=%d, #(present)=%d, #(absent)=%d\n' % \
(len(present_tgt_flags), sum(present_tgt_flags), len(present_tgt_flags)-sum(present_tgt_flags))
print_out += '\n[PREDICTION] #(all)=%d, #(valid)=%d, #(present)=%d, ' \
'#(valid&present)=%d, #(valid&absent)=%d\n' % (
len(pred_seqs), sum(valid_pred_flags), sum(present_pred_flags),
sum(valid_and_present_flags), sum(valid_and_absent_flags))
for name, results in zip(results_names, results_list):
# print @5@10@O@M for present_exact, print @50@M for absent_exact
if name in ['all_exact', 'present_exact', 'absent_exact']:
if name.startswith('all') or name.startswith('present'):
topk_list = ['1', '3', '10', 'k']
else:
topk_list = ['50', 'M']
for topk in topk_list:
print_out += "\n --- batch {} Corr/P/R/F1 @{}: \t".format(name, topk) \
+ " {:6} , {:.4f} , {:.4f} , {:.4f}".format(int(results['correct@{}'.format(topk)]),
results['precision@{}'.format(topk)],
results['recall@{}'.format(topk)],
results['f_score@{}'.format(topk)],
)
# note the reported results might be different from the numbers here
# since we remove data points that have zero valid targets in average (see kp_report.summarize_scores)
print_out += "\n --- total {} Corr/P/R/F1 @{}: \t".format(name, topk) \
+ " {:6} , {:.4f} , {:.4f} , {:.4f}".format(
int(np.sum(score_dict['{}_correct@{}'.format(name, topk)])),
np.average(score_dict['{}_precision@{}'.format(name, topk)]),
np.average(score_dict['{}_recall@{}'.format(name, topk)]),
np.average(score_dict['{}_f_score@{}'.format(name, topk)]),)
elif name in ['present_exact_advanced', 'absent_exact_advanced']:
print_out += "\n --- batch {} AUC/SADR/α-nDCG@5/α-nDCG@10/nDCG/AP/MRR: \t".format(name[: name.rfind('_')]) \
+ " {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f}".format(
results['auc'], results['sadr'], results['alpha_ndcg@5'], results['alpha_ndcg@10'],
results['ndcg'], results['ap'], results['mrr'],)
print_out += "\n --- total {} AUC/SADR/α-nDCG@5/α-nDCG@10/nDCG/AP/MRR: \t".format(name[: name.rfind('_')]) \
+ " {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f} , {:.4f}".format(
np.average(score_dict['{}_{}'.format(name, 'auc')]),
np.average(score_dict['{}_{}'.format(name, 'sadr')]),
np.average(score_dict['{}_{}'.format(name, 'alpha_ndcg@5')]),
np.average(score_dict['{}_{}'.format(name, 'alpha_ndcg@10')]),
np.average(score_dict['{}_{}'.format(name, 'ndcg')]),
np.average(score_dict['{}_{}'.format(name, 'ap')]),
np.average(score_dict['{}_{}'.format(name, 'mrr')]),
)
else:
# ignore partial for now
continue
print_out += "\n ======================================================="
return print_out
def meng17_tokenize(text):
'''
The tokenizer used in Meng et al. ACL 2017
parse the feed-in text, filtering and tokenization
keep [_<>,\(\)\.\'%], replace digits with <digit>, split by [^a-zA-Z0-9_<>,\(\)\.\'%]
:param text:
:return: a list of tokens
'''
# remove line breakers
text = re.sub(r'[\r\n\t]', ' ', text)
# pad spaces to the left and right of special punctuations
text = re.sub(r'[_<>,\(\)\.\'%]', ' \g<0> ', text)
# split on anything that is not a letter, digit or kept punctuation (the newly added # & + * are kept but not padded, so they stay inside tokens)
tokens = list(filter(lambda w: len(w) > 0, re.split(r'[^a-zA-Z0-9_<>,#&\+\*\(\)\.\']', text)))
return tokens
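# e.g. meng17_tokenize("Deep Learning, e.g. CNNs (2017)") yields
# ['Deep', 'Learning', ',', 'e', '.', 'g', '.', 'CNNs', '(', '2017', ')']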
def retain_punc_tokenize(raw_text):
'''
Keep almost all punctuation except '?', which is often an artifact of
encoding errors.
Pad each punctuation mark with underscores (and spaces) on both sides.
:param raw_text: the raw input string
:return: a list of tokens
'''
puncs = string.punctuation
pattern = r"[{}]".format(puncs) # create the pattern
# remove line breakers
text = re.sub(r'[\r\n\t]', ' ', raw_text)
# pad spaces&underlines to the left and right of special punctuations
text = re.sub(pattern, ' _\g<0>_ ', text)
# tokenize by whitespaces
tokens = []
for token in re.split(r'\s', text):
# split strings that contain letters and digits
if re.match(r'[A-Za-z]+\d+|\d+[A-Za-z]+', token):
token = re.findall(r'[A-Za-z]+|\d+', token)
else:
token = [token]
tokens.extend(token)
tokens = list(filter(lambda w: len(w) > 0 and w!='_?_', tokens))
tokens = [t[1] if len(t)==3 and t[0]=='_' and t[2]=='_' else t for t in tokens]
return tokens
def replace_numbers_to_DIGIT(tokens, k=2):
# replace long numbers (k or more digits) with <digit>
tokens = [w if not re.match('^\d{%d,}$' % k, w) else DIGIT_token for w in tokens]
return tokens
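# e.g. replace_numbers_to_DIGIT(['in', '1998', 'v2']) with the default k=2
# yields ['in', '<digit>', 'v2']: '1998' has >= 2 digits, while 'v2'
# contains a letter and is left untouched.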
def time_usage(func):
def wrapper(*args, **kwargs):
beg_ts = time.time()
retval = func(*args, **kwargs)
end_ts = time.time()
print("elapsed time: %f" % (end_ts - beg_ts))
return retval
return wrapper
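# Usage sketch:
#   @time_usage
#   def train_one_epoch(...): ...
# each call then prints "elapsed time: <seconds>" after returning.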
DATA_DIR = join(dirname(dirname(__file__)), 'data')
MODELS_DIR = join(dirname(dirname(__file__)), 'models')
MODEL_NAME = ("{:s}_model.{:s}.{:s}_contextsize.{:d}_numnoisewords.{:d}"
"_vecdim.{:d}_batchsize.{:d}_lr.{:f}_epoch.{:d}_loss.{:f}"
".pth.tar")
def current_milli_time():
return int(round(time.time() * 1000))
class LoggerWriter:
def __init__(self, level):
# self.level is really like using log.debug(message)
# at least in my case
self.level = level
def write(self, message):
# if statement reduces the amount of newlines that are
# printed to the logger
if message != '\n':
self.level(message)
def flush(self):
# create a flush method so things can be flushed when
# the system wants to. Not sure if simply 'printing'
# sys.stderr is the correct way to do it, but it seemed
# to work properly for me.
self.level(sys.stderr)
def tally_parameters(model):
if not logging.getLogger().handlers:  # getLogger() never returns None; fall back to print when no handler is configured
printer = print
else:
printer = logging.getLogger().info
n_params = sum([p.nelement() for p in model.parameters()])
printer('Model name: %s' % type(model).__name__)
printer('number of parameters: %d' % n_params)
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
elif 'decoder' in name or 'generator' in name:
dec += param.nelement()
printer('encoder: %d' % enc)
printer('decoder: %d' % dec)
def _print_progress(epoch_i, batch_i, num_batches):
progress = round((batch_i + 1) / num_batches * 100)
print("\rEpoch {:d}".format(epoch_i + 1), end='')
sys.stdout.write(" - {:d}%".format(progress))
sys.stdout.flush()
class Progbar(object):
def __init__(self, logger, title, target, width=30, batch_size = None, total_examples = None, verbose=1):
'''
@param target: total number of steps expected
'''
self.logger = logger
self.title = title
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
self.batch_size = batch_size
self.last_batch = 0
self.total_examples = total_examples
self.start_time = time.time() - 0.00001
self.last_time = self.start_time
self.report_delay = 10
self.last_report = self.start_time
def update(self, current_epoch, current, values=[]):
'''
@param current_epoch: index of the current epoch, or None
@param current: index of current step
@param values: list of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
'''
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
epoch_info = '%s Epoch=%d -' % (self.title, current_epoch) if current_epoch else '%s -' % (self.title)
barstr = epoch_info + '%%%dd/%%%dd' % (numdigits, numdigits, ) + ' (%.2f%%)['
bar = barstr % (current, self.target, float(current)/float(self.target) * 100.0)
prog = float(current)/self.target
prog_width = int(self.width*prog)
if prog_width > 0:
bar += ('.'*(prog_width-1))
if current < self.target:
bar += '(-w-)'
else:
bar += '(-v-)!!'
bar += ('~' * (self.width-prog_width))
bar += ']'
# sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit*(self.target - current)
# info = ''
info = bar
if current < self.target:
info += ' - Run-time: %ds - ETA: %ds' % (now - self.start, eta)
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
# info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
if k == 'perplexity' or k == 'PPL':
info += ' - %s: %.4f' % (k, np.exp(self.sum_values[k][0] / max(1, self.sum_values[k][1])))
else:
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
# update progress stats
'''
current_time = time.time()
elapsed = current_time - self.last_report
if elapsed > self.report_delay:
trained_word_count = self.batch_size * current # only words in vocab & sampled
new_trained_word_count = self.batch_size * (current - self.last_batch) # only words in vocab & sampled
info += " - new processed %d words, %.0f words/s" % (new_trained_word_count, new_trained_word_count / elapsed)
self.last_time = current_time
self.last_report = current_time
self.last_batch = current
'''
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width-self.total_width) * " ")
# sys.stdout.write(info)
# sys.stdout.flush()
self.logger.info(info)
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
# sys.stdout.write(info + "\n")
self.logger.critical(info + "\n")
print(info + "\n")
def add(self, n, values=[]):
self.update(None, self.seen_so_far + n, values)  # update() expects the epoch as its first argument
def clear(self):
self.sum_values = {}
self.unique_values = []
self.total_width = 0
self.seen_so_far = 0
'''
def plot_learning_curve_and_write_csv(scores, curve_names, checkpoint_names, title, ylim=None, save_path=None):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
title : string
Title for the chart.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
"""
train_sizes=np.linspace(1, len(scores[0]), len(scores[0]))
plt.figure(dpi=500)
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
# print(train_scores)
# print(test_scores)
plt.grid()
means = {}
stds = {}
# colors = "rgbcmykw"
colors = matplotlib.cm.rainbow(np.linspace(0, 1, len(curve_names)))
for i, (name, score) in enumerate(zip(curve_names, scores)):
# get the mean and std of score along the time step
mean = np.asarray([np.mean(s) for s in score])
means[name] = mean
std = np.asarray([np.std(s) for s in score])
stds[name] = std
if name.lower().startswith('training ml'):
score_ = [np.asarray(s) / 20.0 for s in score]
mean = np.asarray([np.mean(s) for s in score_])
std = np.asarray([np.std(s) for s in score_])
plt.fill_between(train_sizes, mean - std,
mean + std, alpha=0.1,
color=colors[i])
plt.plot(train_sizes, mean, 'o-', color=colors[i],
label=name)
plt.legend(loc="best", prop={'size': 6})
# plt.show()
if save_path:
plt.savefig(save_path + '.png', bbox_inches='tight')
csv_lines = ['time, ' + ','.join(curve_names)]
for t_id, time in enumerate(checkpoint_names):
csv_line = time + ',' + ','.join([str(means[c_name][t_id]) for c_name in curve_names])
csv_lines.append(csv_line)
with open(save_path + '.csv', 'w') as result_csv:
result_csv.write('\n'.join(csv_lines))
plt.close()
return plt
'''
|
libp2p/pubsub/floodsub.py | Jacquelinevv0693/py-libp2p | 315 | 11065042 | <gh_stars>100-1000
import logging
from typing import Iterable, List, Sequence
import trio
from libp2p.network.stream.exceptions import StreamClosed
from libp2p.peer.id import ID
from libp2p.typing import TProtocol
from libp2p.utils import encode_varint_prefixed
from .abc import IPubsubRouter
from .pb import rpc_pb2
from .pubsub import Pubsub
PROTOCOL_ID = TProtocol("/floodsub/1.0.0")
logger = logging.getLogger("libp2p.pubsub.floodsub")
class FloodSub(IPubsubRouter):
protocols: List[TProtocol]
pubsub: Pubsub
def __init__(self, protocols: Sequence[TProtocol]) -> None:
self.protocols = list(protocols)
self.pubsub = None
def get_protocols(self) -> List[TProtocol]:
"""
:return: the list of protocols supported by the router
"""
return self.protocols
def attach(self, pubsub: Pubsub) -> None:
"""
Attach is invoked by the PubSub constructor to attach the router to a
freshly initialized PubSub instance.
:param pubsub: pubsub instance to attach to
"""
self.pubsub = pubsub
def add_peer(self, peer_id: ID, protocol_id: TProtocol) -> None:
"""
Notifies the router that a new peer has been connected.
:param peer_id: id of peer to add
"""
def remove_peer(self, peer_id: ID) -> None:
"""
Notifies the router that a peer has been disconnected.
:param peer_id: id of peer to remove
"""
async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None:
"""
Invoked to process control messages in the RPC envelope. It is invoked
after subscriptions and payload messages have been processed.
:param rpc: rpc message
"""
# Checkpoint
await trio.lowlevel.checkpoint()
async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None:
"""
Invoked to forward a new message that has been validated. This is where
the "flooding" part of floodsub happens.
With flooding, routing is almost trivial: for each incoming message,
forward to all known peers in the topic. There is a bit of logic,
as the router maintains a timed cache of previous messages,
so that seen messages are not further forwarded.
It also never forwards a message back to the source
or the peer that forwarded the message.
:param msg_forwarder: peer ID of the peer who forwards the message to us
:param pubsub_msg: pubsub message in protobuf.
"""
peers_gen = set(
self._get_peers_to_send(
pubsub_msg.topicIDs,
msg_forwarder=msg_forwarder,
origin=ID(pubsub_msg.from_id),
)
)
rpc_msg = rpc_pb2.RPC(publish=[pubsub_msg])
logger.debug("publishing message %s", pubsub_msg)
for peer_id in peers_gen:
if peer_id not in self.pubsub.peers:
continue
stream = self.pubsub.peers[peer_id]
# FIXME: We should add a `WriteMsg` similar to write delimited messages.
# Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
try:
await stream.write(encode_varint_prefixed(rpc_msg.SerializeToString()))
except StreamClosed:
logger.debug("Fail to publish message to %s: stream closed", peer_id)
self.pubsub._handle_dead_peer(peer_id)
async def join(self, topic: str) -> None:
"""
Join notifies the router that we want to receive and forward messages
in a topic. It is invoked after the subscription announcement.
:param topic: topic to join
"""
# Checkpoint
await trio.lowlevel.checkpoint()
async def leave(self, topic: str) -> None:
"""
Leave notifies the router that we are no longer interested in a topic.
It is invoked after the unsubscription announcement.
:param topic: topic to leave
"""
# Checkpoint
await trio.lowlevel.checkpoint()
def _get_peers_to_send(
self, topic_ids: Iterable[str], msg_forwarder: ID, origin: ID
) -> Iterable[ID]:
"""
Get the eligible peers to send the data to.
:param msg_forwarder: peer ID of the peer who forwards the message to us.
:param origin: peer id of the peer the message originate from.
:return: a generator of the peer ids who we send data to.
"""
for topic in topic_ids:
if topic not in self.pubsub.peer_topics:
continue
for peer_id in self.pubsub.peer_topics[topic]:
if peer_id in (msg_forwarder, origin):
continue
if peer_id not in self.pubsub.peers:
continue
yield peer_id
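# e.g. for a topic with known peers {A, B, C}, a message that originated
# at A and was forwarded to us by B is sent only to C: both the forwarder
# and the origin are skipped, as are peers we hold no stream to.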
|
src/finn/util/create.py | AlexMontgomerie/finn | 283 | 11065046 | # Copyright (c) 2020 Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Xilinx nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from onnx import TensorProto, helper
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.util.basic import calculate_signed_dot_prod_range, gen_finn_dt_tensor
def hls_random_mlp_maker(layer_spec):
"""Create an MLP of given specification using HLSCustomOp instances.
Generate random weights/thresholds of appropriate size."""
ret = []
for lyr in layer_spec:
idt = lyr["idt"]
wdt = lyr["wdt"]
mw = lyr["mw"]
mh = lyr["mh"]
act = lyr["act"]
lyr["W"] = gen_finn_dt_tensor(wdt, (mw, mh))
if act is None:
# no activation, produce accumulators
T = None
tdt = None
if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
odt = DataType.UINT32
else:
odt = DataType.INT32
else:
odt = act
(t_min, t_max) = calculate_signed_dot_prod_range(idt, wdt, mw)
n_steps = act.get_num_possible_values() - 1
T = np.random.randint(t_min, t_max - 1, (mh, n_steps)).astype(np.float32)
# provide non-decreasing thresholds
T = np.sort(T, axis=1)
# generate thresholds for activation
if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
tdt = DataType.UINT32
# bias thresholds to be positive
T = np.ceil((T + mw) / 2)
assert (T >= 0).all()
else:
tdt = DataType.INT32
lyr["T"] = T
lyr["tdt"] = tdt
lyr["odt"] = odt
ret.append(lyr)
return hls_mlp_maker(ret)
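# A usage sketch with hypothetical layer values (each dict also needs the
# "pe"/"simd" folding factors consumed by hls_mlp_maker below):
#
#   layer_spec = [{"idt": DataType.INT8, "wdt": DataType.INT8,
#                  "mw": 32, "mh": 16, "act": DataType.INT8,
#                  "pe": 4, "simd": 8}]
#   model = hls_random_mlp_maker(layer_spec)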
def hls_mlp_maker(layer_spec):
"""Create an MLP of given specification using HLSCustomOp instances."""
current_in_name = ""
current_out_name = ""
i = 0
graph = helper.make_graph(nodes=[], name="mlp", inputs=[], outputs=[])
model = helper.make_model(graph, producer_name="finn")
model = ModelWrapper(model)
for lyr in layer_spec:
current_W_name = "W_%d" % i
current_T_name = "T_%d" % i
current_in_name = "act_%d" % i
current_out_name = "act_%d" % (i + 1)
W = lyr["W"]
(mw, mh) = W.shape
T = lyr["T"]
pe = lyr["pe"]
simd = lyr["simd"]
wdt = lyr["wdt"]
idt = lyr["idt"]
tdt = lyr["tdt"]
odt = lyr["odt"]
if i == 0:
global_in = helper.make_tensor_value_info(
current_in_name, TensorProto.FLOAT, [1, mw]
)
model.graph.input.append(global_in)
if i == len(layer_spec) - 1:
global_out = helper.make_tensor_value_info(
current_out_name, TensorProto.FLOAT, [1, mh]
)
model.graph.output.append(global_out)
# there are two ways to implement bipolar weights and inputs for
# StreamingFC:
# - specify their datatypes as such
# - specify their datatypes as BINARY and use binaryXnorMode
if wdt == DataType.BIPOLAR and idt == DataType.BIPOLAR:
# we'll internally convert weights/inputs to binary and specify the
# datatypes as such, and also set the binaryXnorMode attribute to 1
export_wdt = DataType.BINARY
export_idt = DataType.BINARY
binary_xnor_mode = 1
else:
export_wdt = wdt
export_idt = idt
binary_xnor_mode = 0
if T is not None:
no_act = 0
node_inp_list = [current_in_name, current_W_name, current_T_name]
if odt == DataType.BIPOLAR:
actval = 0
else:
actval = odt.min()
else:
# no thresholds
node_inp_list = [current_in_name, current_W_name]
actval = 0
no_act = 1
FCLayer_node = helper.make_node(
"StreamingFCLayer_Batch",
node_inp_list,
[current_out_name],
domain="finn.custom_op.fpgadataflow",
backend="fpgadataflow",
MW=mw,
MH=mh,
SIMD=simd,
PE=pe,
inputDataType=export_idt.name,
weightDataType=export_wdt.name,
outputDataType=odt.name,
ActVal=actval,
binaryXnorMode=binary_xnor_mode,
noActivation=no_act,
)
model.graph.node.append(FCLayer_node)
model.set_tensor_datatype(current_in_name, idt)
model.set_tensor_datatype(current_out_name, odt)
model.set_tensor_datatype(current_W_name, wdt)
if binary_xnor_mode:
# convert bipolar to binary
model.set_initializer(current_W_name, (W + 1) / 2)
else:
model.set_initializer(current_W_name, W)
if T is not None:
model.set_tensor_datatype(current_T_name, tdt)
model.set_initializer(current_T_name, T)
i += 1
return model
|
tf_quant_finance/experimental/instruments/overnight_index_linked_futures_test.py | slowy07/tf-quant-finance | 3,138 | 11065061 | <filename>tf_quant_finance/experimental/instruments/overnight_index_linked_futures_test.py
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for overnight_index_linked_futures.py."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
dates = tff.datetime
instruments = tff.experimental.instruments
@test_util.run_all_in_graph_and_eager_modes
class OvernightIndexLinkedFuturesTest(tf.test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
('DoublePrecision', np.float64),
)
def test_fut_compounded(self, dtype):
cal = dates.create_holiday_calendar(weekend_mask=dates.WeekendMask.NONE)
start_date = dates.convert_to_date_tensor([(2020, 5, 1)])
end_date = dates.convert_to_date_tensor([(2020, 5, 31)])
valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
indexfuture = instruments.OvernightIndexLinkedFutures(
start_date,
end_date,
holiday_calendar=cal,
averaging_type=instruments.AverageType.COMPOUNDING,
dtype=dtype)
curve_dates = valuation_date + dates.months([1, 2, 6])
reference_curve = instruments.RateCurve(
curve_dates,
np.array([0.02, 0.025, 0.015], dtype=dtype),
valuation_date=valuation_date,
dtype=dtype)
market = tff.experimental.instruments.InterestRateMarket(
reference_curve=reference_curve, discount_curve=None)
price = self.evaluate(indexfuture.price(valuation_date, market))
np.testing.assert_allclose(price, 98.64101997, atol=1e-6)
@parameterized.named_parameters(
('DoublePrecision', np.float64),
)
def test_fut_averaged(self, dtype):
cal = dates.create_holiday_calendar(weekend_mask=dates.WeekendMask.NONE)
start_date = dates.convert_to_date_tensor([(2020, 5, 1)])
end_date = dates.convert_to_date_tensor([(2020, 5, 31)])
valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
indexfuture = instruments.OvernightIndexLinkedFutures(
start_date,
end_date,
averaging_type=instruments.AverageType.ARITHMETIC_AVERAGE,
holiday_calendar=cal,
dtype=dtype)
curve_dates = valuation_date + dates.months([1, 2, 6])
reference_curve = instruments.RateCurve(
curve_dates,
np.array([0.02, 0.025, 0.015], dtype=dtype),
valuation_date=valuation_date,
dtype=dtype)
market = tff.experimental.instruments.InterestRateMarket(
reference_curve=reference_curve, discount_curve=None)
price = self.evaluate(indexfuture.price(valuation_date, market))
np.testing.assert_allclose(price, 98.6417886, atol=1e-6)
@parameterized.named_parameters(
('DoublePrecision', np.float64),
)
def test_fut_compounded_calendar(self, dtype):
cal = dates.create_holiday_calendar(
weekend_mask=dates.WeekendMask.SATURDAY_SUNDAY)
start_date = dates.convert_to_date_tensor([(2020, 5, 1)])
end_date = dates.convert_to_date_tensor([(2020, 5, 31)])
valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
indexfuture = instruments.OvernightIndexLinkedFutures(
start_date,
end_date,
holiday_calendar=cal,
averaging_type=instruments.AverageType.COMPOUNDING,
dtype=dtype)
curve_dates = valuation_date + dates.months([1, 2, 6])
reference_curve = instruments.RateCurve(
curve_dates,
np.array([0.02, 0.025, 0.015], dtype=dtype),
valuation_date=valuation_date,
dtype=dtype)
market = tff.experimental.instruments.InterestRateMarket(
reference_curve=reference_curve, discount_curve=None)
price = self.evaluate(indexfuture.price(valuation_date, market))
np.testing.assert_allclose(price, 98.6332129, atol=1e-6)
@parameterized.named_parameters(
('DoublePrecision', np.float64),
)
def test_fut_averaged_calendar(self, dtype):
cal = dates.create_holiday_calendar(
weekend_mask=dates.WeekendMask.SATURDAY_SUNDAY)
start_date = dates.convert_to_date_tensor([(2020, 5, 1)])
end_date = dates.convert_to_date_tensor([(2020, 5, 31)])
valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
indexfuture = instruments.OvernightIndexLinkedFutures(
start_date,
end_date,
averaging_type=instruments.AverageType.ARITHMETIC_AVERAGE,
holiday_calendar=cal,
dtype=dtype)
curve_dates = valuation_date + dates.months([1, 2, 6])
reference_curve = instruments.RateCurve(
curve_dates,
np.array([0.02, 0.025, 0.015], dtype=dtype),
valuation_date=valuation_date,
dtype=dtype)
market = tff.experimental.instruments.InterestRateMarket(
reference_curve=reference_curve, discount_curve=None)
price = self.evaluate(indexfuture.price(valuation_date, market))
np.testing.assert_allclose(price, 98.63396465, atol=1e-6)
@parameterized.named_parameters(
('DoublePrecision', np.float64),
)
def test_fut_many(self, dtype):
cal = dates.create_holiday_calendar(weekend_mask=dates.WeekendMask.NONE)
start_date = dates.convert_to_date_tensor([(2020, 5, 1), (2020, 5, 1)])
end_date = dates.convert_to_date_tensor([(2020, 5, 31), (2020, 5, 31)])
valuation_date = dates.convert_to_date_tensor([(2020, 2, 8)])
indexfuture = instruments.OvernightIndexLinkedFutures(
start_date,
end_date,
holiday_calendar=cal,
averaging_type=instruments.AverageType.COMPOUNDING,
dtype=dtype)
curve_dates = valuation_date + dates.months([1, 2, 6])
reference_curve = instruments.RateCurve(
curve_dates,
np.array([0.02, 0.025, 0.015], dtype=dtype),
valuation_date=valuation_date,
dtype=dtype)
market = tff.experimental.instruments.InterestRateMarket(
reference_curve=reference_curve, discount_curve=None)
price = self.evaluate(indexfuture.price(valuation_date, market))
np.testing.assert_allclose(price, [98.64101997, 98.64101997], atol=1e-6)
if __name__ == '__main__':
tf.test.main()
|
libtbx/test_utils/pytest.py | dperl-sol/cctbx_project | 155 | 11065082 | from __future__ import absolute_import, division, print_function
import atexit
import os
import sys
import itertools
import libtbx.load_env
from libtbx.test_utils.parallel import run_command
_first_pytest_collection = True
_pytest_unique_counter = itertools.count(1)
def discover(module=None, pytestargs=None):
'''
pytest compatibility layer
This function discovers pytest tests in a module directory so that they can
be run via libtbx.run_tests_parallel without being named explicitly in the
module's run_tests.py.
To use this function, add
from libtbx.test_utils.pytest import discover
tst_list = tst_list + discover()
to your run_tests.py.
You can pass further arguments to the pytest discovery process as a list in
the pytestargs parameter, and you can specify the cctbx module name if the
automatic discovery does not work for any reason.
The function tests for the presence of pytest and generates a helpful
warning if it is missing. You can skip the discovery of tests by setting
the environment variable LIBTBX_SKIP_PYTEST.
'''
if 'LIBTBX_SKIP_PYTEST' in os.environ:
'''If LIBTBX_SKIP_PYTEST is set then the user is running libtbx testing but
does not want any pytests to be run.
Alternatively the user is running pytest, and pytest wants to find all
legacy libtbx tests. In this case we also do not attempt to recursively
find pytests.
'''
# Buck stops here in either case
return []
if pytestargs is None:
pytestargs = []
try:
import pytest
except ImportError:
def pytest_warning():
print("=" * 55)
print(" WARNING: Skipping some tests for %s\n" % module)
print(" To run all available tests you need to install pytest.")
print(" libtbx.python -m pip install pytest")
print("=" * 55)
pytest_warning()
atexit.register(pytest_warning)
return []
if not module:
# Try to determine the name of the calling module, and thus the name of the
# cctbx module. Use exception trick to pick up the current frame.
try:
raise Exception()
except Exception:
frame = sys.exc_info()[2].tb_frame.f_back
caller = frame.f_globals['__name__']
if not caller.endswith('.run_tests'):
raise RuntimeError('Only use discover() from within run_tests.py ' \
+ 'or specify the module name manually.')
module = caller[:-10]
class L(list):
"""Subclass list so that it can accept additional attributes."""
print("Discovering pytest tests for %s:" % module)
test_list = []
dist_dir = libtbx.env.dist_path(module)
class TestDiscoveryPlugin:
def pytest_itemcollected(self, item):
global _pytest_unique_counter
testarray = L([ "libtbx.python", "-m", "pytest", '-rsxX', '--basetemp=pytest%st%03d' % (os.path.sep, next(_pytest_unique_counter)),
'"%s"' % (item.fspath + '::' + item.nodeid.split('::', 1)[1]) ])
testclass = module + '.' + item.location[0].replace(os.path.sep, '.')
if testclass.endswith('.py'):
testclass = testclass[:-3]
testarray.test_class = testclass
testarray.test_name = item.name
test_list.append(testarray)
pytest_parameters = ['-qq', '--collect-only']
# Only show pytest warnings during first collection
global _first_pytest_collection
if not _first_pytest_collection:
pytest_parameters.append('--disable-pytest-warnings')
_first_pytest_collection = False
try:
# Now set LIBTBX_SKIP_PYTEST so we can collect all pytests without pytest
# recursively trying to find legacy libtbx tests.
os.environ['LIBTBX_SKIP_PYTEST'] = "1"
pytest.main(pytest_parameters + [ dist_dir ] + pytestargs, plugins=[TestDiscoveryPlugin()])
finally:
del os.environ['LIBTBX_SKIP_PYTEST']
if test_list:
# Ensure the common basetemp directory pytest/ exists
try:
os.mkdir('pytest')
except OSError:
pass
return test_list
def libtbx_collector():
'''
libtbx compatibility layer:
return a function that enables pytest to collect and run all libtbx legacy tests of a module
To use this you need to add
import libtbx.test_utils.pytest
pytest_collect_file = libtbx.test_utils.pytest.libtbx_collector()
to your conftest.py in the module root directory.
'''
import pytest
class LibtbxTestException(Exception):
'''Custom exception for error reporting.'''
def __init__(self, stdout, stderr):
self.stdout = stdout
self.stderr = stderr
class LibtbxTest(pytest.Item):
def __init__(self, name, parent, test_command, test_parameters):
super(LibtbxTest, self).__init__(name, parent)
self.test_cmd = test_command
if test_command.endswith('.py'):
self.test_cmd = 'libtbx.python "%s"' % self.test_cmd
self.test_parms = test_parameters
self.full_cmd = ' '.join([self.test_cmd] + self.test_parms)
if not hasattr(self, 'module'):
self.module = None
if not hasattr(self, '_fixtureinfo'):
self._fixtureinfo = self.session._fixturemanager.getfixtureinfo(self, self.runtest, self)
def runtest(self):
rc = run_command(self.full_cmd)
if rc is None:
# run_command only returns None if CTRL+C pressed
raise KeyboardInterrupt()
self.add_report_section('call', 'stdout', '\n'.join(rc.stdout_lines))
self.add_report_section('call', 'stderr', '\n'.join(rc.stderr_lines))
if rc.stderr_lines or rc.return_code != 0:
raise LibtbxTestException(rc.stdout_lines, rc.stderr_lines)
def repr_failure(self, excinfo):
'''called when self.runtest() raises an exception.'''
if isinstance(excinfo.value, LibtbxTestException):
return "\n".join(excinfo.value.stderr)
def reportinfo(self):
return self.fspath, 0, self.full_cmd
def pytest_collect_file(path, parent):
if 'LIBTBX_SKIP_PYTEST' in os.environ:
'''The pytest discovery process is ran from within libtbx, so do not
attempt to find libtbx legacy tests.'''
return
class LibtbxRunTestsFile(pytest.File):
def collect(self):
try:
os.environ['LIBTBX_SKIP_PYTEST'] = '1'
import importlib
# Guess the module import path from the location of this file
test_module = self.fspath.dirpath().basename
# We must be directly inside the root of a configured module.
# If this module isn't configured, then we don't want to run tests.
if not libtbx.env.has_module(test_module):
return
run_tests_module = test_module + "." + self.fspath.purebasename
run_tests = importlib.import_module(run_tests_module)
finally:
del os.environ['LIBTBX_SKIP_PYTEST']
for test in run_tests.tst_list:
from six import string_types
if isinstance(test, string_types):
testfile = test
testparams = []
testname = 'main'
else:
testfile = test[0]
testparams = [str(s) for s in test[1:]]
testname = "_".join(str(p) for p in testparams)
full_command = testfile.replace("$D", os.path.dirname(run_tests.__file__)). \
replace("$B", libtbx.env.under_build(test_module))
shortpath = testfile.replace("$D/", "").replace("$B/", "build/")
pytest_file_object = pytest.File(shortpath, self.session)
yield LibtbxTest(testname, pytest_file_object, full_command, testparams)
if path.basename == 'run_tests.py':
return LibtbxRunTestsFile(path, parent)
return pytest_collect_file
|
tests/errors/codegen/fortran/randint.py | dina-fouad/pyccel | 206 | 11065087 | # pylint: disable=missing-function-docstring, missing-module-docstring/
from numpy.random import randint
a = randint(10, size = 5)
|
coal_mine/memory_store.py | aberja/coal-mine | 108 | 11065120 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
In-memory store for Coal Mine, primarily for use by tests
"""
from .abstract_store import AbstractStore
from copy import deepcopy
import re
class MemoryStore(AbstractStore):
def __init__(self):
self.canaries = {}
def create(self, canary):
self.canaries[canary['id']] = deepcopy(canary)
def update(self, identifier, updates):
canary = self.canaries[identifier]
for key, value in updates.items():
if value is None:
if key in canary: # pragma: no branch
del canary[key]
else:
canary[key] = value
def get(self, identifier):
return deepcopy(self.canaries[identifier])
def list(self, *, verbose=False, paused=None, late=None, search=None):
iterator = self.canaries.values()
if paused is not None:
iterator = (i for i in iterator if i['paused'] == paused)
if late is not None:
iterator = (i for i in iterator if i['late'] == late)
if search is not None:
regex = re.compile(search)
iterator = (i for i in iterator
if regex.search(i['name']) or
regex.search(i['slug']) or
regex.search(i['id']))
if not verbose:
iterator = ({'id': i['id'], 'name': i['name']} for i in iterator)
return (deepcopy(i) for i in iterator)
def upcoming_deadlines(self):
iterator = self.canaries.values()
iterator = (i for i in iterator if not i['paused'])
iterator = (i for i in iterator if not i['late'])
return (deepcopy(i)
for i in sorted(iterator, key=lambda i: i['deadline']))
def delete(self, identifier):
del self.canaries[identifier]
def find_identifier(self, slug):
matches = (i for i in self.canaries.values() if i['slug'] == slug)
try:
return next(matches)['id']
except StopIteration:
raise KeyError()
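# A minimal usage sketch:
#   store = MemoryStore()
#   store.create({'id': '1', 'name': 'backup', 'slug': 'backup',
#                 'paused': False, 'late': False, 'deadline': 0})
#   assert store.find_identifier('backup') == '1'
#   assert list(store.list(late=False))[0]['name'] == 'backup'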
|
python-package/xlearn/xlearn.py | DavyMorgan/xlearn | 3,144 | 11065145 | # Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from numpy import ndarray
from .base import Series, DataFrame
import numpy as np
from .base import _LIB, XLearnHandle
from .base import _check_call, c_str
from .data import DMatrix
class XLearn(object):
"""XLearn is the core interface used by python API."""
def __init__(self, handle):
"""Initalizes a new XLearn
Parameters
----------
handle : XLearnHandle
'XLearn' handle of C API.
"""
assert isinstance(handle, XLearnHandle)
self.handle = handle
def __del__(self):
_check_call(_LIB.XLearnHandleFree(ctypes.byref(self.handle)))
def _set_Param(self, param):
"""Set hyper-parameter for xlearn handle
Parameters
----------
param : dict
xlearn hyper-parameters
"""
        # Dispatch each hyper-parameter to the C setter that matches its type.
        str_keys = ('task', 'metric', 'opt', 'log')
        float_keys = ('lr', 'lambda', 'init', 'alpha', 'beta',
                      'lambda_1', 'lambda_2')
        int_keys = ('k', 'epoch', 'fold', 'nthread', 'block_size',
                    'stop_window', 'seed')
        for key, value in param.items():
            if key in str_keys:
                _check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),
                                              c_str(key), c_str(value)))
            elif key in float_keys:
                _check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
                                                c_str(key),
                                                ctypes.c_float(value)))
            elif key in int_keys:
                _check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
                                              c_str(key),
                                              ctypes.c_uint(value)))
            else:
                raise Exception("Invalid key!", key)
def show(self):
"""Show model information
"""
_check_call(_LIB.XLearnShow(ctypes.byref(self.handle)))
def setTrain(self, train_path):
"""Set file path of training data.
Parameters
----------
train_path : str
the path of training data
"""
if isinstance(train_path, str):
_check_call(_LIB.XLearnSetTrain(ctypes.byref(self.handle), c_str(train_path)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(True)))
elif isinstance(train_path, DMatrix):
key = "train"
_check_call(_LIB.XLearnSetDMatrix(ctypes.byref(self.handle), c_str(key), ctypes.byref(train_path.handle)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(False)))
else:
raise Exception("Invalid train.Can be test file path or xLearn DMatrix", type(train_path))
def setTest(self, test_path):
"""Set file path of test data.
Parameters
----------
test_path : str
the path of test data.
"""
if isinstance(test_path, str):
_check_call(_LIB.XLearnSetTest(ctypes.byref(self.handle), c_str(test_path)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(True)))
elif isinstance(test_path, DMatrix):
key = "test"
_check_call(_LIB.XLearnSetDMatrix(ctypes.byref(self.handle), c_str(key), ctypes.byref(test_path.handle)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(False)))
else:
raise Exception("Invalid test.Can be test file path or xLearn DMatrix", type(test_path))
def setPreModel(self, pre_model_path):
""" Set file path of pre-trained model.
Parameters
----------
pre_model_path : str
the path of pre-trained model.
"""
_check_call(_LIB.XLearnSetPreModel(ctypes.byref(self.handle), c_str(pre_model_path)))
def setValidate(self, val_path):
"""Set file path of validation data.
Parameters
----------
val_path : str
the path of validation data.
"""
if isinstance(val_path, str):
_check_call(_LIB.XLearnSetValidate(ctypes.byref(self.handle), c_str(val_path)))
elif isinstance(val_path, DMatrix):
key = "validate"
_check_call(_LIB.XLearnSetDMatrix(ctypes.byref(self.handle), c_str(key), ctypes.byref(val_path.handle)))
else:
raise Exception("Invalid validation.Can be test file path or xLearn DMatrix", type(val_path))
def setTXTModel(self, model_path):
"""Set the path of TXT model file.
Parameters
----------
model_path : str
the path of the TXT model file.
"""
_check_call(_LIB.XLearnSetTXTModel(ctypes.byref(self.handle), c_str(model_path)))
def setQuiet(self):
"""Set xlearn to quiet model"""
key = 'quiet'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def setOnDisk(self):
"""Set xlearn to use on-disk training"""
key = 'on_disk'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def setNoBin(self):
"""Do not generate bin file"""
key = 'bin_out'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def disableNorm(self):
"""Disable instance-wise normalization"""
key = 'norm'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def disableLockFree(self):
"""Disable lock free training"""
key = 'lock_free'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def disableEarlyStop(self):
"""Disable early-stopping"""
key = 'early_stop'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def setSign(self):
"""Convert output to 0 and 1"""
key = 'sign'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def setSigmoid(self):
"""Convert output by using sigmoid"""
key = 'sigmoid'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def fit(self, param, model_path):
"""Check hyper-parameters, train model, and dump model.
Parameters
----------
param : dict
hyper-parameter used by xlearn.
model_path : str
path of model checkpoint.
"""
self._set_Param(param)
_check_call(_LIB.XLearnFit(ctypes.byref(self.handle), c_str(model_path)))
def cv(self, param):
""" Do cross-validation
Parameters
----------
param : dict
hyper-parameter used by xlearn
"""
self._set_Param(param)
_check_call(_LIB.XLearnCV(ctypes.byref(self.handle)))
def predict(self, model_path, out_path=None):
"""Predict output
Parameters
----------
        model_path : str
            the path of the model checkpoint.
        out_path : str, default None
            if an output path is given, the prediction result is saved to
            that local file and no numpy array is returned.
"""
if out_path is None:
length = ctypes.c_uint64()
preds = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XLearnPredictForMat(ctypes.byref(self.handle),
c_str(model_path),
ctypes.byref(length),
ctypes.byref(preds)))
res = np.zeros(length.value, dtype=np.float32)
ctypes.memmove(res.ctypes.data, preds, length.value * res.strides[0])
return res
else:
_check_call(_LIB.XLearnPredictForFile(ctypes.byref(self.handle),
c_str(model_path),
c_str(out_path)))
def create_linear():
"""
Create a linear model.
"""
model_type = 'linear'
handle = XLearnHandle()
_check_call(_LIB.XLearnCreate(c_str(model_type), ctypes.byref(handle)))
return XLearn(handle)
def create_fm():
"""
Create a factorization machine.
"""
model_type = 'fm'
handle = XLearnHandle()
_check_call(_LIB.XLearnCreate(c_str(model_type), ctypes.byref(handle)))
return XLearn(handle)
def create_ffm():
"""
Create a field-aware factorization machine.
"""
model_type = 'ffm'
handle = XLearnHandle()
_check_call(_LIB.XLearnCreate(c_str(model_type), ctypes.byref(handle)))
return XLearn(handle)
def hello():
"""
Say hello to user
"""
_check_call(_LIB.XLearnHello())
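# Illustrative sketch (added; not part of the original module): the typical
# create -> setTrain -> fit -> setTest -> predict flow. All file paths are
# placeholders, and running this requires the native xlearn library plus real
# libffm-format data on disk.
if __name__ == '__main__':
    ffm_model = create_ffm()
    ffm_model.setTrain("./train.ffm")        # placeholder path
    ffm_model.setValidate("./validate.ffm")  # placeholder path
    ffm_model.fit({'task': 'binary', 'lr': 0.2, 'lambda': 0.002},
                  "./model.out")
    ffm_model.setTest("./test.ffm")          # placeholder path
    ffm_model.setSigmoid()                   # map scores into (0, 1)
    ffm_model.predict("./model.out", "./output.txt")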
|
pyppeteer/connection.py | olivierdalang/pyppeteer | 3,747 | 11065159 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Connection/Session management module."""
import asyncio
import json
import logging
from typing import Awaitable, Callable, Dict, Union, TYPE_CHECKING
from pyee import EventEmitter
import websockets
from pyppeteer.errors import NetworkError
if TYPE_CHECKING:
from typing import Optional # noqa: F401
logger = logging.getLogger(__name__)
logger_connection = logging.getLogger(__name__ + '.Connection')
logger_session = logging.getLogger(__name__ + '.CDPSession')
class Connection(EventEmitter):
"""Connection management class."""
def __init__(self, url: str, loop: asyncio.AbstractEventLoop,
delay: int = 0) -> None:
"""Make connection.
        :arg str url: WebSocket URL of the devtools endpoint to connect to.
:arg int delay: delay to wait before processing received messages.
"""
super().__init__()
self._url = url
self._lastId = 0
self._callbacks: Dict[int, asyncio.Future] = dict()
self._delay = delay / 1000
self._loop = loop
self._sessions: Dict[str, CDPSession] = dict()
self.connection: CDPSession
self._connected = False
self._ws = websockets.client.connect(
self._url, max_size=None, loop=self._loop)
self._recv_fut = self._loop.create_task(self._recv_loop())
self._closeCallback: Optional[Callable[[], None]] = None
@property
def url(self) -> str:
"""Get connected WebSocket url."""
return self._url
async def _recv_loop(self) -> None:
async with self._ws as connection:
self._connected = True
self.connection = connection
while self._connected:
try:
resp = await self.connection.recv()
if resp:
await self._on_message(resp)
except (websockets.ConnectionClosed, ConnectionResetError):
logger.info('connection closed')
break
await asyncio.sleep(0)
if self._connected:
self._loop.create_task(self.dispose())
async def _async_send(self, msg: str, callback_id: int) -> None:
while not self._connected:
await asyncio.sleep(self._delay)
try:
await self.connection.send(msg)
except websockets.ConnectionClosed:
logger.error('connection unexpectedly closed')
callback = self._callbacks.get(callback_id, None)
if callback and not callback.done():
callback.set_result(None)
await self.dispose()
def send(self, method: str, params: dict = None) -> Awaitable:
"""Send message via the connection."""
        # The connection is established lazily; from the second message on,
        # fail fast if it has already closed.
if self._lastId and not self._connected:
raise ConnectionError('Connection is closed')
if params is None:
params = dict()
self._lastId += 1
_id = self._lastId
msg = json.dumps(dict(
id=_id,
method=method,
params=params,
))
logger_connection.debug(f'SEND: {msg}')
self._loop.create_task(self._async_send(msg, _id))
callback = self._loop.create_future()
self._callbacks[_id] = callback
callback.error: Exception = NetworkError() # type: ignore
callback.method: str = method # type: ignore
return callback
def _on_response(self, msg: dict) -> None:
callback = self._callbacks.pop(msg.get('id', -1))
if msg.get('error'):
callback.set_exception(
_createProtocolError(
callback.error, # type: ignore
callback.method, # type: ignore
msg
)
)
else:
callback.set_result(msg.get('result'))
def _on_query(self, msg: dict) -> None:
params = msg.get('params', {})
method = msg.get('method', '')
sessionId = params.get('sessionId')
if method == 'Target.receivedMessageFromTarget':
session = self._sessions.get(sessionId)
if session:
session._on_message(params.get('message'))
elif method == 'Target.detachedFromTarget':
session = self._sessions.get(sessionId)
if session:
session._on_closed()
del self._sessions[sessionId]
else:
self.emit(method, params)
def setClosedCallback(self, callback: Callable[[], None]) -> None:
"""Set closed callback."""
self._closeCallback = callback
async def _on_message(self, message: str) -> None:
await asyncio.sleep(self._delay)
logger_connection.debug(f'RECV: {message}')
msg = json.loads(message)
if msg.get('id') in self._callbacks:
self._on_response(msg)
else:
self._on_query(msg)
async def _on_close(self) -> None:
if self._closeCallback:
self._closeCallback()
self._closeCallback = None
for cb in self._callbacks.values():
cb.set_exception(_rewriteError(
cb.error, # type: ignore
f'Protocol error {cb.method}: Target closed.', # type: ignore
))
self._callbacks.clear()
for session in self._sessions.values():
session._on_closed()
self._sessions.clear()
# close connection
if hasattr(self, 'connection'): # may not have connection
await self.connection.close()
if not self._recv_fut.done():
self._recv_fut.cancel()
async def dispose(self) -> None:
"""Close all connection."""
self._connected = False
await self._on_close()
async def createSession(self, targetInfo: Dict) -> 'CDPSession':
"""Create new session."""
resp = await self.send(
'Target.attachToTarget',
{'targetId': targetInfo['targetId']}
)
sessionId = resp.get('sessionId')
session = CDPSession(self, targetInfo['type'], sessionId, self._loop)
self._sessions[sessionId] = session
return session
class CDPSession(EventEmitter):
"""Chrome Devtools Protocol Session.
The :class:`CDPSession` instances are used to talk raw Chrome Devtools
Protocol:
* protocol methods can be called with :meth:`send` method.
* protocol events can be subscribed to with :meth:`on` method.
Documentation on DevTools Protocol can be found
`here <https://chromedevtools.github.io/devtools-protocol/>`__.
"""
def __init__(self, connection: Union[Connection, 'CDPSession'],
targetType: str, sessionId: str,
loop: asyncio.AbstractEventLoop) -> None:
"""Make new session."""
super().__init__()
self._lastId = 0
self._callbacks: Dict[int, asyncio.Future] = {}
self._connection: Optional[Connection] = connection
self._targetType = targetType
self._sessionId = sessionId
self._sessions: Dict[str, CDPSession] = dict()
self._loop = loop
def send(self, method: str, params: dict = None) -> Awaitable:
"""Send message to the connected session.
:arg str method: Protocol method name.
:arg dict params: Optional method parameters.
"""
if not self._connection:
raise NetworkError(
f'Protocol Error ({method}): Session closed. Most likely the '
f'{self._targetType} has been closed.'
)
        if params is None:
            params = dict()
        self._lastId += 1
        _id = self._lastId
msg = json.dumps(dict(id=_id, method=method, params=params))
logger_session.debug(f'SEND: {msg}')
callback = self._loop.create_future()
self._callbacks[_id] = callback
callback.error: Exception = NetworkError() # type: ignore
callback.method: str = method # type: ignore
try:
self._connection.send('Target.sendMessageToTarget', {
'sessionId': self._sessionId,
'message': msg,
})
except Exception as e:
# The response from target might have been already dispatched
if _id in self._callbacks:
                _callback = self._callbacks[_id]
                del self._callbacks[_id]
_callback.set_exception(_rewriteError(
_callback.error, # type: ignore
e.args[0],
))
return callback
def _on_message(self, msg: str) -> None: # noqa: C901
logger_session.debug(f'RECV: {msg}')
obj = json.loads(msg)
_id = obj.get('id')
if _id:
callback = self._callbacks.get(_id)
if callback:
del self._callbacks[_id]
if obj.get('error'):
callback.set_exception(_createProtocolError(
callback.error, # type: ignore
callback.method, # type: ignore
obj,
))
else:
result = obj.get('result')
if callback and not callback.done():
callback.set_result(result)
else:
params = obj.get('params', {})
if obj.get('method') == 'Target.receivedMessageFromTarget':
session = self._sessions.get(params.get('sessionId'))
if session:
session._on_message(params.get('message'))
            elif obj.get('method') == 'Target.detachedFromTarget':
sessionId = params.get('sessionId')
session = self._sessions.get(sessionId)
if session:
session._on_closed()
del self._sessions[sessionId]
self.emit(obj.get('method'), obj.get('params'))
async def detach(self) -> None:
"""Detach session from target.
Once detached, session won't emit any events and can't be used to send
messages.
"""
if not self._connection:
raise NetworkError('Connection already closed.')
await self._connection.send('Target.detachFromTarget',
{'sessionId': self._sessionId})
def _on_closed(self) -> None:
for cb in self._callbacks.values():
cb.set_exception(_rewriteError(
cb.error, # type: ignore
f'Protocol error {cb.method}: Target closed.', # type: ignore
))
self._callbacks.clear()
self._connection = None
def _createSession(self, targetType: str, sessionId: str) -> 'CDPSession':
session = CDPSession(self, targetType, sessionId, self._loop)
self._sessions[sessionId] = session
return session
def _createProtocolError(error: Exception, method: str, obj: Dict
) -> Exception:
message = f'Protocol error ({method}): {obj["error"]["message"]}'
if 'data' in obj['error']:
message += f' {obj["error"]["data"]}'
return _rewriteError(error, message)
def _rewriteError(error: Exception, message: str) -> Exception:
error.args = (message, )
return error
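# Illustrative sketch (added; not part of the original module): drive a raw
# Connection against a running browser and issue one protocol command. The
# WebSocket URL is a placeholder; a real one is printed by a browser started
# with --remote-debugging-port.
if __name__ == '__main__':
    async def _demo() -> None:
        loop = asyncio.get_event_loop()
        conn = Connection('ws://127.0.0.1:9222/devtools/browser/<uuid>', loop)
        version = await conn.send('Browser.getVersion')  # any CDP method works
        print(version)
        await conn.dispose()
    asyncio.get_event_loop().run_until_complete(_demo())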
|
PhysicsTools/TagAndProbe/test/testTagProbeFitTreeAnalyzer_Toy.py | ckamtsikis/cmssw | 852 | 11065187 | import FWCore.ParameterSet.Config as cms
process = cms.Process("TagProbe")
process.load('FWCore.MessageService.MessageLogger_cfi')
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
process.TagProbeFitTreeAnalyzer = cms.EDAnalyzer("TagProbeFitTreeAnalyzer",
InputFileNames = cms.vstring("testTagProbeFitTreeProducer_Toy.root"),
InputDirectoryName = cms.string("Test"),
InputTreeName = cms.string("fitter_tree"),
OutputFileName = cms.string("testTagProbeFitTreeAnalyzer_Toy.root"),
NumCPU = cms.uint32(1),
SaveWorkspace = cms.bool(True),
Variables = cms.PSet(
mass = cms.vstring("Tag-Probe Mass", "2.6", "3.6", "GeV/c^{2}"),
pt = cms.vstring("Probe p_{T}", "0", "100", "GeV/c"),
eta = cms.vstring("Probe #eta", "-2.4", "2.4", ""),
),
Categories = cms.PSet(
mcTrue_idx = cms.vstring("MC true", "dummy[true=1,false=0]"),
passing_idx = cms.vstring("isPassing", "dummy[true=1,false=0]"),
),
PDFs = cms.PSet(
gaussPlusLinear = cms.vstring(
"Gaussian::signal(mass, mean[3.1,3.0,3.2], sigma[0.03,0.01,0.05])",
"Chebychev::backgroundPass(mass, cPass[0,-1,1])",
"Chebychev::backgroundFail(mass, cFail[0,-1,1])",
"efficiency[0.9,0,1]",
"signalFractionInPassing[0.9]"
),
),
Efficiencies = cms.PSet(
pt_eta = cms.PSet(
EfficiencyCategoryAndState = cms.vstring("passing_idx","true"),
UnbinnedVariables = cms.vstring("mass"),
BinnedVariables = cms.PSet(
pt = cms.vdouble(2.0, 4.0, 6.0, 8.0, 10.0),
eta = cms.vdouble(-2.4, -1.6, -0.8, 0.0, 0.8, 1.6, 2.4),
),
BinToPDFmap = cms.vstring("gaussPlusLinear"),
),
pt_eta_mcTrue = cms.PSet(
EfficiencyCategoryAndState = cms.vstring("passing_idx","true"),
UnbinnedVariables = cms.vstring("mass"),
BinnedVariables = cms.PSet(
mcTrue_idx = cms.vstring("true"),
pt = cms.vdouble(2.0, 4.0, 6.0, 8.0, 10.0),
eta = cms.vdouble(-2.4, -1.2, 0.0, 1.2, 2.4),
),
),
),
)
process.fitness = cms.Path(
process.TagProbeFitTreeAnalyzer
)
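# (Added note, hedged) Configurations like this are normally executed with the
# CMSSW driver, e.g. `cmsRun testTagProbeFitTreeAnalyzer_Toy.py`, after the
# producer step has written testTagProbeFitTreeProducer_Toy.root.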
|
pipeline_plugins/components/collections/remote_plugin/v1_0_0.py | qqqqqie/bk-sops | 881 | 11065193 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME> Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from pipeline.component_framework.component import Component
from pipeline.core.flow import Service, StaticIntervalGenerator
from pipeline.core.flow.io import StringItemSchema
from plugin_service.conf import PLUGIN_LOGGER
from plugin_service.exceptions import PluginServiceException
from plugin_service.plugin_client import PluginServiceApiClient
logger = logging.getLogger(PLUGIN_LOGGER)
class State:
EMPTY = 1
POLL = 2
CALLBACK = 3
SUCCESS = 4
FAIL = 5
class RemotePluginService(Service):
interval = StaticIntervalGenerator(5)
def outputs_format(self):
return [
self.OutputItem(
name="Trace ID", key="trace_id", type="string", schema=StringItemSchema(description="Trace ID")
),
]
def execute(self, data, parent_data):
plugin_code = data.get_one_of_inputs("plugin_code")
plugin_version = data.get_one_of_inputs("plugin_version")
try:
plugin_client = PluginServiceApiClient(plugin_code)
except PluginServiceException as e:
message = f"[remote plugin service client] error: {e}"
logger.error(message)
data.set_outputs("ex_data", message)
return False
detail_result = plugin_client.get_detail(plugin_version)
if not detail_result["result"]:
message = f"[remote plugin service detail] error: {detail_result['message']}"
logger.error(message)
data.set_outputs("ex_data", message)
return False
plugin_context = dict(
[
(key, parent_data.inputs[key])
for key in detail_result["data"]["context_inputs"]["properties"].keys()
if key in parent_data.inputs
]
)
ok, result_data = plugin_client.invoke(plugin_version, {"inputs": data.inputs, "context": plugin_context})
if not ok:
message = (
f"[remote plugin service invoke] error: {result_data['message']}, "
f"trace_id: {result_data.get('trace_id')}"
)
logger.error(message)
data.set_outputs("ex_data", message)
return False
state = result_data["state"]
data.set_outputs("trace_id", result_data["trace_id"])
if state == State.FAIL:
data.set_outputs("ex_data", result_data["err"])
return False
if state == State.POLL:
setattr(self, "__need_schedule__", True)
if state in [State.SUCCESS, State.POLL]:
for key, output in result_data["outputs"].items():
data.set_outputs(key, output)
return True
def schedule(self, data, parent_data, callback_data=None):
plugin_code = data.get_one_of_inputs("plugin_code")
trace_id = data.get_one_of_outputs("trace_id")
try:
plugin_client = PluginServiceApiClient(plugin_code)
except PluginServiceException as e:
message = f"[remote plugin service client] error: {e}"
logger.error(message)
data.set_outputs("ex_data", message)
return False
ok, result_data = plugin_client.get_schedule(trace_id)
if not ok:
message = (
f"remote plugin service schedule error: {result_data['message']}, "
f"trace_id: {result_data.get('trace_id') or trace_id}"
)
logger.error(message)
data.set_outputs("ex_data", message)
return False
state = result_data["state"]
if state == State.FAIL:
            default_message = "please check the logs for the cause of the task failure."
logger.error(f"[remote plugin service state failed]: {result_data}")
data.set_outputs("ex_data", result_data["outputs"].get("ex_data") or default_message)
return False
if state == State.POLL:
setattr(self, "__need_schedule__", True)
if state in [State.SUCCESS, State.POLL]:
for key, output in result_data["outputs"].items():
data.set_outputs(key, output)
if state == State.SUCCESS:
self.finish_schedule()
return True
class RemotePluginComponent(Component):
code = "remote_plugin"
name = "RemotePlugin"
bound_service = RemotePluginService
version = "1.0.0"
|
tests/utils.py | corylevine/okta-sdk-python | 145 | 11065203 | """
File for utility functions used in testing
"""
import string
import random
def random_string_of_length(length):
"""
Generates a random string of ASCII letters of a fixed length.
Args:
length (int): fixed length desired
Returns:
string: random string generated
"""
return "".join(random.choices(string.ascii_letters, k=length))
|
dataloader/mr.py | mvemoon/TextClassificationBenchmark | 576 | 11065206 | # -*- coding: utf-8 -*-
from .Dataset import Dataset
import os
import pandas as pd
import numpy as np
from codecs import open
class MRDataset(Dataset):
def __init__(self,opt=None,**kwargs):
super(MRDataset,self).__init__(opt,**kwargs)
self.urls=['https://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz']
def process(self):
root=self.download()
root = os.path.join(root,"rt-polaritydata")
print("processing into: "+ root)
## root = "D:\code\git\TextClassificationBenchmark\.data_waby\\imdb\\aclImdb"
if not os.path.exists(self.saved_path):
print("mkdir " + self.saved_path)
os.makedirs(self.saved_path) # better than os.mkdir
#
datas=[]
for polarity in ("neg","pos"):
filename = os.path.join(root,"rt-polarity."+polarity)
records=[]
with open(filename,encoding="utf-8",errors="replace") as f:
                for line in f:
records.append({"text":line.strip(),"label": 1 if polarity == "pos" else 0})
datas.append(pd.DataFrame(records))
df = pd.concat(datas)
from sklearn.utils import shuffle
df = shuffle(df).reset_index()
        split_index = [True] * int(len(df) * 0.8) + [False] * (len(df) - int(len(df) * 0.8))
# train=df.sample(frac=0.8)
train = df[split_index]
test = df[~np.array(split_index)]
train_filename=os.path.join(self.saved_path,"train.csv")
test_filename = os.path.join(self.saved_path,"test.csv")
train[["text","label"]].to_csv(train_filename,encoding="utf-8",sep="\t",index=False,header=None)
test[["text","label"]].to_csv(test_filename,encoding="utf-8",sep="\t",index=False,header=None)
#
# for data_folder in ("train","test"):
# data = []
# for polarity in ("pos","neg"):
# diranme=os.path.join( os.path.join(root,data_folder), polarity)
# for rt, dirs, files in os.walk(diranme):
# for f in files:
# filename= os.path.join(rt,f)
# data.append( {"text": open(filename,encoding="utf-8").read().strip(),"label":int(polarity=="pos")})
# df=pd.DataFrame(data)
# saved_filename=os.path.join(self.saved_path,data_folder+".csv")
#
# df[["text","label"]].to_csv(saved_filename,index=False,header=None,sep="\t",encoding="utf-8")
# print("finished %s"%saved_filename)
print("processing into formated files over")
return [train_filename,test_filename]
if __name__=="__main__":
import opts
opt = opts.parse_opt()
opt.dataset="mr"
import dataloader
dataset= dataloader.getDataset(opt)
dataset.process()
|
api/python_semrush/errors.py | jmelm93/MLTS | 117 | 11065219 | from __future__ import absolute_import, print_function, unicode_literals
class BaseSemrushError(Exception):
pass
class SemRushKeyError(BaseSemrushError):
pass
class SemRushRegionalDatabaseError(BaseSemrushError):
    pass
|
applications/SwimmingDEMApplication/tests/ValidationTests.py | lkusch/Kratos | 778 | 11065228 | # Definition of the classes for the VALIDATION TESTS
# Import Kratos
import KratosMultiphysics
import KratosMultiphysics.DEMApplication
import KratosMultiphysics.SwimmingDEMApplication
# Import TestFactory
import SPFEMTestFactory as SPFEMTF
# Import KratosUnittest
import KratosMultiphysics.KratosUnittest as KratosUnittest
class sdem_pfem_coupling_one_way_test(SPFEMTF.TestFactory):
file_name = "PFEM-DEM_tests/sdem_pfem_coupling_one_way_test"
file_parameters = "PFEM-DEM_tests/ProjectParameters.json"
def SetTestSuite(suites):
validation_suite = suites['validation']
validation_suite.addTests(
KratosUnittest.TestLoader().loadTestsFromTestCases([
fluid_dem_coupling_one_way_test,
sdem_pfem_coupling_one_way_test
])
)
return validation_suite
def AssembleTestSuites():
suites = KratosUnittest.KratosSuites
night_suite = SetTestSuite(suites)
suites['all'].addTests(night_suite)
return suites
if __name__ == '__main__':
KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)
KratosUnittest.runTests(AssembleTestSuites())
|
Custom/events/Zabbix/API/requests/packages/chardet/universaldetector.py | aplishka-az/Deadline | 113 | 11065236 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME> - port to Python
# <NAME> - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
            if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
|
scripts/west_commands/build.py | Trifunik/zephyr | 6,224 | 11065237 | # Copyright (c) 2018 Foundries.io
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
import pathlib
import shlex
import sys
import yaml
from west import log
from west.configuration import config
from zcmake import DEFAULT_CMAKE_GENERATOR, run_cmake, run_build, CMakeCache
from build_helpers import is_zephyr_build, find_build_dir, \
FIND_BUILD_DIR_DESCRIPTION
from zephyr_ext_common import Forceable
_ARG_SEPARATOR = '--'
BUILD_USAGE = '''\
west build [-h] [-b BOARD] [-d BUILD_DIR]
[-t TARGET] [-p {auto, always, never}] [-c] [--cmake-only]
[-n] [-o BUILD_OPT] [-f]
[source_dir] -- [cmake_opt [cmake_opt ...]]
'''
BUILD_DESCRIPTION = f'''\
Convenience wrapper for building Zephyr applications.
{FIND_BUILD_DIR_DESCRIPTION}
positional arguments:
source_dir application source directory
cmake_opt extra options to pass to cmake; implies -c
(these must come after "--" as shown above)
'''
PRISTINE_DESCRIPTION = """\
A "pristine" build directory is empty. The -p option controls
whether the build directory is made pristine before the build
is done. A bare '--pristine' with no value is the same as
--pristine=always. Setting --pristine=auto uses heuristics to
guess if a pristine build may be necessary."""
def _banner(msg):
log.inf('-- west build: ' + msg, colorize=True)
def config_get(option, fallback):
return config.get('build', option, fallback=fallback)
def config_getboolean(option, fallback):
return config.getboolean('build', option, fallback=fallback)
class AlwaysIfMissing(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values or 'always')
class Build(Forceable):
def __init__(self):
super(Build, self).__init__(
'build',
# Keep this in sync with the string in west-commands.yml.
'compile a Zephyr application',
BUILD_DESCRIPTION,
accepts_unknown_args=True)
self.source_dir = None
'''Source directory for the build, or None on error.'''
self.build_dir = None
'''Final build directory used to run the build, or None on error.'''
self.created_build_dir = False
'''True if the build directory was created; False otherwise.'''
self.run_cmake = False
'''True if CMake was run; False otherwise.
Note: this only describes CMake runs done by this command. The
build system generated by CMake may also update itself due to
internal logic.'''
self.cmake_cache = None
'''Final parsed CMake cache for the build, or None on error.'''
def do_add_parser(self, parser_adder):
parser = parser_adder.add_parser(
self.name,
help=self.help,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.description,
usage=BUILD_USAGE)
# Remember to update west-completion.bash if you add or remove
# flags
parser.add_argument('-b', '--board', help='board to build for')
# Hidden option for backwards compatibility
parser.add_argument('-s', '--source-dir', help=argparse.SUPPRESS)
parser.add_argument('-d', '--build-dir',
help='build directory to create or use')
self.add_force_arg(parser)
group = parser.add_argument_group('cmake and build tool')
group.add_argument('-c', '--cmake', action='store_true',
help='force a cmake run')
group.add_argument('--cmake-only', action='store_true',
help="just run cmake; don't build (implies -c)")
group.add_argument('-t', '--target',
help='''run build system target TARGET
(try "-t usage")''')
group.add_argument('-T', '--test-item',
help='''Build based on test data in testcase.yaml
or sample.yaml''')
group.add_argument('-o', '--build-opt', default=[], action='append',
help='''options to pass to the build tool
(make or ninja); may be given more than once''')
group.add_argument('-n', '--just-print', '--dry-run', '--recon',
dest='dry_run', action='store_true',
help="just print build commands; don't run them")
group = parser.add_argument_group('pristine builds',
PRISTINE_DESCRIPTION)
group.add_argument('-p', '--pristine', choices=['auto', 'always',
'never'], action=AlwaysIfMissing, nargs='?',
help='pristine build folder setting')
return parser
def do_run(self, args, remainder):
self.args = args # Avoid having to pass them around
self.config_board = config_get('board', None)
log.dbg('args: {} remainder: {}'.format(args, remainder),
level=log.VERBOSE_EXTREME)
# Store legacy -s option locally
source_dir = self.args.source_dir
self._parse_remainder(remainder)
# Parse testcase.yaml or sample.yaml files for additional options.
if self.args.test_item:
self._parse_test_item()
if source_dir:
if self.args.source_dir:
log.die("source directory specified twice:({} and {})".format(
source_dir, self.args.source_dir))
self.args.source_dir = source_dir
log.dbg('source_dir: {} cmake_opts: {}'.format(self.args.source_dir,
self.args.cmake_opts),
level=log.VERBOSE_EXTREME)
self._sanity_precheck()
self._setup_build_dir()
if args.pristine is not None:
pristine = args.pristine
else:
# Load the pristine={auto, always, never} configuration value
pristine = config_get('pristine', 'never')
if pristine not in ['auto', 'always', 'never']:
log.wrn(
'treating unknown build.pristine value "{}" as "never"'.
format(pristine))
pristine = 'never'
self.auto_pristine = (pristine == 'auto')
log.dbg('pristine: {} auto_pristine: {}'.format(pristine,
self.auto_pristine),
level=log.VERBOSE_VERY)
if is_zephyr_build(self.build_dir):
if pristine == 'always':
self._run_pristine()
self.run_cmake = True
else:
self._update_cache()
if (self.args.cmake or self.args.cmake_opts or
self.args.cmake_only):
self.run_cmake = True
else:
self.run_cmake = True
self.source_dir = self._find_source_dir()
self._sanity_check()
board, origin = self._find_board()
self._run_cmake(board, origin, self.args.cmake_opts)
if args.cmake_only:
return
self._sanity_check()
self._update_cache()
self._run_build(args.target)
def _find_board(self):
board, origin = None, None
if self.cmake_cache:
board, origin = (self.cmake_cache.get('CACHED_BOARD'),
'CMakeCache.txt')
# A malformed CMake cache may exist, but not have a board.
# This happens if there's a build error from a previous run.
if board is not None:
return (board, origin)
if self.args.board:
board, origin = self.args.board, 'command line'
elif 'BOARD' in os.environ:
board, origin = os.environ['BOARD'], 'env'
elif self.config_board is not None:
board, origin = self.config_board, 'configfile'
return board, origin
def _parse_remainder(self, remainder):
self.args.source_dir = None
self.args.cmake_opts = None
try:
# Only one source_dir is allowed, as the first positional arg
if remainder[0] != _ARG_SEPARATOR:
self.args.source_dir = remainder[0]
remainder = remainder[1:]
# Only the first argument separator is consumed, the rest are
# passed on to CMake
if remainder[0] == _ARG_SEPARATOR:
remainder = remainder[1:]
if remainder:
self.args.cmake_opts = remainder
except IndexError:
return
def _parse_test_item(self):
for yp in ['sample.yaml', 'testcase.yaml']:
yf = os.path.join(self.args.source_dir, yp)
if not os.path.exists(yf):
continue
with open(yf, 'r') as stream:
try:
y = yaml.safe_load(stream)
except yaml.YAMLError as exc:
log.die(exc)
tests = y.get('tests')
if not tests:
continue
item = tests.get(self.args.test_item)
if not item:
continue
for data in ['extra_args', 'extra_configs']:
extra = item.get(data)
if not extra:
continue
if isinstance(extra, str):
arg_list = extra.split(" ")
else:
arg_list = extra
args = ["-D{}".format(arg.replace('"', '')) for arg in arg_list]
if self.args.cmake_opts:
self.args.cmake_opts.extend(args)
else:
self.args.cmake_opts = args
def _sanity_precheck(self):
app = self.args.source_dir
if app:
self.check_force(
os.path.isdir(app),
'source directory {} does not exist'.format(app))
self.check_force(
'CMakeLists.txt' in os.listdir(app),
"{} doesn't contain a CMakeLists.txt".format(app))
def _update_cache(self):
try:
self.cmake_cache = CMakeCache.from_build_dir(self.build_dir)
except FileNotFoundError:
pass
def _setup_build_dir(self):
# Initialize build_dir and created_build_dir attributes.
# If we created the build directory, we must run CMake.
log.dbg('setting up build directory', level=log.VERBOSE_EXTREME)
# The CMake Cache has not been loaded yet, so this is safe
board, _ = self._find_board()
source_dir = self._find_source_dir()
app = os.path.split(source_dir)[1]
build_dir = find_build_dir(self.args.build_dir, board=board,
source_dir=source_dir, app=app)
if not build_dir:
log.die('Unable to determine a default build folder. Check '
'your build.dir-fmt configuration option')
if os.path.exists(build_dir):
if not os.path.isdir(build_dir):
log.die('build directory {} exists and is not a directory'.
format(build_dir))
else:
os.makedirs(build_dir, exist_ok=False)
self.created_build_dir = True
self.run_cmake = True
self.build_dir = build_dir
def _find_source_dir(self):
# Initialize source_dir attribute, either from command line argument,
# implicitly from the build directory's CMake cache, or using the
# default (current working directory).
log.dbg('setting up source directory', level=log.VERBOSE_EXTREME)
if self.args.source_dir:
source_dir = self.args.source_dir
elif self.cmake_cache:
source_dir = self.cmake_cache.get('CMAKE_HOME_DIRECTORY')
if not source_dir:
# This really ought to be there. The build directory
# must be corrupted somehow. Let's see what we can do.
log.die('build directory', self.build_dir,
'CMake cache has no CMAKE_HOME_DIRECTORY;',
'please give a source_dir')
else:
source_dir = os.getcwd()
return os.path.abspath(source_dir)
def _sanity_check_source_dir(self):
if self.source_dir == self.build_dir:
# There's no forcing this.
log.die('source and build directory {} cannot be the same; '
'use --build-dir {} to specify a build directory'.
format(self.source_dir, self.build_dir))
srcrel = os.path.relpath(self.source_dir)
self.check_force(
not is_zephyr_build(self.source_dir),
'it looks like {srcrel} is a build directory: '
'did you mean --build-dir {srcrel} instead?'.
format(srcrel=srcrel))
self.check_force(
'CMakeLists.txt' in os.listdir(self.source_dir),
'source directory "{srcrel}" does not contain '
'a CMakeLists.txt; is this really what you '
'want to build? (Use -s SOURCE_DIR to specify '
'the application source directory)'.
format(srcrel=srcrel))
def _sanity_check(self):
# Sanity check the build configuration.
# Side effect: may update cmake_cache attribute.
log.dbg('sanity checking the build', level=log.VERBOSE_EXTREME)
self._sanity_check_source_dir()
if not self.cmake_cache:
return # That's all we can check without a cache.
if "CMAKE_PROJECT_NAME" not in self.cmake_cache:
# This happens sometimes when a build system is not
# completely generated due to an error during the
# CMake configuration phase.
self.run_cmake = True
cached_app = self.cmake_cache.get('APPLICATION_SOURCE_DIR')
log.dbg('APPLICATION_SOURCE_DIR:', cached_app,
level=log.VERBOSE_EXTREME)
source_abs = (os.path.abspath(self.args.source_dir)
if self.args.source_dir else None)
cached_abs = os.path.abspath(cached_app) if cached_app else None
log.dbg('pristine:', self.auto_pristine, level=log.VERBOSE_EXTREME)
# If the build directory specifies a source app, make sure it's
# consistent with --source-dir.
apps_mismatched = (source_abs and cached_abs and
pathlib.Path(source_abs).resolve() != pathlib.Path(cached_abs).resolve())
self.check_force(
not apps_mismatched or self.auto_pristine,
'Build directory "{}" is for application "{}", but source '
'directory "{}" was specified; please clean it, use --pristine, '
'or use --build-dir to set another build directory'.
format(self.build_dir, cached_abs, source_abs))
if apps_mismatched:
self.run_cmake = True # If they insist, we need to re-run cmake.
# If CACHED_BOARD is not defined, we need some other way to
# find the board.
cached_board = self.cmake_cache.get('CACHED_BOARD')
log.dbg('CACHED_BOARD:', cached_board, level=log.VERBOSE_EXTREME)
# If apps_mismatched and self.auto_pristine are true, we will
# run pristine on the build, invalidating the cached
# board. In that case, we need some way of getting the board.
self.check_force((cached_board and
not (apps_mismatched and self.auto_pristine))
or self.args.board or self.config_board or
os.environ.get('BOARD'),
'Cached board not defined, please provide it '
'(provide --board, set default with '
'"west config build.board <BOARD>", or set '
'BOARD in the environment)')
# Check consistency between cached board and --board.
boards_mismatched = (self.args.board and cached_board and
self.args.board != cached_board)
self.check_force(
not boards_mismatched or self.auto_pristine,
'Build directory {} targets board {}, but board {} was specified. '
'(Clean the directory, use --pristine, or use --build-dir to '
'specify a different one.)'.
format(self.build_dir, cached_board, self.args.board))
if self.auto_pristine and (apps_mismatched or boards_mismatched):
self._run_pristine()
self.cmake_cache = None
log.dbg('run_cmake:', True, level=log.VERBOSE_EXTREME)
self.run_cmake = True
# Tricky corner-case: The user has not specified a build folder but
# there was one in the CMake cache. Since this is going to be
# invalidated, reset to CWD and re-run the basic tests.
if ((boards_mismatched and not apps_mismatched) and
(not source_abs and cached_abs)):
self.source_dir = self._find_source_dir()
self._sanity_check_source_dir()
def _run_cmake(self, board, origin, cmake_opts):
if board is None and config_getboolean('board_warn', True):
log.wrn('This looks like a fresh build and BOARD is unknown;',
"so it probably won't work. To fix, use",
'--board=<your-board>.')
log.inf('Note: to silence the above message, run',
"'west config build.board_warn false'")
if not self.run_cmake:
return
_banner('generating a build system')
if board is not None and origin != 'CMakeCache.txt':
cmake_opts = ['-DBOARD={}'.format(board)]
else:
cmake_opts = []
if self.args.cmake_opts:
cmake_opts.extend(self.args.cmake_opts)
user_args = config_get('cmake-args', None)
if user_args:
cmake_opts.extend(shlex.split(user_args))
# Invoke CMake from the current working directory using the
# -S and -B options (officially introduced in CMake 3.13.0).
# This is important because users expect invocations like this
# to Just Work:
#
# west build -- -DOVERLAY_CONFIG=relative-path.conf
final_cmake_args = ['-DWEST_PYTHON={}'.format(sys.executable),
'-B{}'.format(self.build_dir),
'-S{}'.format(self.source_dir),
'-G{}'.format(config_get('generator',
DEFAULT_CMAKE_GENERATOR))]
if cmake_opts:
final_cmake_args.extend(cmake_opts)
run_cmake(final_cmake_args, dry_run=self.args.dry_run)
def _run_pristine(self):
_banner('making build dir {} pristine'.format(self.build_dir))
if not is_zephyr_build(self.build_dir):
log.die('Refusing to run pristine on a folder that is not a '
'Zephyr build system')
cache = CMakeCache.from_build_dir(self.build_dir)
app_src_dir = cache.get('APPLICATION_SOURCE_DIR')
app_bin_dir = cache.get('APPLICATION_BINARY_DIR')
cmake_args = [f'-DBINARY_DIR={app_bin_dir}',
f'-DSOURCE_DIR={app_src_dir}',
'-P', cache['ZEPHYR_BASE'] + '/cmake/pristine.cmake']
run_cmake(cmake_args, cwd=self.build_dir, dry_run=self.args.dry_run)
def _run_build(self, target):
if target:
_banner('running target {}'.format(target))
elif self.run_cmake:
_banner('building application')
extra_args = ['--target', target] if target else []
if self.args.build_opt:
extra_args.append('--')
extra_args.extend(self.args.build_opt)
if self.args.verbose:
self._append_verbose_args(extra_args,
not bool(self.args.build_opt))
run_build(self.build_dir, extra_args=extra_args,
dry_run=self.args.dry_run)
def _append_verbose_args(self, extra_args, add_dashes):
# These hacks are only needed for CMake versions earlier than
# 3.14. When Zephyr's minimum version is at least that, we can
# drop this nonsense and just run "cmake --build BUILD -v".
self._update_cache()
if not self.cmake_cache:
return
generator = self.cmake_cache.get('CMAKE_GENERATOR')
if not generator:
return
# Substring matching is for things like "Eclipse CDT4 - Ninja".
if 'Ninja' in generator:
if add_dashes:
extra_args.append('--')
extra_args.append('-v')
elif generator == 'Unix Makefiles':
if add_dashes:
extra_args.append('--')
extra_args.append('VERBOSE=1')
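# (Added usage note, hedged) Typical invocations of this command, using only
# the flags defined above:
#   west build -b <board> samples/hello_world       # fresh build
#   west build -d build -t menuconfig               # run a build-system target
#   west build -p always -b <board> -- -DCONF_FILE=prj.conf
# Everything after the `--` separator is handed to CMake, as parsed in
# _parse_remainder() above.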
|
opendr/__init__.py | foamliu/opendr | 363 | 11065254 | from .version import version as __version__
def test():
from os.path import split
import unittest
test_loader = unittest.TestLoader()
test_loader = test_loader.discover(split(__file__)[0])
test_runner = unittest.TextTestRunner()
test_runner.run( test_loader )
demos = {}
demos['texture'] = """
# Create renderer
import chumpy as ch
from opendr.renderer import TexturedRenderer
rn = TexturedRenderer()
# Assign attributes to renderer
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
w, h = (320, 240)
from opendr.camera import ProjectPoints
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m.v, f=m.f, vc=m.vc, texture_image=m.texture_image, ft=m.ft, vt=m.vt)
# Show it
import matplotlib.pyplot as plt
plt.ion()
plt.imshow(rn.r)
plt.show()
dr = rn.dr_wrt(rn.v) # or rn.vc, or rn.camera.rt, rn.camera.t, rn.camera.f, rn.camera.c, etc
"""
demos['moments'] = """
from opendr.util_tests import get_earthmesh
from opendr.simple import *
import numpy as np
w, h = 320, 240
m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
# Create V, A, U, f: geometry, brightness, camera, renderer
V = ch.array(m.v)
A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
components=[3.,1.,0.,0.,0.,0.,0.,0.,0.],
light_color=ch.ones(3))
U = ProjectPoints(v=V, f=[300,300.], c=[w/2.,h/2.], k=ch.zeros(5),
t=ch.zeros(3), rt=ch.zeros(3))
rn = TexturedRenderer(vc=A, camera=U, f=m.f, bgcolor=[0.,0.,0.],
texture_image=m.texture_image, vt=m.vt, ft=m.ft,
frustum={'width':w, 'height':h, 'near':1,'far':20})
i, j = ch.array([2.]), ch.array([1.])
xs, ys = ch.meshgrid(range(rn.shape[1]), range(rn.shape[0]))
ysp = ys ** j
xsp = xs ** i
rn_bw = ch.sum(rn, axis=2)
moment = ch.sum((rn_bw * ysp * xsp).ravel())
# Print our numerical result
print(moment)
# Note that opencv produces the same result for 'm21',
# and that other moments can be created by changing "i" and "j" above
import cv2
print(cv2.moments(rn_bw.r)['m21'])
# Derivatives wrt vertices and lighting
print(moment.dr_wrt(V))
print(moment.dr_wrt(A.components))
"""
demos['per_face_normals'] = """
# Create renderer
import chumpy as ch
import numpy as np
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
rn = ColoredRenderer()
# Assign attributes to renderer
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
w, h = (320, 240)
# THESE ARE THE 3 CRITICAL LINES
m.v = m.v[m.f.ravel()]
m.vc = m.vc[m.f.ravel()]
m.f = np.arange(m.f.size).reshape((-1,3))
from opendr.camera import ProjectPoints
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m.v, f=m.f, bgcolor=ch.zeros(3))
# Construct point light source
rn.vc = LambertianPointLight(
f=m.f,
v=rn.v,
num_verts=len(m.v),
light_pos=ch.array([-1000,-1000,-1000]),
vc=m.vc,
light_color=ch.array([1., 1., 1.]))
# Show it
import matplotlib.pyplot as plt
plt.ion()
plt.imshow(rn.r)
plt.show()
dr = rn.dr_wrt(rn.v) # or rn.vc, or rn.camera.rt, rn.camera.t, rn.camera.f, rn.camera.c, etc
"""
demos['silhouette'] = """
# Create renderer
import chumpy as ch
from opendr.renderer import ColoredRenderer
rn = ColoredRenderer()
# Assign attributes to renderer
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
w, h = (320, 240)
from opendr.camera import ProjectPoints
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m.v, f=m.f, vc=m.vc*0+1, bgcolor=ch.zeros(3))
# Show it
import matplotlib.pyplot as plt
plt.ion()
plt.imshow(rn.r)
plt.show()
dr = rn.dr_wrt(rn.v) # or rn.vc, or rn.camera.rt, rn.camera.t, rn.camera.f, rn.camera.c, etc
"""
demos['boundary'] = """
# Create renderer
import chumpy as ch
from opendr.renderer import BoundaryRenderer
rn = BoundaryRenderer()
# Assign attributes to renderer
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
w, h = (320, 240)
from opendr.camera import ProjectPoints
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m.v, f=m.f, vc=m.vc*0+1, bgcolor=ch.zeros(3), num_channels=3)
# Show it
import matplotlib.pyplot as plt
plt.ion()
plt.imshow(rn.r)
plt.show()
dr = rn.dr_wrt(rn.v) # or rn.vc, or rn.camera.rt, rn.camera.t, rn.camera.f, rn.camera.c, etc
"""
demos['point_light'] = """
# Create renderer
import chumpy as ch
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
rn = ColoredRenderer()
# Assign attributes to renderer
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
w, h = (320, 240)
from opendr.camera import ProjectPoints
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m.v, f=m.f, bgcolor=ch.zeros(3))
# Construct point light source
rn.vc = LambertianPointLight(
f=m.f,
v=rn.v,
num_verts=len(m.v),
light_pos=ch.array([-1000,-1000,-1000]),
vc=m.vc,
light_color=ch.array([1., 1., 1.]))
# Show it
import matplotlib.pyplot as plt
plt.ion()
plt.imshow(rn.r)
plt.show()
dr = rn.dr_wrt(rn.v) # or rn.vc, or rn.camera.rt, rn.camera.t, rn.camera.f, rn.camera.c, etc
"""
demos['spherical_harmonics'] = """
# Create renderer
import chumpy as ch
from opendr.renderer import ColoredRenderer
from opendr.lighting import SphericalHarmonics
from opendr.geometry import VertNormals
rn = ColoredRenderer()
# Assign attributes to renderer
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
w, h = (320, 240)
from opendr.camera import ProjectPoints
rn.camera = ProjectPoints(v=m.v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m.v, f=m.f, bgcolor=ch.zeros(3))
vn = VertNormals(v=rn.v, f=rn.f)
sh_red = SphericalHarmonics(vn=vn, light_color=ch.array([1,0,0]), components=ch.random.randn(9))
sh_green = SphericalHarmonics(vn=vn, light_color=ch.array([0,1,0]), components=ch.random.randn(9))
sh_blue = SphericalHarmonics(vn=vn, light_color=ch.array([0,0,1]), components=ch.random.randn(9))
rn.vc = sh_red + sh_green + sh_blue
# Show it
import matplotlib.pyplot as plt
plt.ion()
plt.imshow(rn.r)
plt.show()
dr = rn.dr_wrt(rn.v) # or rn.vc, or rn.camera.rt, rn.camera.t, rn.camera.f, rn.camera.c, etc
"""
demos['optimization'] = """
from opendr.simple import *
import numpy as np
import matplotlib.pyplot as plt
w, h = 320, 240
try:
m = load_mesh('earth.obj')
except:
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,0]), rotation=ch.zeros(3))
# Create V, A, U, f: geometry, brightness, camera, renderer
V = ch.array(m.v)
A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
components=[3.,2.,0.,0.,0.,0.,0.,0.,0.],
light_color=ch.ones(3))
U = ProjectPoints(v=V, f=[w,w], c=[w/2.,h/2.], k=ch.zeros(5),
t=ch.zeros(3), rt=ch.zeros(3))
f = TexturedRenderer(vc=A, camera=U, f=m.f, bgcolor=[0.,0.,0.],
texture_image=m.texture_image, vt=m.vt, ft=m.ft,
frustum={'width':w, 'height':h, 'near':1,'far':20})
# Parameterize the vertices
translation, rotation = ch.array([0,0,8]), ch.zeros(3)
f.v = translation + V.dot(Rodrigues(rotation))
observed = f.r
np.random.seed(1)
translation[:] = translation.r + np.random.rand(3)
rotation[:] = rotation.r + np.random.rand(3) *.2
A.components[1:] = 0
# Create the energy
E_raw = f - observed
E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')
def cb(_):
import cv2
global E_raw
cv2.imshow('Absolute difference', np.abs(E_raw.r))
cv2.waitKey(1)
print('OPTIMIZING TRANSLATION, ROTATION, AND LIGHT PARAMS')
free_variables=[translation, rotation, A.components]
ch.minimize({'pyr': E_pyr}, x0=free_variables, callback=cb)
ch.minimize({'raw': E_raw}, x0=free_variables, callback=cb)
"""
demos['optimization_cpl'] = """
from opendr.simple import *
import numpy as np
import matplotlib.pyplot as plt
w, h = 320, 240
try:
m = load_mesh('earth.obj')
except:
from opendr.util_tests import get_earthmesh
m = get_earthmesh(trans=ch.array([0,0,0]), rotation=ch.zeros(3))
# Create V, A, U, f: geometry, brightness, camera, renderer
V = ch.array(m.v)
A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
components=[3.,2.,0.,0.,0.,0.,0.,0.,0.],
light_color=ch.ones(3))
U = ProjectPoints(v=V, f=[w,w], c=[w/2.,h/2.], k=ch.zeros(5),
t=ch.zeros(3), rt=ch.zeros(3))
f = TexturedRenderer(vc=A, camera=U, f=m.f, bgcolor=[0.,0.,0.],
texture_image=m.texture_image, vt=m.vt, ft=m.ft,
frustum={'width':w, 'height':h, 'near':1,'far':20})
# Parameterize the vertices
translation, rotation = ch.array([0,0,8]), ch.zeros(3)
model_v = translation + ch.array(V.r).dot(Rodrigues(rotation))
# Simulate an observed image
V[:] = model_v.r
observed = f.r
np.random.seed(1)
translation[:] = translation.r + np.random.rand(3)
rotation[:] = rotation.r + np.random.rand(3) *.2
V[:] = model_v.r
A.components[1:] = 0
# Create the energy
E_raw = f - observed
E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')
def cb(_):
import cv2
global E_raw
cv2.imshow('Absolute difference', np.abs(E_raw.r))
cv2.waitKey(1)
print('OPTIMIZING TRANSLATION, ROTATION, AND LIGHT PARAMS')
free_variables=[translation, rotation, A.components, V]
ch.minimize({'pyr': E_pyr, 'cpl': (V - model_v)*1e-4}, x0=free_variables, callback=cb)
ch.minimize({'raw': E_raw, 'cpl': V - model_v}, x0=free_variables, callback=cb)
"""
def demo(which=None):
import re
if which not in demos:
print('Please indicate which demo you want, as follows:')
for key in demos:
print("\tdemo('%s')" % (key,))
return
print('- - - - - - - - - - - <CODE> - - - - - - - - - - - -')
print(re.sub('global.*\n','',demos[which]))
print('- - - - - - - - - - - </CODE> - - - - - - - - - - - -\n')
exec('global np\n' + demos[which], globals(), locals())
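# Illustrative usage of the demo runner above (an assumption, not part of
# the original module): calling demo() with no argument lists the available
# demo names, while demo('point_light') prints and then executes that
# demo's source.
#
#     >>> demo()               # lists: boundary, point_light, ...
#     >>> demo('point_light')  # renders the earth mesh under a point light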
|
tests/test_autogen_indexes.py | kasium/alembic | 1,324 | 11065273 | from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Numeric
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import UniqueConstraint
from sqlalchemy.sql.expression import column
from sqlalchemy.sql.expression import desc
from alembic.testing import assertions
from alembic.testing import combinations
from alembic.testing import config
from alembic.testing import eq_
from alembic.testing import schemacompare
from alembic.testing import TestBase
from alembic.testing import util
from alembic.testing.env import staging_env
from alembic.testing.suite._autogen_fixtures import AutogenFixtureTest
from alembic.util import sqla_compat
# TODO: create new suites that are taking tests from this suite, with a
# separate class for AutogenIndexes, AutogenUniqueConstraint, and a
# subset of the tests here. @zzzeek can work on this at a later point.
# (2021-06-10)
class NoUqReflection:
__requires__ = ()
def setUp(self):
staging_env()
self.bind = eng = util.testing_engine()
def unimpl(*arg, **kw):
raise NotImplementedError()
eng.dialect.get_unique_constraints = unimpl
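    # Illustrative note (not part of the original suite): with
    # unique-constraint reflection disabled above, autogenerate comparisons
    # can only see index information, and the subclasses below assert that
    # degraded behavior.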
def test_add_ix_on_table_create(self):
return super(NoUqReflection, self).test_add_ix_on_table_create()
def test_add_idx_non_col(self):
return super(NoUqReflection, self).test_add_idx_non_col()
class AutogenerateUniqueIndexTest(AutogenFixtureTest, TestBase):
reports_unique_constraints = True
reports_unique_constraints_as_indexes = False
__requires__ = ("unique_constraint_reflection",)
__only_on__ = "sqlite"
def test_index_flag_becomes_named_unique_constraint(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False, index=True),
Column("a1", String(10), server_default="x"),
)
Table(
"user",
m2,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("a1", String(10), server_default="x"),
UniqueConstraint("name", name="uq_user_name"),
)
diffs = self._fixture(m1, m2)
if self.reports_unique_constraints:
eq_(diffs[0][0], "remove_index")
eq_(diffs[0][1].name, "ix_user_name")
eq_(diffs[1][0], "add_constraint")
eq_(diffs[1][1].name, "uq_user_name")
else:
eq_(diffs[0][0], "remove_index")
eq_(diffs[0][1].name, "ix_user_name")
def test_add_unique_constraint(self):
m1 = MetaData()
m2 = MetaData()
Table(
"address",
m1,
Column("id", Integer, primary_key=True),
Column("email_address", String(100), nullable=False),
Column("qpr", String(10), index=True),
)
Table(
"address",
m2,
Column("id", Integer, primary_key=True),
Column("email_address", String(100), nullable=False),
Column("qpr", String(10), index=True),
UniqueConstraint("email_address", name="uq_email_address"),
)
diffs = self._fixture(m1, m2)
if self.reports_unique_constraints:
eq_(diffs[0][0], "add_constraint")
eq_(diffs[0][1].name, "uq_email_address")
else:
eq_(diffs, [])
def test_unique_flag_nothing_changed(self):
m1 = MetaData()
m2 = MetaData()
Table(
"unq_idx",
m1,
Column("id", Integer, primary_key=True),
Column("x", String(20)),
Index("x", "x", unique=True),
)
Table(
"unq_idx",
m2,
Column("id", Integer, primary_key=True),
Column("x", String(20)),
Index("x", "x", unique=True),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_index_becomes_unique(self):
m1 = MetaData()
m2 = MetaData()
Table(
"order",
m1,
Column("order_id", Integer, primary_key=True),
Column("amount", Numeric(10, 2), nullable=True),
Column("user_id", Integer),
UniqueConstraint(
"order_id", "user_id", name="order_order_id_user_id_unique"
),
Index("order_user_id_amount_idx", "user_id", "amount"),
)
Table(
"order",
m2,
Column("order_id", Integer, primary_key=True),
Column("amount", Numeric(10, 2), nullable=True),
Column("user_id", Integer),
UniqueConstraint(
"order_id", "user_id", name="order_order_id_user_id_unique"
),
Index(
"order_user_id_amount_idx", "user_id", "amount", unique=True
),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_index")
eq_(diffs[0][1].name, "order_user_id_amount_idx")
eq_(diffs[0][1].unique, False)
eq_(diffs[1][0], "add_index")
eq_(diffs[1][1].name, "order_user_id_amount_idx")
eq_(diffs[1][1].unique, True)
def test_mismatch_db_named_col_flag(self):
m1 = MetaData()
m2 = MetaData()
Table(
"item",
m1,
Column("x", Integer),
UniqueConstraint("x", name="db_generated_name"),
)
# test mismatch between unique=True and
# named uq constraint
Table("item", m2, Column("x", Integer, unique=True))
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_new_table_added(self):
m1 = MetaData()
m2 = MetaData()
Table(
"extra",
m2,
Column("foo", Integer, index=True),
Column("bar", Integer),
Index("newtable_idx", "bar"),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_table")
eq_(diffs[1][0], "add_index")
eq_(
sqla_compat._get_constraint_final_name(
diffs[1][1], config.db.dialect
),
"ix_extra_foo",
)
eq_(diffs[2][0], "add_index")
eq_(diffs[2][1].name, "newtable_idx")
def test_named_cols_changed(self):
m1 = MetaData()
m2 = MetaData()
Table(
"col_change",
m1,
Column("x", Integer),
Column("y", Integer),
UniqueConstraint("x", name="nochange"),
)
Table(
"col_change",
m2,
Column("x", Integer),
Column("y", Integer),
UniqueConstraint("x", "y", name="nochange"),
)
diffs = self._fixture(m1, m2)
if self.reports_unique_constraints:
eq_(diffs[0][0], "remove_constraint")
eq_(diffs[0][1].name, "nochange")
eq_(diffs[1][0], "add_constraint")
eq_(diffs[1][1].name, "nochange")
else:
eq_(diffs, [])
def test_nothing_changed_one(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("x", String(20), unique=True, index=True),
)
Table(
"nothing_changed",
m2,
Column("x", String(20), unique=True, index=True),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_implicit_uq_w_naming_conv(self):
m1 = MetaData(
naming_convention={
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(column_0_label)s",
}
)
m2 = MetaData(
naming_convention={
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(column_0_label)s",
}
)
Table(
"nothing_changed",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20), unique=True),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20), unique=True),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_uq_changed_labels_were_truncated(self):
m1 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
m2 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
Table(
"nothing_changed",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("a_long_name", String(20), unique=True),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("a_long_name", String(20), unique=True),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2, max_identifier_length=30)
eq_(diffs, [])
@config.requirements.long_names
def test_nothing_ix_changed_labels_were_truncated(self):
m1 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
m2 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
Table(
"nothing_changed",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("a_particularly_long_column_name", String(20), index=True),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("a_particularly_long_column_name", String(20), index=True),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2, max_identifier_length=30)
eq_(diffs, [])
@config.requirements.long_names
def test_nothing_changed_uq_w_mixed_case_nconv_name(self):
m1 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
m2 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
Table(
"NothingChanged",
m1,
Column("id", Integer, primary_key=True),
Column("XCol", Integer),
UniqueConstraint("XCol"),
mysql_engine="InnoDB",
)
Table(
"NothingChanged",
m2,
Column("id", Integer, primary_key=True),
Column("XCol", Integer),
UniqueConstraint("XCol"),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_uq_w_mixed_case_plain_name(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer),
UniqueConstraint("x", name="SomeConstraint"),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer),
UniqueConstraint("x", name="SomeConstraint"),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_ix_w_mixed_case_plain_name(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Index("SomeIndex", "x"),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Index("SomeIndex", "x"),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
@config.requirements.long_names
def test_nothing_changed_ix_w_mixed_case_nconv_name(self):
m1 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
m2 = MetaData(
naming_convention={
"ix": "index_%(table_name)s_%(column_0_label)s",
"uq": "unique_%(table_name)s_%(column_0_label)s",
}
)
Table(
"NothingChanged",
m1,
Column("id", Integer, primary_key=True),
Column("XCol", Integer, index=True),
mysql_engine="InnoDB",
)
Table(
"NothingChanged",
m2,
Column("id", Integer, primary_key=True),
Column("XCol", Integer, index=True),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_two(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20), unique=True),
mysql_engine="InnoDB",
)
Table(
"nothing_changed_related",
m1,
Column("id1", Integer),
Column("id2", Integer),
ForeignKeyConstraint(
["id1", "id2"], ["nothing_changed.id1", "nothing_changed.id2"]
),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20), unique=True),
mysql_engine="InnoDB",
)
Table(
"nothing_changed_related",
m2,
Column("id1", Integer),
Column("id2", Integer),
ForeignKeyConstraint(
["id1", "id2"], ["nothing_changed.id1", "nothing_changed.id2"]
),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_unique_w_colkeys(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("x", String(20), key="nx"),
UniqueConstraint("nx"),
)
Table(
"nothing_changed",
m2,
Column("x", String(20), key="nx"),
UniqueConstraint("nx"),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_index_w_colkeys(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("x", String(20), key="nx"),
Index("foobar", "nx"),
)
Table(
"nothing_changed",
m2,
Column("x", String(20), key="nx"),
Index("foobar", "nx"),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_index_named_as_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20)),
Index("x", "x"),
)
Table(
"nothing_changed",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20)),
Index("x", "x"),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_implicit_fk_index_named(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("id", Integer, primary_key=True),
Column(
"other_id",
ForeignKey("nc2.id", name="fk_my_table_other_table"),
nullable=False,
),
Column("foo", Integer),
mysql_engine="InnoDB",
)
Table(
"nc2",
m1,
Column("id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id", Integer, primary_key=True),
Column(
"other_id",
ForeignKey("nc2.id", name="fk_my_table_other_table"),
nullable=False,
),
Column("foo", Integer),
mysql_engine="InnoDB",
)
Table(
"nc2",
m2,
Column("id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_nothing_changed_implicit_composite_fk_index_named(self):
m1 = MetaData()
m2 = MetaData()
Table(
"nothing_changed",
m1,
Column("id", Integer, primary_key=True),
Column("other_id_1", Integer),
Column("other_id_2", Integer),
Column("foo", Integer),
ForeignKeyConstraint(
["other_id_1", "other_id_2"],
["nc2.id1", "nc2.id2"],
name="fk_my_table_other_table",
),
mysql_engine="InnoDB",
)
Table(
"nc2",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"nothing_changed",
m2,
Column("id", Integer, primary_key=True),
Column("other_id_1", Integer),
Column("other_id_2", Integer),
Column("foo", Integer),
ForeignKeyConstraint(
["other_id_1", "other_id_2"],
["nc2.id1", "nc2.id2"],
name="fk_my_table_other_table",
),
mysql_engine="InnoDB",
)
Table(
"nc2",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
mysql_engine="InnoDB",
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_ix_casing_convention_changed_so_put_drops_first(self):
m1 = MetaData()
m2 = MetaData()
ix1 = Index("SomeCasingConvention", "x")
Table(
"new_idx",
m1,
Column("id1", Integer, primary_key=True),
Column("x", String(20)),
ix1,
)
ix2 = Index("somecasingconvention", "x")
Table(
"new_idx",
m2,
Column("id1", Integer, primary_key=True),
Column("x", String(20)),
ix2,
)
diffs = self._fixture(m1, m2)
eq_(
[(d[0], d[1].name) for d in diffs],
[
("remove_index", "SomeCasingConvention"),
("add_index", "somecasingconvention"),
],
)
def test_uq_casing_convention_changed_so_put_drops_first(self):
m1 = MetaData()
m2 = MetaData()
uq1 = UniqueConstraint("x", name="SomeCasingConvention")
Table(
"new_idx",
m1,
Column("id1", Integer, primary_key=True),
Column("x", String(20)),
uq1,
)
uq2 = UniqueConstraint("x", name="somecasingconvention")
Table(
"new_idx",
m2,
Column("id1", Integer, primary_key=True),
Column("x", String(20)),
uq2,
)
diffs = self._fixture(m1, m2)
if self.reports_unique_constraints_as_indexes:
eq_(
[(d[0], d[1].name) for d in diffs],
[
("remove_index", "SomeCasingConvention"),
("add_constraint", "somecasingconvention"),
],
)
else:
eq_(
[(d[0], d[1].name) for d in diffs],
[
("remove_constraint", "SomeCasingConvention"),
("add_constraint", "somecasingconvention"),
],
)
def test_new_idx_index_named_as_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"new_idx",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20)),
)
idx = Index("x", "x")
Table(
"new_idx",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20)),
idx,
)
diffs = self._fixture(m1, m2)
eq_(diffs, [("add_index", schemacompare.CompareIndex(idx))])
def test_removed_idx_index_named_as_column(self):
m1 = MetaData()
m2 = MetaData()
idx = Index("x", "x")
Table(
"new_idx",
m1,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20)),
idx,
)
Table(
"new_idx",
m2,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
Column("x", String(20)),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_index")
def test_drop_table_w_indexes(self):
m1 = MetaData()
m2 = MetaData()
t = Table(
"some_table",
m1,
Column("id", Integer, primary_key=True),
Column("x", String(20)),
Column("y", String(20)),
)
Index("xy_idx", t.c.x, t.c.y)
Index("y_idx", t.c.y)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_index")
eq_(diffs[1][0], "remove_index")
eq_(diffs[2][0], "remove_table")
eq_(
set([diffs[0][1].name, diffs[1][1].name]), set(["xy_idx", "y_idx"])
)
def test_drop_table_w_uq_constraint(self):
m1 = MetaData()
m2 = MetaData()
Table(
"some_table",
m1,
Column("id", Integer, primary_key=True),
Column("x", String(20)),
Column("y", String(20)),
UniqueConstraint("y", name="uq_y"),
)
diffs = self._fixture(m1, m2)
if self.reports_unique_constraints_as_indexes:
# for MySQL this UQ will look like an index, so
# make sure it at least sets it up correctly
eq_(diffs[0][0], "remove_index")
eq_(diffs[1][0], "remove_table")
eq_(len(diffs), 2)
constraints = [
c
for c in diffs[1][1].constraints
if isinstance(c, UniqueConstraint)
]
eq_(len(constraints), 0)
else:
eq_(diffs[0][0], "remove_table")
eq_(len(diffs), 1)
constraints = [
c
for c in diffs[0][1].constraints
if isinstance(c, UniqueConstraint)
]
if self.reports_unique_constraints:
eq_(len(constraints), 1)
def test_unnamed_cols_changed(self):
m1 = MetaData()
m2 = MetaData()
Table(
"col_change",
m1,
Column("x", Integer),
Column("y", Integer),
UniqueConstraint("x"),
)
Table(
"col_change",
m2,
Column("x", Integer),
Column("y", Integer),
UniqueConstraint("x", "y"),
)
diffs = self._fixture(m1, m2)
diffs = set(
(
cmd,
isinstance(obj, (UniqueConstraint, Index))
if obj.name is not None
else False,
)
for cmd, obj in diffs
)
if self.reports_unnamed_constraints:
if self.reports_unique_constraints_as_indexes:
eq_(
diffs,
set([("remove_index", True), ("add_constraint", False)]),
)
else:
eq_(
diffs,
set(
[
("remove_constraint", True),
("add_constraint", False),
]
),
)
def test_remove_named_unique_index(self):
m1 = MetaData()
m2 = MetaData()
Table(
"remove_idx",
m1,
Column("x", Integer),
Index("xidx", "x", unique=True),
)
Table("remove_idx", m2, Column("x", Integer))
diffs = self._fixture(m1, m2)
if self.reports_unique_constraints:
diffs = set((cmd, obj.name) for cmd, obj in diffs)
eq_(diffs, set([("remove_index", "xidx")]))
else:
eq_(diffs, [])
def test_remove_named_unique_constraint(self):
m1 = MetaData()
m2 = MetaData()
Table(
"remove_idx",
m1,
Column("x", Integer),
UniqueConstraint("x", name="xidx"),
)
Table("remove_idx", m2, Column("x", Integer))
diffs = self._fixture(m1, m2)
if self.reports_unique_constraints:
diffs = set((cmd, obj.name) for cmd, obj in diffs)
if self.reports_unique_constraints_as_indexes:
eq_(diffs, set([("remove_index", "xidx")]))
else:
eq_(diffs, set([("remove_constraint", "xidx")]))
else:
eq_(diffs, [])
def test_dont_add_uq_on_table_create(self):
m1 = MetaData()
m2 = MetaData()
Table("no_uq", m2, Column("x", String(50), unique=True))
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_table")
eq_(len(diffs), 1)
# checking for dupes also
eq_(
sorted(
[type(cons) for cons in diffs[0][1].constraints],
key=lambda c: c.__name__,
),
[PrimaryKeyConstraint, UniqueConstraint],
)
@config.requirements.reflects_unique_constraints_unambiguously
def test_dont_add_uq_on_reverse_table_drop(self):
m1 = MetaData()
m2 = MetaData()
Table("no_uq", m1, Column("x", String(50), unique=True))
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_table")
eq_(len(diffs), 1)
# because the drop comes from reflection, the "unique=True" flag
# is lost in any case.
eq_(
sorted(
[type(cons) for cons in diffs[0][1].constraints],
key=lambda c: c.__name__,
),
[PrimaryKeyConstraint, UniqueConstraint],
)
def test_add_uq_ix_on_table_create(self):
m1 = MetaData()
m2 = MetaData()
Table("add_ix", m2, Column("x", String(50), unique=True, index=True))
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_table")
eq_(len(diffs), 2)
assert UniqueConstraint not in set(
type(c) for c in diffs[0][1].constraints
)
eq_(diffs[1][0], "add_index")
d_table = diffs[0][1]
d_idx = diffs[1][1]
eq_(d_idx.unique, True)
# check for dupes
eq_(len(diffs), 2)
assert not d_table.indexes
def test_add_ix_on_table_create(self):
m1 = MetaData()
m2 = MetaData()
Table("add_ix", m2, Column("x", String(50), index=True))
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_table")
eq_(len(diffs), 2)
assert UniqueConstraint not in set(
type(c) for c in diffs[0][1].constraints
)
eq_(diffs[1][0], "add_index")
eq_(diffs[1][1].unique, False)
def test_add_idx_non_col(self):
m1 = MetaData()
m2 = MetaData()
Table("add_ix", m1, Column("x", String(50)))
t2 = Table("add_ix", m2, Column("x", String(50)))
Index("foo_idx", t2.c.x.desc())
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_index")
@config.requirements.reflects_indexes_w_sorting
def test_idx_string_col_in_fn_no_change(self):
"""test #880"""
m1 = MetaData()
m2 = MetaData()
t1 = Table("add_ix", m1, Column("x", String(50)))
t1.append_constraint(Index("foo_idx", desc(column("x"))))
t2 = Table("add_ix", m2, Column("x", String(50)))
t2.append_constraint(Index("foo_idx", desc(column("x"))))
diffs = self._fixture(m1, m2)
eq_(diffs, [])
@config.requirements.reflects_indexes_w_sorting
def test_unchanged_idx_non_col(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table("add_ix", m1, Column("x", String(50)))
Index("foo_idx", t1.c.x.desc())
t2 = Table("add_ix", m2, Column("x", String(50)))
Index("foo_idx", t2.c.x.desc())
diffs = self._fixture(m1, m2)
eq_(diffs, [])
# fails in the 0.8 series where we have truncation rules,
# but no control over quoting. passes in 0.7.9 where we don't have
# truncation rules either. dropping these ancient versions
# is long overdue.
def test_unchanged_case_sensitive_implicit_idx(self):
m1 = MetaData()
m2 = MetaData()
Table("add_ix", m1, Column("regNumber", String(50), index=True))
Table("add_ix", m2, Column("regNumber", String(50), index=True))
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_unchanged_case_sensitive_explicit_idx(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table("add_ix", m1, Column("reg_number", String(50)))
Index("regNumber_idx", t1.c.reg_number)
t2 = Table("add_ix", m2, Column("reg_number", String(50)))
Index("regNumber_idx", t2.c.reg_number)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
class PGUniqueIndexTest(AutogenerateUniqueIndexTest):
reports_unnamed_constraints = True
__only_on__ = "postgresql"
__backend__ = True
def test_idx_added_schema(self):
m1 = MetaData()
m2 = MetaData()
Table("add_ix", m1, Column("x", String(50)), schema="test_schema")
Table(
"add_ix",
m2,
Column("x", String(50)),
Index("ix_1", "x"),
schema="test_schema",
)
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(diffs[0][0], "add_index")
eq_(diffs[0][1].name, "ix_1")
def test_idx_unchanged_schema(self):
m1 = MetaData()
m2 = MetaData()
Table(
"add_ix",
m1,
Column("x", String(50)),
Index("ix_1", "x"),
schema="test_schema",
)
Table(
"add_ix",
m2,
Column("x", String(50)),
Index("ix_1", "x"),
schema="test_schema",
)
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(diffs, [])
def test_uq_added_schema(self):
m1 = MetaData()
m2 = MetaData()
Table("add_uq", m1, Column("x", String(50)), schema="test_schema")
Table(
"add_uq",
m2,
Column("x", String(50)),
UniqueConstraint("x", name="ix_1"),
schema="test_schema",
)
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(diffs[0][0], "add_constraint")
eq_(diffs[0][1].name, "ix_1")
def test_uq_unchanged_schema(self):
m1 = MetaData()
m2 = MetaData()
Table(
"add_uq",
m1,
Column("x", String(50)),
UniqueConstraint("x", name="ix_1"),
schema="test_schema",
)
Table(
"add_uq",
m2,
Column("x", String(50)),
UniqueConstraint("x", name="ix_1"),
schema="test_schema",
)
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(diffs, [])
@config.requirements.btree_gist
def test_exclude_const_unchanged(self):
from sqlalchemy.dialects.postgresql import TSRANGE, ExcludeConstraint
m1 = MetaData()
m2 = MetaData()
Table(
"add_excl",
m1,
Column("id", Integer, primary_key=True),
Column("period", TSRANGE),
ExcludeConstraint(("period", "&&"), name="quarters_period_excl"),
)
Table(
"add_excl",
m2,
Column("id", Integer, primary_key=True),
Column("period", TSRANGE),
ExcludeConstraint(("period", "&&"), name="quarters_period_excl"),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_same_tname_two_schemas(self):
m1 = MetaData()
m2 = MetaData()
Table("add_ix", m1, Column("x", String(50)), Index("ix_1", "x"))
Table("add_ix", m2, Column("x", String(50)), Index("ix_1", "x"))
Table("add_ix", m2, Column("x", String(50)), schema="test_schema")
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(diffs[0][0], "add_table")
eq_(len(diffs), 1)
def test_uq_dropped(self):
m1 = MetaData()
m2 = MetaData()
Table(
"add_uq",
m1,
Column("id", Integer, primary_key=True),
Column("name", String),
UniqueConstraint("name", name="uq_name"),
)
Table(
"add_uq",
m2,
Column("id", Integer, primary_key=True),
Column("name", String),
)
diffs = self._fixture(m1, m2, include_schemas=True)
eq_(diffs[0][0], "remove_constraint")
eq_(diffs[0][1].name, "uq_name")
eq_(len(diffs), 1)
def test_functional_ix_one(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table(
"foo",
m1,
Column("id", Integer, primary_key=True),
Column("email", String(50)),
)
Index("email_idx", func.lower(t1.c.email), unique=True)
t2 = Table(
"foo",
m2,
Column("id", Integer, primary_key=True),
Column("email", String(50)),
)
Index("email_idx", func.lower(t2.c.email), unique=True)
with assertions.expect_warnings(
"Skipped unsupported reflection",
"autogenerate skipping functional index",
):
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_functional_ix_two(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table(
"foo",
m1,
Column("id", Integer, primary_key=True),
Column("email", String(50)),
Column("name", String(50)),
)
Index(
"email_idx",
func.coalesce(t1.c.email, t1.c.name).desc(),
unique=True,
)
t2 = Table(
"foo",
m2,
Column("id", Integer, primary_key=True),
Column("email", String(50)),
Column("name", String(50)),
)
Index(
"email_idx",
func.coalesce(t2.c.email, t2.c.name).desc(),
unique=True,
)
with assertions.expect_warnings(
"Skipped unsupported reflection",
"autogenerate skipping functional index",
):
diffs = self._fixture(m1, m2)
eq_(diffs, [])
class MySQLUniqueIndexTest(AutogenerateUniqueIndexTest):
reports_unnamed_constraints = True
reports_unique_constraints_as_indexes = True
__only_on__ = "mysql", "mariadb"
__backend__ = True
def test_removed_idx_index_named_as_column(self):
try:
super(
MySQLUniqueIndexTest, self
).test_removed_idx_index_named_as_column()
        except IndexError:
            pass  # expected failure path for this backend
else:
assert False, "unexpected success"
class OracleUniqueIndexTest(AutogenerateUniqueIndexTest):
reports_unnamed_constraints = True
reports_unique_constraints_as_indexes = True
__only_on__ = "oracle"
__backend__ = True
class NoUqReflectionIndexTest(NoUqReflection, AutogenerateUniqueIndexTest):
reports_unique_constraints = False
__only_on__ = "sqlite"
def test_uq_casing_convention_changed_so_put_drops_first(self):
config.skip_test(
"unique constraint reflection disabled for this suite"
)
def test_dont_add_uq_on_reverse_table_drop(self):
config.skip_test(
"unique constraint reflection disabled for this suite"
)
def test_unique_not_reported(self):
m1 = MetaData()
Table(
"order",
m1,
Column("order_id", Integer, primary_key=True),
Column("amount", Numeric(10, 2), nullable=True),
Column("user_id", Integer),
UniqueConstraint(
"order_id", "user_id", name="order_order_id_user_id_unique"
),
)
diffs = self._fixture(m1, m1)
eq_(diffs, [])
def test_remove_unique_index_not_reported(self):
m1 = MetaData()
Table(
"order",
m1,
Column("order_id", Integer, primary_key=True),
Column("amount", Numeric(10, 2), nullable=True),
Column("user_id", Integer),
Index("oid_ix", "order_id", "user_id", unique=True),
)
m2 = MetaData()
Table(
"order",
m2,
Column("order_id", Integer, primary_key=True),
Column("amount", Numeric(10, 2), nullable=True),
Column("user_id", Integer),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
def test_remove_plain_index_is_reported(self):
m1 = MetaData()
Table(
"order",
m1,
Column("order_id", Integer, primary_key=True),
Column("amount", Numeric(10, 2), nullable=True),
Column("user_id", Integer),
Index("oid_ix", "order_id", "user_id"),
)
m2 = MetaData()
Table(
"order",
m2,
Column("order_id", Integer, primary_key=True),
Column("amount", Numeric(10, 2), nullable=True),
Column("user_id", Integer),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_index")
class NoUqReportsIndAsUqTest(NoUqReflectionIndexTest):
"""this test suite simulates the condition where:
a. the dialect doesn't report unique constraints
b. the dialect returns unique constraints within the indexes list.
Currently the mssql dialect does this, but here we force this
condition so that we can test the behavior regardless of if/when
mssql supports unique constraint reflection.
"""
__only_on__ = "sqlite"
@classmethod
def _get_bind(cls):
eng = config.db
_get_unique_constraints = eng.dialect.get_unique_constraints
_get_indexes = eng.dialect.get_indexes
def unimpl(*arg, **kw):
raise NotImplementedError()
def get_indexes(self, connection, tablename, **kw):
indexes = _get_indexes(self, connection, tablename, **kw)
for uq in _get_unique_constraints(
self, connection, tablename, **kw
):
uq["unique"] = True
indexes.append(uq)
return indexes
eng.dialect.get_unique_constraints = unimpl
eng.dialect.get_indexes = get_indexes
return eng
class IncludeHooksTest(AutogenFixtureTest, TestBase):
__backend__ = True
@combinations(("name",), ("object",))
def test_remove_connection_index(self, hook_type):
m1 = MetaData()
m2 = MetaData()
t1 = Table("t", m1, Column("x", Integer), Column("y", Integer))
Index("ix1", t1.c.x)
Index("ix2", t1.c.y)
Table("t", m2, Column("x", Integer), Column("y", Integer))
if hook_type == "object":
def include_object(object_, name, type_, reflected, compare_to):
if type_ == "unique_constraint":
return False
return not (
isinstance(object_, Index)
and type_ == "index"
and reflected
and name == "ix1"
)
diffs = self._fixture(m1, m2, object_filters=include_object)
elif hook_type == "name":
all_names = set()
def include_name(name, type_, parent_names):
all_names.add((name, type_))
if name == "ix1":
eq_(type_, "index")
eq_(
parent_names,
{
"table_name": "t",
"schema_name": None,
"schema_qualified_table_name": "t",
},
)
return False
else:
return True
diffs = self._fixture(m1, m2, name_filters=include_name)
eq_(
all_names,
{
("ix1", "index"),
("ix2", "index"),
("y", "column"),
("t", "table"),
(None, "schema"),
("x", "column"),
},
)
eq_(diffs[0][0], "remove_index")
eq_(diffs[0][1].name, "ix2")
eq_(len(diffs), 1)
@combinations(("name",), ("object",))
@config.requirements.unique_constraint_reflection
@config.requirements.reflects_unique_constraints_unambiguously
def test_remove_connection_uq(self, hook_type):
m1 = MetaData()
m2 = MetaData()
Table(
"t",
m1,
Column("x", Integer),
Column("y", Integer),
UniqueConstraint("x", name="uq1"),
UniqueConstraint("y", name="uq2"),
)
Table("t", m2, Column("x", Integer), Column("y", Integer))
if hook_type == "object":
def include_object(object_, name, type_, reflected, compare_to):
if type_ == "index":
return False
return not (
isinstance(object_, UniqueConstraint)
and type_ == "unique_constraint"
and reflected
and name == "uq1"
)
diffs = self._fixture(m1, m2, object_filters=include_object)
elif hook_type == "name":
all_names = set()
def include_name(name, type_, parent_names):
if type_ == "index":
return False # PostgreSQL thing
all_names.add((name, type_))
if name == "uq1":
eq_(type_, "unique_constraint")
eq_(
parent_names,
{
"table_name": "t",
"schema_name": None,
"schema_qualified_table_name": "t",
},
)
return False
return True
diffs = self._fixture(m1, m2, name_filters=include_name)
eq_(
all_names,
{
("t", "table"),
(None, "schema"),
("uq2", "unique_constraint"),
("x", "column"),
("y", "column"),
("uq1", "unique_constraint"),
},
)
eq_(diffs[0][0], "remove_constraint")
eq_(diffs[0][1].name, "uq2")
eq_(len(diffs), 1)
def test_add_metadata_index(self):
m1 = MetaData()
m2 = MetaData()
Table("t", m1, Column("x", Integer))
t2 = Table("t", m2, Column("x", Integer))
Index("ix1", t2.c.x)
Index("ix2", t2.c.x)
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, Index)
and type_ == "index"
and not reflected
and name == "ix1"
)
diffs = self._fixture(m1, m2, object_filters=include_object)
eq_(diffs[0][0], "add_index")
eq_(diffs[0][1].name, "ix2")
eq_(len(diffs), 1)
@config.requirements.unique_constraint_reflection
def test_add_metadata_unique(self):
m1 = MetaData()
m2 = MetaData()
Table("t", m1, Column("x", Integer))
Table(
"t",
m2,
Column("x", Integer),
UniqueConstraint("x", name="uq1"),
UniqueConstraint("x", name="uq2"),
)
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, UniqueConstraint)
and type_ == "unique_constraint"
and not reflected
and name == "uq1"
)
diffs = self._fixture(m1, m2, object_filters=include_object)
eq_(diffs[0][0], "add_constraint")
eq_(diffs[0][1].name, "uq2")
eq_(len(diffs), 1)
def test_change_index(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table(
"t",
m1,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
Index("ix1", t1.c.x)
Index("ix2", t1.c.y)
t2 = Table(
"t",
m2,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
Index("ix1", t2.c.x, t2.c.y)
Index("ix2", t2.c.x, t2.c.z)
def include_object(object_, name, type_, reflected, compare_to):
return not (
isinstance(object_, Index)
and type_ == "index"
and not reflected
and name == "ix1"
and isinstance(compare_to, Index)
)
diffs = self._fixture(m1, m2, object_filters=include_object)
eq_(diffs[0][0], "remove_index")
eq_(diffs[0][1].name, "ix2")
eq_(diffs[1][0], "add_index")
eq_(diffs[1][1].name, "ix2")
eq_(len(diffs), 2)
@config.requirements.unique_constraint_reflection
def test_change_unique(self):
m1 = MetaData()
m2 = MetaData()
Table(
"t",
m1,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
UniqueConstraint("x", name="uq1"),
UniqueConstraint("y", name="uq2"),
)
Table(
"t",
m2,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
UniqueConstraint("x", "z", name="uq1"),
UniqueConstraint("y", "z", name="uq2"),
)
def include_object(object_, name, type_, reflected, compare_to):
if type_ == "index":
return False
return not (
isinstance(object_, UniqueConstraint)
and type_ == "unique_constraint"
and not reflected
and name == "uq1"
and isinstance(compare_to, UniqueConstraint)
)
diffs = self._fixture(m1, m2, object_filters=include_object)
eq_(diffs[0][0], "remove_constraint")
eq_(diffs[0][1].name, "uq2")
eq_(diffs[1][0], "add_constraint")
eq_(diffs[1][1].name, "uq2")
eq_(len(diffs), 2)
class TruncatedIdxTest(AutogenFixtureTest, TestBase):
def setUp(self):
self.bind = util.testing_engine()
self.bind.dialect.max_identifier_length = 30
def test_idx_matches_long(self):
from alembic.operations.base import conv
m1 = MetaData()
Table(
"q",
m1,
Column("id", Integer, primary_key=True),
Column("data", Integer),
Index(
conv("idx_q_table_this_is_more_than_thirty_characters"), "data"
),
)
diffs = self._fixture(m1, m1)
eq_(diffs, [])
|
different_ways_to_add_parentheses/solution.py | mahimadubey/leetcode-python | 528 | 11065274 | """
Given a string of numbers and operators, return all possible results from
computing all the different possible ways to group numbers and operators. The
valid operators are +, - and *.
Example 1
Input: "2-1-1".
((2-1)-1) = 0
(2-(1-1)) = 2
Output: [0, 2]
Example 2
Input: "2*3-4*5"
(2*(3-(4*5))) = -34
((2*3)-(4*5)) = -14
((2*(3-4))*5) = -10
(2*((3-4)*5)) = -10
(((2*3)-4)*5) = 10
Output: [-34, -14, -10, -10, 10]
"""
class Solution(object):
def diffWaysToCompute(self, input):
"""
:type input: str
:rtype: List[int]
"""
self.operators = set(['+', '-', '*'])
return self.diff_ways(input)
    def calculate(self, a, b, operator):
        # Dispatch on the operator character directly; this matches the
        # eval-based original for the supported +, - and * operators.
        if operator == '+':
            return a + b
        if operator == '-':
            return a - b
        return a * b
def diff_ways(self, inp):
if not inp:
return []
elif inp.isdigit():
return [int(inp)]
else:
res = []
for i, c in enumerate(inp):
if c in self.operators:
left = self.diff_ways(inp[:i])
right = self.diff_ways(inp[i + 1:])
for l in left:
for r in right:
s = self.calculate(l, r, c)
res.append(s)
return res
s1 = '2*3-4*5'
s2 = '11'
s = Solution()
print(s.diffWaysToCompute(s1))
print(s.diffWaysToCompute(s2))
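# Hedged sketch (not part of the original solution): the same substring can
# appear at many split positions, so memoizing per-substring results avoids
# exponential recomputation. The name diff_ways_to_compute_memo and its
# structure are illustrative assumptions.
from functools import lru_cache

def diff_ways_to_compute_memo(expression):
    ops = {'+': lambda a, b: a + b,
           '-': lambda a, b: a - b,
           '*': lambda a, b: a * b}

    @lru_cache(maxsize=None)
    def ways(inp):
        # A pure number has exactly one grouping: itself.
        if inp.isdigit():
            return (int(inp),)
        results = []
        for i, c in enumerate(inp):
            if c in ops:
                # Split at every operator and combine the sub-results.
                for left in ways(inp[:i]):
                    for right in ways(inp[i + 1:]):
                        results.append(ops[c](left, right))
        return tuple(results)

    return list(ways(expression))

# Produces the same multiset of values as Solution().diffWaysToCompute(s1).
print(diff_ways_to_compute_memo('2*3-4*5'))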
|
Alignment/CommonAlignmentProducer/python/ALCARECOMuAlGlobalCosmics_Output_cff.py | ckamtsikis/cmssw | 852 | 11065278 | import FWCore.ParameterSet.Config as cms
# AlCaReco output for track based muon alignment using cosmic ray tracks
OutALCARECOMuAlGlobalCosmics_noDrop = cms.PSet(
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('pathALCARECOMuAlGlobalCosmics')
),
outputCommands = cms.untracked.vstring(
'keep *_ALCARECOMuAlGlobalCosmics_*_*', # selected cosmic muons
'keep *_ALCARECOMuAlGlobalCosmicsGeneralTracks_*_*', # selected general tracks
'keep *_ALCARECOMuAlGlobalCosmicsCombinatorialTF_*_*',
'keep *_ALCARECOMuAlGlobalCosmicsCosmicTF_*_*',
'keep *_ALCARECOMuAlGlobalCosmicsRegionalTF_*_*',
'keep *_muonCSCDigis_*_*',
'keep *_muonDTDigis_*_*',
'keep *_muonRPCDigis_*_*',
'keep *_dt1DRecHits_*_*',
'keep *_dt2DSegments_*_*',
'keep *_dt4DSegments_*_*',
'keep *_csc2DRecHits_*_*',
'keep *_cscSegments_*_*',
'keep *_rpcRecHits_*_*',
'keep L1AcceptBunchCrossings_*_*_*',
'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',
'keep *_TriggerResults_*_*',
'keep DcsStatuss_scalersRawToDigi_*_*',
'keep Si*Cluster*_si*Clusters_*_*', # for cosmics keep original clusters
'keep siStripDigis_DetIdCollection_*_*',
'keep recoMuons_muons1Leg_*_*', # save muons as timing info is needed for BP corrections in deconvolution
)
)
import copy
OutALCARECOMuAlGlobalCosmics = copy.deepcopy(OutALCARECOMuAlGlobalCosmics_noDrop)
OutALCARECOMuAlGlobalCosmics.outputCommands.insert(0, "drop *")
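# Illustrative note (not in the original config): prepending "drop *" makes
# the copied PSet keep only the products explicitly listed above, whereas
# the _noDrop variant leaves all other event content untouched.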
|
chalice/deploy/appgraph.py | Chen188/chalice | 6,240 | 11065284 | import json
import os
from typing import cast
from typing import Dict, List, Tuple, Any, Set, Optional, Text, Union # noqa
from attr import asdict
from chalice.config import Config # noqa
from chalice import app
from chalice.constants import LAMBDA_TRUST_POLICY
from chalice.deploy import models
from chalice.utils import UI # noqa
StrMapAny = Dict[str, Any]
class ChaliceBuildError(Exception):
pass
class ApplicationGraphBuilder(object):
def __init__(self):
# type: () -> None
self._known_roles = {} # type: Dict[str, models.IAMRole]
self._managed_layer = None # type: Optional[models.LambdaLayer]
def build(self, config, stage_name):
# type: (Config, str) -> models.Application
resources = [] # type: List[models.Model]
deployment = models.DeploymentPackage(models.Placeholder.BUILD_STAGE)
for function in config.chalice_app.pure_lambda_functions:
resource = self._create_lambda_model(
config=config, deployment=deployment,
name=function.name, handler_name=function.handler_string,
stage_name=stage_name)
resources.append(resource)
event_resources = self._create_lambda_event_resources(
config, deployment, stage_name)
resources.extend(event_resources)
if config.chalice_app.routes:
rest_api = self._create_rest_api_model(
config, deployment, stage_name
)
resources.append(rest_api)
if config.chalice_app.websocket_handlers:
websocket_api = self._create_websocket_api_model(
config, deployment, stage_name)
resources.append(websocket_api)
return models.Application(stage_name, resources)
def _create_custom_domain_name(
self,
api_type, # type: models.APIType
domain_name_data, # type: StrMapAny
endpoint_configuration, # type: str
api_gateway_stage, # type: str
):
# type: (...) -> models.DomainName
url_prefix = domain_name_data.get("url_prefix", '(none)')
api_mapping_model = self._create_api_mapping_model(
url_prefix, api_gateway_stage)
domain_name = self._create_domain_name_model(
api_type,
domain_name_data,
endpoint_configuration,
api_mapping_model
)
return domain_name
def _create_api_mapping_model(self,
key, # type: str
stage # type: str
):
# type: (...) -> models.APIMapping
if key == '/':
key = '(none)'
return models.APIMapping(
resource_name='api_mapping',
mount_path=key,
api_gateway_stage=stage
)
def _create_lambda_event_resources(self, config, deployment, stage_name):
# type: (Config, models.DeploymentPackage, str) -> List[models.Model]
resources = [] # type: List[models.Model]
for event_source in config.chalice_app.event_sources:
if isinstance(event_source, app.S3EventConfig):
resources.append(
self._create_bucket_notification(
config, deployment, event_source, stage_name
)
)
elif isinstance(event_source, app.SNSEventConfig):
resources.append(
self._create_sns_subscription(
config, deployment, event_source, stage_name,
)
)
elif isinstance(event_source, app.CloudWatchEventConfig):
resources.append(
self._create_cwe_subscription(
config, deployment, event_source, stage_name
)
)
elif isinstance(event_source, app.ScheduledEventConfig):
resources.append(
self._create_scheduled_model(
config, deployment, event_source, stage_name
)
)
elif isinstance(event_source, app.SQSEventConfig):
resources.append(
self._create_sqs_subscription(
config, deployment, event_source, stage_name,
)
)
elif isinstance(event_source, app.KinesisEventConfig):
resources.append(
self._create_kinesis_subscription(
config, deployment, event_source, stage_name,
)
)
elif isinstance(event_source, app.DynamoDBEventConfig):
resources.append(
self._create_ddb_subscription(
config, deployment, event_source, stage_name,
)
)
return resources
def _create_rest_api_model(self,
config, # type: Config
deployment, # type: models.DeploymentPackage
stage_name, # type: str
):
# type: (...) -> models.RestAPI
# Need to mess with the function name for back-compat.
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name='api_handler',
handler_name='app.app', stage_name=stage_name
)
# For backwards compatibility with the old deployer, the
# lambda function for the API handler doesn't have the
# resource_name appended to its complete function_name,
# it's just <app>-<stage>.
function_name = '%s-%s' % (config.app_name, config.chalice_stage)
lambda_function.function_name = function_name
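        # Illustrative example (an assumption, not from the source): for
        # app_name='myapp' and chalice_stage='dev' this yields 'myapp-dev',
        # with no '-api_handler' resource-name suffix appended.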
if config.minimum_compression_size is None:
minimum_compression = ''
else:
minimum_compression = str(config.minimum_compression_size)
authorizers = []
for auth in config.chalice_app.builtin_auth_handlers:
auth_lambda = self._create_lambda_model(
config=config, deployment=deployment, name=auth.name,
handler_name=auth.handler_string, stage_name=stage_name,
)
authorizers.append(auth_lambda)
policy = None
policy_path = config.api_gateway_policy_file
if (config.api_gateway_endpoint_type == 'PRIVATE' and not policy_path):
policy = models.IAMPolicy(
document=self._get_default_private_api_policy(config))
elif policy_path:
policy = models.FileBasedIAMPolicy(
document=models.Placeholder.BUILD_STAGE,
filename=os.path.join(
config.project_dir, '.chalice', policy_path))
custom_domain_name = None
if config.api_gateway_custom_domain:
custom_domain_name = self._create_custom_domain_name(
models.APIType.HTTP,
config.api_gateway_custom_domain,
config.api_gateway_endpoint_type,
config.api_gateway_stage,
)
return models.RestAPI(
resource_name='rest_api',
swagger_doc=models.Placeholder.BUILD_STAGE,
endpoint_type=config.api_gateway_endpoint_type,
minimum_compression=minimum_compression,
api_gateway_stage=config.api_gateway_stage,
lambda_function=lambda_function,
authorizers=authorizers,
policy=policy,
domain_name=custom_domain_name,
xray=config.xray_enabled,
)
def _get_default_private_api_policy(self, config):
# type: (Config) -> StrMapAny
statements = [{
"Effect": "Allow",
"Principal": "*",
"Action": "execute-api:Invoke",
"Resource": "arn:*:execute-api:*:*:*",
"Condition": {
"StringEquals": {
"aws:SourceVpce": config.api_gateway_endpoint_vpce
}
}
}]
return {"Version": "2012-10-17", "Statement": statements}
def _create_websocket_api_model(
self,
config, # type: Config
deployment, # type: models.DeploymentPackage
stage_name, # type: str
):
# type: (...) -> models.WebsocketAPI
connect_handler = None # type: Optional[models.LambdaFunction]
message_handler = None # type: Optional[models.LambdaFunction]
disconnect_handler = None # type: Optional[models.LambdaFunction]
routes = {h.route_key_handled: h.handler_string for h
in config.chalice_app.websocket_handlers.values()}
if '$connect' in routes:
connect_handler = self._create_lambda_model(
config=config, deployment=deployment, name='websocket_connect',
handler_name=routes['$connect'], stage_name=stage_name)
routes.pop('$connect')
if '$disconnect' in routes:
disconnect_handler = self._create_lambda_model(
config=config, deployment=deployment,
name='websocket_disconnect',
handler_name=routes['$disconnect'], stage_name=stage_name)
routes.pop('$disconnect')
if routes:
# If there are left over routes they are message handlers.
handler_string = list(routes.values())[0]
message_handler = self._create_lambda_model(
config=config, deployment=deployment, name='websocket_message',
handler_name=handler_string, stage_name=stage_name
)
custom_domain_name = None
if config.websocket_api_custom_domain:
custom_domain_name = self._create_custom_domain_name(
models.APIType.WEBSOCKET,
config.websocket_api_custom_domain,
config.api_gateway_endpoint_type,
config.api_gateway_stage,
)
return models.WebsocketAPI(
name='%s-%s-websocket-api' % (config.app_name, stage_name),
resource_name='websocket_api',
connect_function=connect_handler,
message_function=message_handler,
disconnect_function=disconnect_handler,
routes=[h.route_key_handled for h
in config.chalice_app.websocket_handlers.values()],
api_gateway_stage=config.api_gateway_stage,
domain_name=custom_domain_name
)
def _create_cwe_subscription(
self,
config, # type: Config
deployment, # type: models.DeploymentPackage
event_source, # type: app.CloudWatchEventConfig
stage_name, # type: str
):
# type: (...) -> models.CloudWatchEvent
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name=event_source.name,
handler_name=event_source.handler_string, stage_name=stage_name
)
resource_name = event_source.name + '-event'
rule_name = '%s-%s-%s' % (config.app_name, config.chalice_stage,
resource_name)
cwe = models.CloudWatchEvent(
resource_name=resource_name,
rule_name=rule_name,
event_pattern=json.dumps(event_source.event_pattern),
lambda_function=lambda_function,
)
return cwe
def _create_scheduled_model(self,
config, # type: Config
deployment, # type: models.DeploymentPackage
event_source, # type: app.ScheduledEventConfig
stage_name, # type: str
):
# type: (...) -> models.ScheduledEvent
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name=event_source.name,
handler_name=event_source.handler_string, stage_name=stage_name
)
# Resource names must be unique across a chalice app.
# However, in the original deployer code, the cloudwatch
# event + lambda function was considered a single resource.
# Now that they're treated as two separate resources we need
# a unique name for the event_source that's not the lambda
# function resource name. We handle this by just appending
# '-event' to the name. Ideally this is handled in app.py
# but we won't be able to do that until the old deployer
# is gone.
resource_name = event_source.name + '-event'
if isinstance(event_source.schedule_expression,
app.ScheduleExpression):
expression = event_source.schedule_expression.to_string()
else:
expression = event_source.schedule_expression
rule_name = '%s-%s-%s' % (config.app_name, config.chalice_stage,
resource_name)
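        # Illustrative example (an assumption): a scheduled handler named
        # 'cron' in app 'myapp', stage 'dev' gets resource_name 'cron-event'
        # and rule_name 'myapp-dev-cron-event'.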
scheduled_event = models.ScheduledEvent(
resource_name=resource_name,
rule_name=rule_name,
rule_description=event_source.description,
schedule_expression=expression,
lambda_function=lambda_function,
)
return scheduled_event
def _create_domain_name_model(self,
protocol, # type: models.APIType
data, # type: StrMapAny
endpoint_type, # type: str
api_mapping # type: models.APIMapping
):
# type: (...) -> models.DomainName
default_name = 'api_gateway_custom_domain'
resource_name_map = {
'HTTP': default_name,
'WEBSOCKET': 'websocket_api_custom_domain'
} # type: Dict[str, str]
domain_name = models.DomainName(
protocol=protocol,
resource_name=resource_name_map.get(protocol.value, default_name),
domain_name=data['domain_name'],
tls_version=models.TLSVersion.create(data.get('tls_version', '')),
certificate_arn=data['certificate_arn'],
tags=data.get('tags'),
api_mapping=api_mapping
)
return domain_name
def _create_lambda_model(self,
config, # type: Config
deployment, # type: models.DeploymentPackage
name, # type: str
handler_name, # type: str
stage_name, # type: str
):
# type: (...) -> models.LambdaFunction
new_config = config.scope(
chalice_stage=config.chalice_stage,
function_name=name
)
role = self._get_role_reference(
new_config, stage_name, name)
resource = self._build_lambda_function(
new_config, name, handler_name,
deployment, role
)
return resource
def _get_managed_lambda_layer(self, config):
# type: (Config) -> Optional[models.LambdaLayer]
if not config.automatic_layer:
return None
if self._managed_layer is None:
self._managed_layer = models.LambdaLayer(
resource_name='managed-layer',
layer_name='%s-%s-%s' % (
config.app_name, config.chalice_stage, 'managed-layer'),
runtime=config.lambda_python_version,
deployment_package=models.DeploymentPackage(
models.Placeholder.BUILD_STAGE)
)
return self._managed_layer
def _get_role_reference(self, config, stage_name, function_name):
# type: (Config, str, str) -> models.IAMRole
role = self._create_role_reference(config, stage_name, function_name)
role_identifier = self._get_role_identifier(role)
if role_identifier in self._known_roles:
# If we've already create a models.IAMRole with the same
# identifier, we'll use the existing object instead of
# creating a new one.
return self._known_roles[role_identifier]
self._known_roles[role_identifier] = role
return role
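    # Illustrative note (an assumption): two functions configured with the
    # same iam_role_arn, or both using the autogenerated 'default-role',
    # resolve to a single models.IAMRole instance via the cache above.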
def _get_role_identifier(self, role):
# type: (models.IAMRole) -> str
if isinstance(role, models.PreCreatedIAMRole):
return role.role_arn
# We know that if it's not a PreCreatedIAMRole, it's
# a managed role, so we're using cast() to make mypy happy.
role = cast(models.ManagedIAMRole, role)
return role.resource_name
def _create_role_reference(self, config, stage_name, function_name):
# type: (Config, str, str) -> models.IAMRole
# First option, the user doesn't want us to manage
# the role at all.
if not config.manage_iam_role:
# We've already validated the iam_role_arn is provided
# if manage_iam_role is set to False.
return models.PreCreatedIAMRole(
role_arn=config.iam_role_arn,
)
policy = models.IAMPolicy(document=models.Placeholder.BUILD_STAGE)
if not config.autogen_policy:
resource_name = '%s_role' % function_name
role_name = '%s-%s-%s' % (config.app_name, stage_name,
function_name)
if config.iam_policy_file is not None:
filename = os.path.join(config.project_dir,
'.chalice',
config.iam_policy_file)
else:
filename = os.path.join(config.project_dir,
'.chalice',
'policy-%s.json' % stage_name)
policy = models.FileBasedIAMPolicy(
filename=filename, document=models.Placeholder.BUILD_STAGE)
else:
resource_name = 'default-role'
role_name = '%s-%s' % (config.app_name, stage_name)
policy = models.AutoGenIAMPolicy(
document=models.Placeholder.BUILD_STAGE,
traits=set([]),
)
return models.ManagedIAMRole(
resource_name=resource_name,
role_name=role_name,
trust_policy=LAMBDA_TRUST_POLICY,
policy=policy,
)
def _get_vpc_params(self, function_name, config):
# type: (str, Config) -> Tuple[List[str], List[str]]
security_group_ids = config.security_group_ids
subnet_ids = config.subnet_ids
if security_group_ids and subnet_ids:
return security_group_ids, subnet_ids
elif not security_group_ids and not subnet_ids:
return [], []
else:
raise ChaliceBuildError(
"Invalid VPC params for function '%s', in order to configure "
"VPC for a Lambda function, you must provide the subnet_ids "
"as well as the security_group_ids, got subnet_ids: %s, "
"security_group_ids: %s" % (function_name,
subnet_ids,
security_group_ids)
)
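    # Illustrative behavior of _get_vpc_params (an assumption, not stated in
    # the source): both lists populated -> returned unchanged; both empty ->
    # ([], []); only one of the two provided -> ChaliceBuildError.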
def _get_lambda_layers(self, config):
# type: (Config) -> List[str]
layers = config.layers
return layers if layers else []
def _build_lambda_function(self,
config, # type: Config
name, # type: str
handler_name, # type: str
deployment, # type: models.DeploymentPackage
role, # type: models.IAMRole
):
# type: (...) -> models.LambdaFunction
function_name = '%s-%s-%s' % (
config.app_name, config.chalice_stage, name)
security_group_ids, subnet_ids = self._get_vpc_params(name, config)
lambda_layers = self._get_lambda_layers(config)
function = models.LambdaFunction(
resource_name=name,
function_name=function_name,
environment_variables=config.environment_variables,
runtime=config.lambda_python_version,
handler=handler_name,
tags=config.tags,
timeout=config.lambda_timeout,
memory_size=config.lambda_memory_size,
deployment_package=deployment,
role=role,
security_group_ids=security_group_ids,
subnet_ids=subnet_ids,
reserved_concurrency=config.reserved_concurrency,
layers=lambda_layers,
managed_layer=self._get_managed_lambda_layer(config),
xray=config.xray_enabled,
)
self._inject_role_traits(function, role)
return function
def _inject_role_traits(self, function, role):
# type: (models.LambdaFunction, models.IAMRole) -> None
if not isinstance(role, models.ManagedIAMRole):
return
policy = role.policy
if not isinstance(policy, models.AutoGenIAMPolicy):
return
if function.security_group_ids and function.subnet_ids:
policy.traits.add(models.RoleTraits.VPC_NEEDED)
def _create_bucket_notification(
self,
config, # type: Config
deployment, # type: models.DeploymentPackage
s3_event, # type: app.S3EventConfig
stage_name, # type: str
):
# type: (...) -> models.S3BucketNotification
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name=s3_event.name,
handler_name=s3_event.handler_string, stage_name=stage_name
)
resource_name = s3_event.name + '-s3event'
s3_bucket = models.S3BucketNotification(
resource_name=resource_name,
bucket=s3_event.bucket,
prefix=s3_event.prefix,
suffix=s3_event.suffix,
events=s3_event.events,
lambda_function=lambda_function,
)
return s3_bucket
def _create_sns_subscription(
self,
config, # type: Config
deployment, # type: models.DeploymentPackage
sns_config, # type: app.SNSEventConfig
stage_name, # type: str
):
# type: (...) -> models.SNSLambdaSubscription
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name=sns_config.name,
handler_name=sns_config.handler_string, stage_name=stage_name
)
resource_name = sns_config.name + '-sns-subscription'
sns_subscription = models.SNSLambdaSubscription(
resource_name=resource_name,
topic=sns_config.topic,
lambda_function=lambda_function,
)
return sns_subscription
def _create_sqs_subscription(
self,
config, # type: Config
deployment, # type: models.DeploymentPackage
sqs_config, # type: app.SQSEventConfig
stage_name, # type: str
):
# type: (...) -> models.SQSEventSource
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name=sqs_config.name,
handler_name=sqs_config.handler_string, stage_name=stage_name
)
resource_name = sqs_config.name + '-sqs-event-source'
queue = '' # type: Union[str, models.QueueARN]
if sqs_config.queue_arn is not None:
queue = models.QueueARN(arn=sqs_config.queue_arn)
elif sqs_config.queue is not None:
queue = sqs_config.queue
batch_window = sqs_config.maximum_batching_window_in_seconds
sqs_event_source = models.SQSEventSource(
resource_name=resource_name,
queue=queue,
batch_size=sqs_config.batch_size,
lambda_function=lambda_function,
maximum_batching_window_in_seconds=batch_window,
)
return sqs_event_source
def _create_kinesis_subscription(
self,
config, # type: Config
deployment, # type: models.DeploymentPackage
kinesis_config, # type: app.KinesisEventConfig
stage_name, # type: str
):
# type: (...) -> models.KinesisEventSource
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name=kinesis_config.name,
handler_name=kinesis_config.handler_string, stage_name=stage_name
)
resource_name = kinesis_config.name + '-kinesis-event-source'
batch_window = kinesis_config.maximum_batching_window_in_seconds
kinesis_event_source = models.KinesisEventSource(
resource_name=resource_name,
stream=kinesis_config.stream,
batch_size=kinesis_config.batch_size,
maximum_batching_window_in_seconds=batch_window,
starting_position=kinesis_config.starting_position,
lambda_function=lambda_function,
)
return kinesis_event_source
def _create_ddb_subscription(
self,
config, # type: Config
deployment, # type: models.DeploymentPackage
ddb_config, # type: app.DynamoDBEventConfig
stage_name, # type: str
):
# type: (...) -> models.DynamoDBEventSource
lambda_function = self._create_lambda_model(
config=config, deployment=deployment, name=ddb_config.name,
handler_name=ddb_config.handler_string, stage_name=stage_name
)
resource_name = ddb_config.name + '-dynamodb-event-source'
batch_window = ddb_config.maximum_batching_window_in_seconds
ddb_event_source = models.DynamoDBEventSource(
resource_name=resource_name,
stream_arn=ddb_config.stream_arn,
batch_size=ddb_config.batch_size,
maximum_batching_window_in_seconds=batch_window,
starting_position=ddb_config.starting_position,
lambda_function=lambda_function,
)
return ddb_event_source
class DependencyBuilder(object):
def __init__(self):
# type: () -> None
pass
def build_dependencies(self, graph):
# type: (models.Model) -> List[models.Model]
seen = set() # type: Set[int]
ordered = [] # type: List[models.Model]
for resource in graph.dependencies():
self._traverse(resource, ordered, seen)
return ordered
def _traverse(self, resource, ordered, seen):
# type: (models.Model, List[models.Model], Set[int]) -> None
for dep in resource.dependencies():
if id(dep) not in seen:
seen.add(id(dep))
self._traverse(dep, ordered, seen)
# If recreating this list is a perf issue later on,
# we can create yet-another set of ids that gets updated
# when we add a resource to the ordered list.
if id(resource) not in [id(r) for r in ordered]:
ordered.append(resource)
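def _demo_dependency_order():
    # Hypothetical usage sketch (not part of the original module): the
    # post-order DFS above guarantees that every dependency is emitted
    # before the resource that depends on it; _FakeModel is a stand-in for
    # models.Model and only needs a dependencies() method.
    class _FakeModel(object):
        def __init__(self, deps=()):
            self._deps = list(deps)
        def dependencies(self):
            return self._deps
    leaf = _FakeModel()
    parent = _FakeModel(deps=[leaf])
    graph = _FakeModel(deps=[parent])
    ordered = DependencyBuilder().build_dependencies(graph)
    assert ordered == [leaf, parent]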
class GraphPrettyPrint(object):
_NEW_SECTION = u'\u251c\u2500\u2500'
_LINE_VERTICAL = u'\u2502'
def __init__(self, ui):
# type: (UI) -> None
self._ui = ui
def display_graph(self, graph):
# type: (models.Model) -> None
self._ui.write("Application\n")
for model in graph.dependencies():
self._traverse(model, level=0)
def _traverse(self, graph, level):
# type: (models.Model, int) -> None
prefix = ('%s ' % self._LINE_VERTICAL) * level
spaces = prefix + self._NEW_SECTION + ' '
model_text = self._get_model_text(graph, spaces, level)
current_line = cast(str, '%s%s\n' % (spaces, model_text))
self._ui.write(current_line)
for model in graph.dependencies():
self._traverse(model, level + 1)
def _get_model_text(self, model, spaces, level):
# type: (models.Model, Text, int) -> Text
name = model.__class__.__name__
filtered = self._get_filtered_params(model)
if not filtered:
return '%s()' % name
total_len_prefix = len(spaces) + len(name) + 1
prefix = ('%s ' % self._LINE_VERTICAL) * (level + 2)
full = '%s%s' % (prefix, ' ' * (total_len_prefix - len(prefix)))
param_items = list(filtered.items())
first = param_items[0]
remaining = param_items[1:]
lines = ['%s(%s=%s,' % (name, first[0], first[1])]
self._add_remaining_lines(lines, remaining, full)
return '\n'.join(lines) + ')'
def _add_remaining_lines(self, lines, remaining, full):
# type: (List[str], List[Tuple[str, Any]], Text) -> None
for key, value in remaining:
if isinstance(value, (list, dict)):
value = key.upper()
current = cast(str, '%s%s=%s,' % (full, key, value))
lines.append(current)
def _get_filtered_params(self, model):
# type: (models.Model) -> StrMapAny
dependencies = model.dependencies()
filtered = asdict(
model, filter=lambda _, v: v not in dependencies and v)
return filtered
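def _demo_graph_pretty_print(graph):
    # Hypothetical usage sketch (not part of the original module):
    # GraphPrettyPrint only needs an object with a write() method, so a
    # list-backed fake UI is enough to capture the rendered tree for any
    # models.Model graph passed in.
    class _FakeUI(object):
        def __init__(self):
            self.lines = []
        def write(self, text):
            self.lines.append(text)
    ui = _FakeUI()
    GraphPrettyPrint(ui).display_graph(graph)
    return ''.join(ui.lines)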
|
tests/unit/small_text/utils/test_clustering.py | chschroeder/small-text | 218 | 11065293 | import unittest
import numpy as np
from unittest.mock import patch
from small_text.utils.clustering import init_kmeans_plusplus_safe
class ClusteringTest(unittest.TestCase):
@patch('small_text.utils.clustering.warnings.warn')
@patch('small_text.utils.clustering.choice')
@patch('small_text.utils.clustering.kmeans_plusplus')
def test_init_kmeans_plusplus_safe_normal(self, kmeans_plusplus_mock, choice_mock, warn_mock):
X = np.random.rand(100, 10)
result_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 42, 99])
kmeans_plusplus_mock.return_value = [X[result_indices], result_indices]
centers, indices = init_kmeans_plusplus_safe(X, 10)
kmeans_plusplus_mock.assert_called()
choice_mock.assert_not_called()
warn_mock.assert_not_called()
self.assertEqual((10, 10), centers.shape)
self.assertEqual(10, indices.shape[0])
@patch('small_text.utils.clustering.warnings.warn')
@patch('small_text.utils.clustering.choice', wraps=np.random.choice)
@patch('small_text.utils.clustering.kmeans_plusplus')
def test_init_kmeans_plusplus_safe_duplicate_indices(self, kmeans_plusplus_mock, choice_mock,
warn_mock):
X = np.random.rand(100, 10)
result_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 42, 42]) # 42 is not unique here
kmeans_plusplus_mock.return_value = [X[result_indices], result_indices]
centers, indices = init_kmeans_plusplus_safe(X, 10)
kmeans_plusplus_mock.assert_called()
choice_mock.assert_called()
warn_mock.assert_called()
self.assertEqual((10, 10), centers.shape)
self.assertEqual(10, indices.shape[0])
@patch('small_text.utils.clustering.warnings.warn')
@patch('small_text.utils.clustering.choice', wraps=np.random.choice)
@patch('small_text.utils.clustering.kmeans_plusplus')
def test_init_kmeans_plusplus_safe_duplicate_indices_warning_suppressed(
self, kmeans_plusplus_mock, choice_mock, warn_mock):
X = np.random.rand(100, 10)
result_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 42, 42]) # 42 is not unique here
kmeans_plusplus_mock.return_value = [X[result_indices], result_indices]
centers, indices = init_kmeans_plusplus_safe(X, 10, suppress_warning=True)
kmeans_plusplus_mock.assert_called()
choice_mock.assert_called()
warn_mock.assert_not_called()
self.assertEqual((10, 10), centers.shape)
self.assertEqual(10, indices.shape[0])
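# Usage sketch (assuming small-text and numpy are installed): outside of the
# mocked tests above, init_kmeans_plusplus_safe behaves like sklearn's
# kmeans_plusplus but guarantees unique center indices, e.g.:
#   X = np.random.rand(100, 10)
#   centers, indices = init_kmeans_plusplus_safe(X, 10, suppress_warning=True)
#   assert np.unique(indices).shape[0] == 10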
|
detectron2/modeling/meta_arch/rcnn.py | Nour-7/detectron2 | 171 | 11065300
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
from typing import Optional, Tuple
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import ImageList
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from ..backbone import Backbone, build_backbone
from ..postprocessing import detector_postprocess
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
    Generalized R-CNN. Any model that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
):
"""
NOTE: this interface is experimental.
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
            pixel_mean, pixel_std: list or tuple with #channels elements,
representing the per-channel mean and std to be used to normalize
the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1))
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 predicted object
proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
same as in :meth:`forward`.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x.to(self.device) for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
@staticmethod
def _postprocess(instances, batched_inputs, image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
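def _demo_single_image_inference(model, image_chw):
    # Hypothetical sketch (not part of detectron2): builds the minimal
    # batched_inputs format documented in GeneralizedRCNN.forward for a
    # single (C, H, W) image tensor and runs inference on it.
    inputs = [{
        "image": image_chw,
        "height": image_chw.shape[1],
        "width": image_chw.shape[2],
    }]
    model.eval()
    with torch.no_grad():
        return model(inputs)[0]["instances"]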
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results
|
packs/qualys/actions/get_host_range.py | userlocalhost2000/st2contrib | 164 | 11065322
from lib.base import QualysBaseAction
__all__ = [
'GetHostRangeAction'
]
class GetHostRangeAction(QualysBaseAction):
def run(self, host_start, host_end):
host = self.connection.getHostRange(host_start, host_end)
return self.resultsets.formatter(host)
|
docarray/array/mixins/match.py | qdrant/docarray | 591 | 11065330 | from typing import Optional, Union, Callable, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
import numpy as np
from ...typing import ArrayType
from ... import DocumentArray
class MatchMixin:
"""A mixin that provides match functionality to DocumentArrays"""
def match(
self,
darray: 'DocumentArray',
metric: Union[
str, Callable[['ArrayType', 'ArrayType'], 'np.ndarray']
] = 'cosine',
limit: Optional[Union[int, float]] = 20,
normalization: Optional[Tuple[float, float]] = None,
metric_name: Optional[str] = None,
batch_size: Optional[int] = None,
exclude_self: bool = False,
only_id: bool = False,
use_scipy: bool = False,
device: str = 'cpu',
num_worker: Optional[int] = 1,
**kwargs,
) -> None:
"""Compute embedding based nearest neighbour in `another` for each Document in `self`,
and store results in `matches`.
.. note::
'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.
You can use other distance metric provided by ``scipy``, such as `braycurtis`, `canberra`, `chebyshev`,
`cityblock`, `correlation`, `cosine`, `dice`, `euclidean`, `hamming`, `jaccard`, `jensenshannon`,
`kulsinski`, `mahalanobis`, `matching`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`,
`sokalmichener`, `sokalsneath`, `sqeuclidean`, `wminkowski`, `yule`.
To use scipy metric, please set ``use_scipy=True``.
- To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``
- To invert the distance as score and make all values in range [0, 1],
use ``dA.match(dB, normalization=(1, 0))``. Note, how ``normalization`` differs from the previous.
- If a custom metric distance is provided. Make sure that it returns scores as distances and not similarity, meaning the smaller the better.
:param darray: the other DocumentArray to match against
:param metric: the distance metric
:param limit: the maximum number of matches, when not given defaults to 20.
:param normalization: a tuple [a, b] to be used with min-max normalization,
the min distance will be rescaled to `a`, the max distance will be rescaled to `b`
all values will be rescaled into range `[a, b]`.
:param metric_name: if provided, then match result will be marked with this string.
:param batch_size: if provided, then ``darray`` is loaded in batches, where each of them is at most ``batch_size``
elements. When `darray` is big, this can significantly speedup the computation.
:param exclude_self: if set, Documents in ``darray`` with same ``id`` as the left-hand values will not be
considered as matches.
:param only_id: if set, then returning matches will only contain ``id``
:param use_scipy: if set, use ``scipy`` as the computation backend. Note, ``scipy`` does not support distance
on sparse matrix.
:param device: the computational device for ``.match()``, can be either `cpu` or `cuda`.
:param num_worker: the number of parallel workers. If not given, then the number of CPUs in the system will be used.
.. note::
This argument is only effective when ``batch_size`` is set.
:param kwargs: other kwargs.
"""
if not (self and darray):
return
for d in self:
d.matches.clear()
match_docs = darray.find(
self,
metric=metric,
limit=limit,
normalization=normalization,
metric_name=metric_name,
batch_size=batch_size,
exclude_self=exclude_self,
only_id=only_id,
use_scipy=use_scipy,
device=device,
num_worker=num_worker,
)
if not isinstance(match_docs, list):
match_docs = [match_docs]
for m, d in zip(match_docs, self):
d.matches = m
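# Usage sketch (assuming the docarray package, with numpy embeddings): match
# left against right with scores rescaled so 1.0 is the closest match, as the
# normalization notes above describe, e.g.:
#   left, right = DocumentArray.empty(3), DocumentArray.empty(5)
#   left.embeddings = np.random.rand(3, 8)
#   right.embeddings = np.random.rand(5, 8)
#   left.match(right, metric='cosine', limit=2, normalization=(1, 0))
#   print(left[0].matches[0].scores['cosine'].value)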
|
src/lib/tracker/basetrack.py | EvelynYihuiYang/MCMOT | 306 | 11065333 | # encoding=utf-8
import numpy as np
from collections import defaultdict
from collections import OrderedDict
class TrackState(object):
New = 0
Tracked = 1
Lost = 2
Removed = 3
# TODO: Create a multi-object class BaseTrack class
class MCBaseTrack(object):
_count_dict = defaultdict(int) # the MCBaseTrack class owns this dict
track_id = 0
is_activated = False
state = TrackState.New
history = OrderedDict()
features = []
curr_feature = None
score = 0
start_frame = 0
frame_id = 0
time_since_update = 0
# multi-camera
location = (np.inf, np.inf)
@property
def end_frame(self):
return self.frame_id
@staticmethod
def next_id(cls_id):
MCBaseTrack._count_dict[cls_id] += 1
return MCBaseTrack._count_dict[cls_id]
# @even: reset track id
@staticmethod
def init_count(num_classes):
"""
        Initialize _count_dict for all object classes.
:param num_classes:
"""
for cls_id in range(num_classes):
MCBaseTrack._count_dict[cls_id] = 0
@staticmethod
def reset_track_count(cls_id):
MCBaseTrack._count_dict[cls_id] = 0
def activate(self, *args):
raise NotImplementedError
def predict(self):
raise NotImplementedError
def update(self, *args, **kwargs):
raise NotImplementedError
def mark_lost(self):
self.state = TrackState.Lost
def mark_removed(self):
self.state = TrackState.Removed
class BaseTrack(object):
_count = 0
track_id = 0
is_activated = False
state = TrackState.New
history = OrderedDict()
features = []
curr_feature = None
score = 0
start_frame = 0
frame_id = 0
time_since_update = 0
# multi-camera
location = (np.inf, np.inf)
@property
def end_frame(self):
return self.frame_id
@staticmethod
def next_id():
BaseTrack._count += 1
return BaseTrack._count
    # @even: reset track id
@staticmethod
def reset_track_count():
BaseTrack._count = 0
def activate(self, *args):
raise NotImplementedError
def predict(self):
raise NotImplementedError
def update(self, *args, **kwargs):
raise NotImplementedError
def mark_lost(self):
self.state = TrackState.Lost
def mark_removed(self):
self.state = TrackState.Removed
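def _demo_per_class_track_ids():
    # Hypothetical sketch (not part of the original module): MCBaseTrack
    # keeps one id counter per object class, so ids for different classes
    # are assigned independently.
    MCBaseTrack.init_count(num_classes=2)
    assert MCBaseTrack.next_id(cls_id=0) == 1
    assert MCBaseTrack.next_id(cls_id=1) == 1
    assert MCBaseTrack.next_id(cls_id=0) == 2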
|
onmt/inputters/datareader_base.py | l-k-11235/OpenNMT-py | 5,864 | 11065354 | # coding: utf-8
# several data readers need optional dependencies. There's no
# appropriate builtin exception
class MissingDependencyException(Exception):
pass
class DataReaderBase(object):
"""Read data from file system and yield as dicts.
Raises:
onmt.inputters.datareader_base.MissingDependencyException: A number
of DataReaders need specific additional packages.
If any are missing, this will be raised.
"""
@classmethod
def from_opt(cls, opt):
"""Alternative constructor.
Args:
opt (argparse.Namespace): The parsed arguments.
"""
return cls()
@classmethod
def _read_file(cls, path):
"""Line-by-line read a file as bytes."""
with open(path, "rb") as f:
for line in f:
yield line
@staticmethod
def _raise_missing_dep(*missing_deps):
"""Raise missing dep exception with standard error message."""
raise MissingDependencyException(
"Could not create reader. Be sure to install "
"the following dependencies: " + ", ".join(missing_deps))
def read(self, data, side):
"""Read data from file system and yield as dicts."""
raise NotImplementedError()
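class _DemoTextReader(DataReaderBase):
    # Hypothetical example (not part of OpenNMT): a minimal concrete reader
    # that decodes the byte lines yielded by _read_file into dicts.
    def read(self, data, side):
        for i, line in enumerate(self._read_file(data)):
            yield {side: line.decode("utf-8").strip(), "indices": i}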
|
MangoFuzz/fuzzer/blenders/string_blender.py | fengjixuchui/difuze | 347 | 11065367 | from blender import Blender
from ..utils import *
import random
class StringBlender(Blender):
"""
String Blender.
This generates random string of provided size.
"""
supported_types = ["String"]
def __init__(self, engine_obj):
super(StringBlender, self).__init__(engine_obj)
thick_peel("Created a StringBlender")
def getSupportedTypes(self):
return list(StringBlender.supported_types)
def blend(self, old_data, *additional_data):
# this guy expects the number of bytes to be one
# of the argument
if len(additional_data) > 0:
num_bytes = int(additional_data[0])
else:
num_bytes = random.randint(1, 1024)
thick_peel("Called StringBlender without size, using:%d", num_bytes)
# generate random string.
to_ret = ''
for x in range(num_bytes):
to_ret += chr(random.randint(0, 0xff))
return to_ret
def canHandle(self, target_type):
return target_type in StringBlender.supported_types
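# Usage sketch (engine_obj is a placeholder; the Blender base class is
# assumed to accept it): blend() returns a random string of the requested
# size, or of a random size between 1 and 1024 bytes when none is given:
#   blender = StringBlender(engine_obj)
#   assert len(blender.blend(None, 16)) == 16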
|
GreedyInfoMax/audio/models/loss_supervised_phones.py | weiaicunzai/pytorch-faster-rcnn | 288 | 11065369
import torch.nn as nn
import torch
from GreedyInfoMax.audio.data import phone_dict
from GreedyInfoMax.audio.models import loss
class Phones_Loss(loss.Loss):
def __init__(self, opt, hidden_dim, calc_accuracy):
super(Phones_Loss, self).__init__()
self.opt = opt
self.phone_dict = phone_dict.load_phone_dict(opt)
self.hidden_dim = hidden_dim
self.calc_accuracy = calc_accuracy
# create linear classifier
self.linear_classifier = nn.Sequential(
nn.Linear(self.hidden_dim, 41)
).to(self.opt.device) # 41 different phones to differentiate
self.phones_loss = nn.CrossEntropyLoss()
self.label_num = 128
def get_loss(self, x, z, c, filename, start_idx):
total_loss, accuracies = self.calc_supervised_phones_loss(
c, filename, start_idx
)
return total_loss, accuracies
def calc_supervised_phones_loss(self, c, filename, start_idx):
"""
        Calculates the loss for fully supervised training using the provided phone labels.
        Since there are labels for every 10ms of input, we need to downscale the output of
        the trained layer to 128 values first, which is done by adaptive average pooling.
:param c: output of the layer to be trained
:param filename: filenames of the current files in the batch
:param start_idx: idx within the audio-files for the current files in the batch
:return: loss and accuracy
"""
targets = torch.zeros(self.opt.batch_size, self.label_num ).long()
for idx, cur_audio_idx in enumerate(start_idx):
targets[idx, :] = torch.LongTensor(
self.phone_dict[filename[idx]][
                    (cur_audio_idx - 80) // 160 : (cur_audio_idx - 80 + 20480) // 160
]
)
targets = targets.to(self.opt.device).reshape(-1)
# forward pass
c = c.permute(0, 2, 1)
pooled_c = nn.functional.adaptive_avg_pool1d(c, self.label_num)
pooled_c = pooled_c.permute(0, 2, 1).reshape(-1, self.hidden_dim)
phones_out = self.linear_classifier(pooled_c)
loss = self.phones_loss(phones_out, targets)
accuracy = torch.zeros(1)
# calculate accuracy
if self.calc_accuracy:
_, predicted = torch.max(phones_out.data, 1)
total = targets.size(0)
correct = (predicted == targets).sum().item()
accuracy[0] = correct / total
return loss, accuracy
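# Shape sketch (hypothetical sizes): the permute/pool/permute sequence above
# turns the layer output c of shape (batch, time, hidden) into one vector per
# phone label, e.g. with batch=8, time=256 and hidden_dim=512:
#   c.permute(0, 2, 1)                    -> (8, 512, 256)
#   adaptive_avg_pool1d(..., 128)         -> (8, 512, 128)
#   permute + reshape(-1, hidden_dim)     -> (8 * 128, 512), one row per label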
|
model.py | ishit/xfields | 137 | 11065395 |
import tensorflow as tf
from tensorlayer.layers import PadLayer,Conv2d,UpSampling2dLayer,InputLayer,ConcatLayer
import numpy as np
def upsampling_factor_padding(h_res,w_res):
res_temp = h_res
py =[res_temp%2]
while res_temp!=1:
res_temp = res_temp//2
py.append(res_temp%2)
del py[-1]
py = np.flip(py)
res_temp = w_res
px =[res_temp%2]
while res_temp!=1:
res_temp = res_temp//2
px.append(res_temp%2)
del px[-1]
px = np.flip(px)
lx = len(px)
ly = len(py)
up_x = 2*np.ones((lx))
up_y = 2*np.ones((ly))
if lx > ly:
py = np.append(py,[0]*(lx-ly))
up_y = np.append(up_y,[1]*(lx-ly))
if ly > lx:
px = np.append(px,[0]*(ly-lx))
up_x = np.append(up_x,[1]*(ly-lx))
return px,py,up_x,up_y
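# Worked example (values computed from the function above): for h_res=6 and
# w_res=8 it returns
#   px = [0, 0, 0], py = [1, 0, 0], up_x = [2, 2, 2], up_y = [2, 2, 1]
# so the decoder grows a 1x1 input as 1x1 -> 2x2 -> (pad one row) 3x2
# -> 6x4 -> 6x8, reaching exactly the requested h_res x w_res.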
def gen_flow(batch_input,num_out):
padding_d = [[0,0],[1,1],[1,1],[0,0]]
batch_input = PadLayer(batch_input,padding_d,"REFLECT")
network = Conv2d(batch_input, n_filter=num_out, filter_size=(3, 3),strides=(1, 1), act = tf.tanh, padding='VALID',W_init=tf.random_normal_initializer(0, 0.02),b_init = tf.constant_initializer(value=0.0))
return network.outputs
def conv_layer(batch_input, out_channels,padding_d,fs):
batch_input = PadLayer(batch_input,padding_d,"REFLECT")
network = Conv2d(batch_input, n_filter=out_channels, filter_size=(fs,fs),strides=(1, 1), act=tf.nn.leaky_relu, padding='VALID',W_init=tf.random_normal_initializer(0, 0.02),b_init = tf.constant_initializer(value=0.0))
return network
def Flow(input_coordinates,h_res,w_res,num_out,ngf,min_,max_):
# we calculated the amount of padding for each layer and
# the total number of upsampling in each dimension to output the resolution h_res*w_res.
padx,pady,up_x,up_y = upsampling_factor_padding(h_res,w_res)
num_l = len(padx)
layer_specs = [ngf*16, ngf*16 , ngf*16 , ngf*8 , ngf*8 , ngf*8 , ngf*4 ]
layer_specs.extend([ngf*4]*(num_l-len(layer_specs)))
# coordconv layer
coordconv = tf.constant([[[[min_, min_],
[max_, min_]],
[[min_, max_],
[max_, max_]]]],dtype=tf.float32)
coordconv_tl = InputLayer(tf.tile(coordconv,[input_coordinates.shape[0],1,1,1]))
output = InputLayer(input_coordinates)
for num,num_filter in enumerate(layer_specs):
with tf.variable_scope("layer_%d" % (num)):
upsampled = UpSampling2dLayer(output,(up_y[num],up_x[num]),True,0,True)
if num == 0:
padding = [[0,0],[0,pady[num]],[0,padx[num]],[0,0]]
output = conv_layer(upsampled,num_filter,padding,1)
coordconv_tl = PadLayer(coordconv_tl,padding,"REFLECT")
# concatenating the coordconv layer
output = ConcatLayer([output,coordconv_tl],-1)
else:
padding = [[0,0],[1,1 + pady[num]],[1,1 + padx[num]],[0,0]]
output = conv_layer(upsampled,num_filter,padding,3)
with tf.variable_scope("outputs_flows"):
flows = gen_flow(output,num_out)
return flows
|
gratipay/models/country.py | kant/gratipay.com | 517 | 11065408 | from __future__ import absolute_import, division, print_function, unicode_literals
from postgres.orm import Model
class Country(Model):
"""Represent country records from our database (read-only).
:var int id: the record's primary key in our ``countries`` table
:var unicode code: the country's `ISO 3166-1 alpha-2`_ code
.. _ISO 3166-1 alpha-2 : https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
"""
typname = 'countries'
@classmethod
def from_code(cls, code):
return cls.db.one("SELECT countries.*::countries FROM countries WHERE code=%s", (code,))
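# Usage sketch (assumes the Country model is registered with a postgres.py
# database connection): look up a record by its ISO 3166-1 alpha-2 code:
#   usa = Country.from_code('US')  # a Country instance, or None if unknown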
|
test.py | DLPerf/PointASNL | 209 | 11065413 | import tensorflow as tf
import numpy as np
import argparse
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import modelnet_dataset
import provider
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--data', default='data/modelnet40_normal_resampled/', help='Data path')
parser.add_argument('--model', default='pointasnl_cls', help='Model name. [default: pointasnl_cls]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [1024/512/256/128/64] [default: 1024]')
parser.add_argument('--model_path', required=True, help='Model checkpoint file path')
parser.add_argument('--dump_dir', default='log/dump/', help='Dump folder path [dump]')
parser.add_argument('--normal', type=lambda s: s.lower() in ('true', '1', 'yes'), default=True, help='Whether use normal information [default: True]')
parser.add_argument('--num_votes', type=int, default=5, help='Aggregate classification scores from multiple test [default: 5]')
parser.add_argument('--AS', action='store_true', help='Whether use adaptive sampling [default: False]')
parser.add_argument('--noise', action='store_true', help='Also evaluate robustness with added noisy points [1/10/50/100]')
FLAGS = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
NOISE_POINT = [1,10,50,100]
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MODEL_PATH = FLAGS.model_path
GPU_INDEX = FLAGS.gpu
MODEL = importlib.import_module(FLAGS.model) # import network module
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
NUM_CLASSES = 40
SHAPE_NAMES = [line.rstrip() for line in open(os.path.join(FLAGS.data,'modelnet40_shape_names.txt'))]
HOSTNAME = socket.gethostname()
# Official train/test split
assert (NUM_POINT <= 10000)
DATA_PATH = FLAGS.data
TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test',
normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def evaluate(num_votes):
with tf.device('/gpu:0'):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, use_normal=FLAGS.normal)
is_training_pl = tf.placeholder(tf.bool, shape=())
# simple model
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, use_normal=FLAGS.normal, adaptive_sample=FLAGS.AS)
MODEL.get_loss(pred, labels_pl, end_points)
losses = tf.get_collection('losses')
total_loss = tf.add_n(losses, name='total_loss')
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': total_loss}
log_string('*** Evaluation ***')
acc = eval_one_epoch(sess, ops, num_votes)
if FLAGS.noise:
noise_acc = []
txt = 'Noise Accuracy\n'
txt += ' 000 %.3f\n' %acc
for noise_num in NOISE_POINT:
log_string('\n*** Evaluation with %d Noisy Points ***' % noise_num)
tem_acc = (eval_one_epoch(sess, ops, num_votes, NUM_NOISY_POINT=noise_num))
noise_acc.append(tem_acc)
txt += ' %03d %.3f\n' % (noise_num, tem_acc)
log_string(txt)
def eval_one_epoch(sess, ops, num_votes=1, NUM_NOISY_POINT=0):
is_training = False
# Make sure batch data is of same size
cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel()))
cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
num_batch = int(len(TEST_DATASET) / BATCH_SIZE)
total_correct = 0
total_object = 0
total_seen = 0
loss_sum = 0
batch_idx = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
with tqdm(total=num_batch) as pbar:
while TEST_DATASET.has_next_batch():
batch_data, batch_label = TEST_DATASET.next_batch()
# for the last batch in the epoch, the bsize:end are from last batch
bsize = batch_data.shape[0]
# noisy robustness
if NUM_NOISY_POINT > 0:
noisy_point = np.random.random((bsize, NUM_NOISY_POINT, 3))
noisy_point = provider.normalize_data(noisy_point)
batch_data[:bsize, :NUM_NOISY_POINT, :3] = noisy_point
loss_vote = 0
cur_batch_data[0:bsize,...] = batch_data
cur_batch_label[0:bsize] = batch_label
batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes
for vote_idx in range(num_votes):
# Shuffle point order to achieve different farthest samplings
shuffled_indices = np.arange(NUM_POINT)
np.random.shuffle(shuffled_indices)
                feed_dict = {ops['pointclouds_pl']: cur_batch_data[:, shuffled_indices, :],
                             ops['labels_pl']: cur_batch_label,
                             ops['is_training_pl']: is_training}
loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict)
batch_pred_sum += pred_val
loss_vote += loss_val
loss_vote /= num_votes
pred_val = np.argmax(batch_pred_sum, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_vote
batch_idx += 1
total_object += BATCH_SIZE
for i in range(bsize):
l = batch_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i] == l)
pbar.update(1)
log_string('Eval mean loss: %f' % (loss_sum / float(total_object)))
log_string('Eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('Eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
for i, name in enumerate(SHAPE_NAMES):
log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
TEST_DATASET.reset()
return total_correct / float(total_seen)
if __name__=='__main__':
with tf.Graph().as_default():
evaluate(num_votes=FLAGS.num_votes)
LOG_FOUT.close()
|
mayan/apps/sources/tests/__init__.py | atitaya1412/Mayan-EDMS | 336 | 11065416
from .source_backends import ( # NOQA
SourceBackendSimple, SourceBackendTestPeriodic, SourceBackendTestEmail
)
|
knowledge/admin.py | bitmazk/django-knowledge | 199 | 11065424
from django.contrib import admin
from knowledge.models import Question, Response, Category
class CategoryAdmin(admin.ModelAdmin):
list_display = [f.name for f in Category._meta.fields]
prepopulated_fields = {'slug': ('title', )}
admin.site.register(Category, CategoryAdmin)
class QuestionAdmin(admin.ModelAdmin):
list_display = [f.name for f in Question._meta.fields]
list_select_related = True
raw_id_fields = ['user']
admin.site.register(Question, QuestionAdmin)
class ResponseAdmin(admin.ModelAdmin):
list_display = [f.name for f in Response._meta.fields]
list_select_related = True
raw_id_fields = ['user', 'question']
admin.site.register(Response, ResponseAdmin)
|
modules/dbnd-airflow-monitor/src/airflow_monitor/airflow_monitor_utils.py | busunkim96/dbnd | 224 | 11065428 | import logging
from airflow_monitor.common.metric_reporter import METRIC_REPORTER
logger = logging.getLogger(__name__)
def log_received_tasks(url, fetched_data):
if not fetched_data:
return
try:
d = sorted(
[
(k, len(v))
for k, v in fetched_data.items()
if hasattr(v, "__len__") and not isinstance(v, str)
]
)
if "since" in fetched_data:
d.append(("since", fetched_data["since"]))
logger.info(
"Received data from %s with: {%s}",
url,
", ".join(["{}: {}".format(k, v) for k, v in d]),
)
except Exception as e:
logging.warning("Could not log received data. %s", e)
def send_metrics(airflow_instance_label, fetched_data):
if not fetched_data:
return
try:
metrics = fetched_data.get("metrics")
logger.debug("Received metrics from airflow plugin: %s", metrics)
observe_many(
airflow_instance_label,
METRIC_REPORTER.performance,
metrics.get("performance", None),
)
observe_many(
airflow_instance_label, METRIC_REPORTER.sizes, metrics.get("sizes", None)
)
except Exception as e:
logger.error("Failed to send plugin metrics. %s", e)
def observe_many(airflow_instance_label, summary, data):
if not data:
return
for metric_name, value in data.items():
        summary.labels(airflow_instance_label, metric_name.lstrip("_")).observe(value)
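# Payload sketch (hypothetical metric names and values): the shape that
# send_metrics() expects from the airflow plugin response, e.g.:
#   fetched_data = {
#       "metrics": {
#           "performance": {"_fetch_duration": 0.42},
#           "sizes": {"dag_runs": 17},
#       }
#   }
#   send_metrics("airflow-prod", fetched_data)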
|
lib/config/config.py | SunwungLee/Faster-RCNN-TensorFlow-Python3 | 433 | 11065434
import os
import os.path as osp
import numpy as np
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
FLAGS2 = {}
######################
# General Parameters #
######################
FLAGS2["pixel_means"] = np.array([[[102.9801, 115.9465, 122.7717]]])
tf.app.flags.DEFINE_integer('rng_seed', 3, "Tensorflow seed for reproducibility")
######################
# Network Parameters #
######################
tf.app.flags.DEFINE_string('network', "vgg16", "The network to be used as backbone")
#######################
# Training Parameters #
#######################
tf.app.flags.DEFINE_float('weight_decay', 0.0005, "Weight decay, for regularization")
tf.app.flags.DEFINE_float('learning_rate', 0.001, "Learning rate")
tf.app.flags.DEFINE_float('momentum', 0.9, "Momentum")
tf.app.flags.DEFINE_float('gamma', 0.1, "Factor for reducing the learning rate")
tf.app.flags.DEFINE_integer('batch_size', 256, "Network batch size during training")
tf.app.flags.DEFINE_integer('max_iters', 40000, "Max iteration")
tf.app.flags.DEFINE_integer('step_size', 30000, "Step size for reducing the learning rate, currently only support one step")
tf.app.flags.DEFINE_integer('display', 10, "Iteration intervals for showing the loss during training, on command line interface")
tf.app.flags.DEFINE_string('initializer', "truncated", "Network initialization parameters")
tf.app.flags.DEFINE_string('pretrained_model', "./data/imagenet_weights/vgg16.ckpt", "Pretrained network weights")
tf.app.flags.DEFINE_boolean('bias_decay', False, "Whether to have weight decay on bias as well")
tf.app.flags.DEFINE_boolean('double_bias', True, "Whether to double the learning rate for bias")
tf.app.flags.DEFINE_boolean('use_all_gt', True, "Whether to use all ground truth bounding boxes for training, "
"For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''")
tf.app.flags.DEFINE_integer('max_size', 1000, "Max pixel size of the longest side of a scaled input image")
tf.app.flags.DEFINE_integer('test_max_size', 1000, "Max pixel size of the longest side of a scaled input image")
tf.app.flags.DEFINE_integer('ims_per_batch', 1, "Images to use per minibatch")
tf.app.flags.DEFINE_integer('snapshot_iterations', 5000, "Iteration to take snapshot")
FLAGS2["scales"] = (600,)
FLAGS2["test_scales"] = (600,)
######################
# Testing Parameters #
######################
tf.app.flags.DEFINE_string('test_mode', "top", "Test mode for bbox proposal") # nms, top
##################
# RPN Parameters #
##################
tf.app.flags.DEFINE_float('rpn_negative_overlap', 0.3, "IOU < thresh: negative example")
tf.app.flags.DEFINE_float('rpn_positive_overlap', 0.7, "IOU >= thresh: positive example")
tf.app.flags.DEFINE_float('rpn_fg_fraction', 0.5, "Max number of foreground examples")
tf.app.flags.DEFINE_float('rpn_train_nms_thresh', 0.7, "NMS threshold used on RPN proposals")
tf.app.flags.DEFINE_float('rpn_test_nms_thresh', 0.7, "NMS threshold used on RPN proposals")
tf.app.flags.DEFINE_integer('rpn_train_pre_nms_top_n', 12000, "Number of top scoring boxes to keep before apply NMS to RPN proposals")
tf.app.flags.DEFINE_integer('rpn_train_post_nms_top_n', 2000, "Number of top scoring boxes to keep before apply NMS to RPN proposals")
tf.app.flags.DEFINE_integer('rpn_test_pre_nms_top_n', 6000, "Number of top scoring boxes to keep before apply NMS to RPN proposals")
tf.app.flags.DEFINE_integer('rpn_test_post_nms_top_n', 300, "Number of top scoring boxes to keep before apply NMS to RPN proposals")
tf.app.flags.DEFINE_integer('rpn_batchsize', 256, "Total number of examples")
tf.app.flags.DEFINE_integer('rpn_positive_weight', -1,
'Give the positive RPN examples weight of p * 1 / {num positives} and give negatives a weight of (1 - p).'
'Set to -1.0 to use uniform example weighting')
tf.app.flags.DEFINE_integer('rpn_top_n', 300, "Only useful when TEST.MODE is 'top', specifies the number of top proposals to select")
tf.app.flags.DEFINE_boolean('rpn_clobber_positives', False, "If an anchor satisfied by positive and negative conditions set to negative")
#######################
# Proposal Parameters #
#######################
tf.app.flags.DEFINE_float('proposal_fg_fraction', 0.25, "Fraction of minibatch that is labeled foreground (i.e. class > 0)")
tf.app.flags.DEFINE_boolean('proposal_use_gt', False, "Whether to add ground truth boxes to the pool when sampling regions")
###########################
# Bounding Box Parameters #
###########################
tf.app.flags.DEFINE_float('roi_fg_threshold', 0.5, "Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)")
tf.app.flags.DEFINE_float('roi_bg_threshold_high', 0.5, "Overlap threshold for a ROI to be considered background (class = 0 if overlap in [LO, HI))")
tf.app.flags.DEFINE_float('roi_bg_threshold_low', 0.1, "Overlap threshold for a ROI to be considered background (class = 0 if overlap in [LO, HI))")
tf.app.flags.DEFINE_boolean('bbox_normalize_targets_precomputed', True, "# Normalize the targets using 'precomputed' (or made up) means and stdevs (BBOX_NORMALIZE_TARGETS must also be True)")
tf.app.flags.DEFINE_boolean('test_bbox_reg', True, "Test using bounding-box regressors")
FLAGS2["bbox_inside_weights"] = (1.0, 1.0, 1.0, 1.0)
FLAGS2["bbox_normalize_means"] = (0.0, 0.0, 0.0, 0.0)
FLAGS2["bbox_normalize_stds"] = (0.1, 0.1, 0.1, 0.1)
##################
# ROI Parameters #
##################
tf.app.flags.DEFINE_integer('roi_pooling_size', 7, "Size of the pooled region after RoI pooling")
######################
# Dataset Parameters #
######################
FLAGS2["root_dir"] = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
FLAGS2["data_dir"] = osp.abspath(osp.join(FLAGS2["root_dir"], 'data'))
def get_output_dir(imdb, weights_filename):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
    outdir = osp.abspath(osp.join(FLAGS2["root_dir"], 'default', imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
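# Usage sketch (imdb is any object with a 'name' attribute, e.g. a
# hypothetical 'voc_2007_trainval' imdb): artifacts land under
# <root>/default/<imdb.name>/<weights_filename or 'default'>:
#   out = get_output_dir(imdb, None)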
|
test_epoch.py | Co1lin/RfDNet | 143 | 11065450
# Testing functions.
# author: ynie
# date: April, 2020
from net_utils.utils import LossRecorder
from time import time
import torch
from net_utils.ap_helper import APCalculator
import numpy as np
def test_func(cfg, tester, test_loader):
'''
test function.
:param cfg: configuration file
:param tester: specific tester for networks
:param test_loader: dataloader for testing
:return:
'''
mode = cfg.config['mode']
batch_size = cfg.config[mode]['batch_size']
loss_recorder = LossRecorder(batch_size)
AP_IOU_THRESHOLDS = cfg.config[mode]['ap_iou_thresholds']
evaluate_mesh_mAP = True if cfg.config[mode]['phase'] == 'completion' and cfg.config['generation'][
'generate_mesh'] and cfg.config[mode]['evaluate_mesh_mAP'] else False
ap_calculator_list = [APCalculator(iou_thresh, cfg.dataset_config.class2type, evaluate_mesh_mAP) for iou_thresh in
AP_IOU_THRESHOLDS]
cfg.log_string('-'*100)
for iter, data in enumerate(test_loader):
loss, est_data = tester.test_step(data)
eval_dict = est_data[4]
for ap_calculator in ap_calculator_list:
ap_calculator.step(eval_dict['batch_pred_map_cls'], eval_dict['batch_gt_map_cls'])
# visualize intermediate results.
if cfg.config['generation']['dump_results']:
tester.visualize_step(mode, iter, data, est_data, eval_dict)
loss_recorder.update_loss(loss)
if ((iter + 1) % cfg.config['log']['print_step']) == 0:
cfg.log_string('Process: Phase: %s. Epoch %d: %d/%d. Current loss: %s.' % (
mode, 0, iter + 1, len(test_loader), str({key: np.mean(item) for key, item in loss.items()})))
return loss_recorder.loss_recorder, ap_calculator_list
def test(cfg, tester, test_loader):
'''
train epochs for network
:param cfg: configuration file
:param tester: specific tester for networks
:param test_loader: dataloader for testing
:return:
'''
cfg.log_string('-' * 100)
# set mode
mode = cfg.config['mode']
tester.net.train(mode == 'train')
start = time()
test_loss_recoder, ap_calculator_list = test_func(cfg, tester, test_loader)
cfg.log_string('Test time elapsed: (%f).' % (time()-start))
for key, test_loss in test_loss_recoder.items():
cfg.log_string('Test loss (%s): %f' % (key, test_loss.avg))
# Evaluate average precision
AP_IOU_THRESHOLDS = cfg.config[mode]['ap_iou_thresholds']
for i, ap_calculator in enumerate(ap_calculator_list):
cfg.log_string(('-'*10 + 'iou_thresh: %f' + '-'*10) % (AP_IOU_THRESHOLDS[i]))
metrics_dict = ap_calculator.compute_metrics()
for key in metrics_dict:
            cfg.log_string('eval %s: %f' % (key, metrics_dict[key]))
|
mmdet/models/bbox_heads/bbox_head.py | jiangwenj02/SOLO | 1,467 | 11065456 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from mmdet.core import (auto_fp16, bbox_target, delta2bbox, force_fp32,
multiclass_nms)
from ..builder import build_loss
from ..losses import accuracy
from ..registry import HEADS
@HEADS.register_module
class BBoxHead(nn.Module):
"""Simplest RoI head, with only two fc layers for classification and
regression respectively"""
def __init__(self,
with_avg_pool=False,
with_cls=True,
with_reg=True,
roi_feat_size=7,
in_channels=256,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
super(BBoxHead, self).__init__()
assert with_cls or with_reg
self.with_avg_pool = with_avg_pool
self.with_cls = with_cls
self.with_reg = with_reg
self.roi_feat_size = _pair(roi_feat_size)
self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
self.in_channels = in_channels
self.num_classes = num_classes
self.target_means = target_means
self.target_stds = target_stds
self.reg_class_agnostic = reg_class_agnostic
self.fp16_enabled = False
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
in_channels = self.in_channels
if self.with_avg_pool:
self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
else:
in_channels *= self.roi_feat_area
if self.with_cls:
self.fc_cls = nn.Linear(in_channels, num_classes)
if self.with_reg:
out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
self.fc_reg = nn.Linear(in_channels, out_dim_reg)
self.debug_imgs = None
def init_weights(self):
if self.with_cls:
nn.init.normal_(self.fc_cls.weight, 0, 0.01)
nn.init.constant_(self.fc_cls.bias, 0)
if self.with_reg:
nn.init.normal_(self.fc_reg.weight, 0, 0.001)
nn.init.constant_(self.fc_reg.bias, 0)
@auto_fp16()
def forward(self, x):
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x) if self.with_cls else None
bbox_pred = self.fc_reg(x) if self.with_reg else None
return cls_score, bbox_pred
def get_target(self, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
reg_classes = 1 if self.reg_class_agnostic else self.num_classes
cls_reg_targets = bbox_target(
pos_proposals,
neg_proposals,
pos_gt_bboxes,
pos_gt_labels,
rcnn_train_cfg,
reg_classes,
target_means=self.target_means,
target_stds=self.target_stds)
return cls_reg_targets
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def loss(self,
cls_score,
bbox_pred,
labels,
label_weights,
bbox_targets,
bbox_weights,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
if cls_score.numel() > 0:
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
pos_inds = labels > 0
if pos_inds.any():
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(bbox_pred.size(0),
4)[pos_inds]
else:
pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1,
4)[pos_inds,
labels[pos_inds]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds],
bbox_weights[pos_inds],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
return losses
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_det_bboxes(self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
self.target_stds, img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
if rescale:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
scale_factor = torch.from_numpy(scale_factor).to(bboxes.device)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) /
scale_factor).view(bboxes.size()[0], -1)
if cfg is None:
return bboxes, scores
else:
det_bboxes, det_labels = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
@force_fp32(apply_to=('bbox_preds', ))
def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
"""Refine bboxes during training.
Args:
rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
and bs is the sampled RoIs per image. The first column is
the image id and the next 4 columns are x1, y1, x2, y2.
labels (Tensor): Shape (n*bs, ).
bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
is a gt bbox.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Refined bboxes of each image in a mini-batch.
Example:
>>> # xdoctest: +REQUIRES(module:kwarray)
>>> import kwarray
>>> import numpy as np
>>> from mmdet.core.bbox.demodata import random_boxes
>>> self = BBoxHead(reg_class_agnostic=True)
>>> n_roi = 2
>>> n_img = 4
>>> scale = 512
>>> rng = np.random.RandomState(0)
>>> img_metas = [{'img_shape': (scale, scale)}
... for _ in range(n_img)]
>>> # Create rois in the expected format
>>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
>>> img_ids = torch.randint(0, n_img, (n_roi,))
>>> img_ids = img_ids.float()
>>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
>>> # Create other args
>>> labels = torch.randint(0, 2, (n_roi,)).long()
>>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
>>> # For each image, pretend random positive boxes are gts
>>> is_label_pos = (labels.numpy() > 0).astype(np.int)
>>> lbl_per_img = kwarray.group_items(is_label_pos,
... img_ids.numpy())
>>> pos_per_img = [sum(lbl_per_img.get(gid, []))
... for gid in range(n_img)]
>>> pos_is_gts = [
>>> torch.randint(0, 2, (npos,)).byte().sort(
>>> descending=True)[0]
>>> for npos in pos_per_img
>>> ]
>>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
>>> pos_is_gts, img_metas)
>>> print(bboxes_list)
"""
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() <= len(img_metas)
bboxes_list = []
for i in range(len(img_metas)):
inds = torch.nonzero(rois[:, 0] == i).squeeze(dim=1)
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
bbox_pred_ = bbox_preds[inds]
img_meta_ = img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
bboxes_list.append(bboxes[keep_inds])
return bboxes_list
@force_fp32(apply_to=('bbox_pred', ))
def regress_by_class(self, rois, label, bbox_pred, img_meta):
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
rois (Tensor): shape (n, 4) or (n, 5)
label (Tensor): shape (n, )
bbox_pred (Tensor): shape (n, 4*(#class+1)) or (n, 4)
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
if not self.reg_class_agnostic:
label = label * 4
inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
bbox_pred = torch.gather(bbox_pred, 1, inds)
assert bbox_pred.size(1) == 4
if rois.size(1) == 4:
new_rois = delta2bbox(rois, bbox_pred, self.target_means,
self.target_stds, img_meta['img_shape'])
else:
bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,
self.target_stds, img_meta['img_shape'])
new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
return new_rois
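# A minimal, self-contained sketch (illustrative, with made-up shapes) of the
# per-class delta gathering used in regress_by_class above: a class-aware head
# emits 4 deltas per class, and torch.gather selects the 4 columns that match
# each RoI's predicted label.
if __name__ == '__main__':
    import torch
    n_roi, num_classes = 3, 5
    bbox_pred = torch.arange(n_roi * num_classes * 4, dtype=torch.float32).view(n_roi, -1)
    label = torch.tensor([0, 2, 4]) * 4
    inds = torch.stack((label, label + 1, label + 2, label + 3), dim=1)
    per_class = torch.gather(bbox_pred, 1, inds)
    print(per_class.shape)  # torch.Size([3, 4]) -> one (dx, dy, dw, dh) per RoI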
|
objectModel/Python/cdm/storage/adls.py | jocubeit/CDM | 265 | 11065477 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import base64
from collections import OrderedDict
from datetime import datetime
import hashlib
import hmac
from http import HTTPStatus
from time import mktime
import typing
from typing import Dict, List, Optional
from wsgiref.handlers import format_date_time
import json
import urllib
import urllib.parse
import msal
import dateutil.parser
from cdm.utilities import StorageUtils
from cdm.utilities.network.cdm_http_client import CdmHttpClient
from cdm.utilities.string_utils import StringUtils
from cdm.storage.network import NetworkAdapter
from cdm.enums.azure_cloud_endpoint import AzureCloudEndpoint
from .base import StorageAdapterBase
class ADLSAdapter(NetworkAdapter, StorageAdapterBase):
"""Azure Data Lake Storage Gen2 storage adapter"""
ADLS_DEFAULT_TIMEOUT = 9000
HTTP_DEFAULT_MAX_RESULTS = 5000
def __init__(self, hostname: Optional[str] = None, root: Optional[str] = None, **kwargs) -> None:
super().__init__()
super(NetworkAdapter, self).__init__()
super(StorageAdapterBase, self).__init__()
# --- internal ---
self._adapter_paths = {} # type: Dict[str, str]
self._root_blob_container = None # type: Optional[str]
self._http_authorization = 'Authorization'
self._http_client = CdmHttpClient() # type: CdmHttpClient
self._http_xms_continuation = 'x-ms-continuation'
self._http_xms_date = 'x-ms-date'
        self._http_xms_version = 'x-ms-version'
self._scope = ['https://storage.azure.com/.default'] # type: Optional[List[str]]
self._type = 'adls'
self._root = None
self._sas_token = None
self._unescaped_root_sub_path = None # type: Optional[str]
self._escaped_root_sub_path = None # type: Optional[str]
self._file_modified_time_cache = {} # type: Dict[str, datetime]
self.http_max_results = self.HTTP_DEFAULT_MAX_RESULTS # type: int
self.timeout = self.ADLS_DEFAULT_TIMEOUT # type: int
if root and hostname:
self.root = root # type: Optional[str]
self.hostname = hostname # type: Optional[str]
self.client_id = kwargs.get('client_id', None) # type: Optional[str]
self.secret = kwargs.get('secret', None) # type: Optional[str]
self.shared_key = kwargs.get('shared_key', None) # type: Optional[str]
self.sas_token = kwargs.get('sas_token', None) # type: Optional[str]
self.token_provider = kwargs.get('token_provider', None) # type: Optional[TokenProvider]
self.endpoint = kwargs.get('endpoint', AzureCloudEndpoint.AZURE_PUBLIC) # type: AzureCloudEndpoint
# --- internal ---
self._tenant = kwargs.get('tenant', None) # type: Optional[str]
self._auth_context = None
@property
def hostname(self) -> str:
return self._hostname
@hostname.setter
def hostname(self, value: str):
if StringUtils.is_null_or_white_space(value):
raise ValueError('Hostname cannot be null or whitespace.')
self._hostname = value
self._formatted_hostname = self._format_hostname(self.hostname)
self._formatted_hostname_no_protocol = self._format_hostname(self._remove_protocol_from_hostname(self._hostname))
@property
def root(self) -> str:
return self._root
@root.setter
def root(self, value: str):
self._root = self._extract_root_blob_container_and_sub_path(value)
@property
def tenant(self) -> str:
return self._tenant
@property
def sas_token(self) -> str:
return self._sas_token
@sas_token.setter
def sas_token(self, value: str):
"""
The SAS token. If supplied string begins with '?' symbol, the symbol gets stripped away.
:param value: SAS token
"""
if value:
# Remove the leading question mark, so we can append this token to URLs that already have it
self._sas_token = value[1:] if value.startswith('?') else value
else:
self._sas_token = None
def can_read(self) -> bool:
return True
def can_write(self) -> bool:
return True
def clear_cache(self) -> None:
self._file_modified_time_cache.clear()
async def compute_last_modified_time_async(self, corpus_path: str) -> Optional[datetime]:
cachedValue = None
if self._is_cache_enabled:
cachedValue = self._file_modified_time_cache.get(corpus_path)
if cachedValue is not None:
return cachedValue
else:
adapter_path = self._create_formatted_adapter_path(corpus_path)
request = self._build_request(adapter_path, 'HEAD')
cdm_response = await self._http_client._send_async(request, self.wait_time_callback, self.ctx)
if cdm_response.status_code == HTTPStatus.OK:
lastTime = dateutil.parser.parse(typing.cast(str, cdm_response.response_headers['Last-Modified']))
if lastTime is not None and self._is_cache_enabled:
self._file_modified_time_cache[corpus_path] = lastTime
return lastTime
return None
def create_adapter_path(self, corpus_path: str) -> str:
if corpus_path is None:
return None
if corpus_path.startswith('//'):
corpus_path = corpus_path[1:]
formatted_corpus_path = self._format_corpus_path(corpus_path)
if formatted_corpus_path is None:
return None
if formatted_corpus_path in self._adapter_paths:
return self._adapter_paths[formatted_corpus_path]
else:
return 'https://' + self._remove_protocol_from_hostname(self.hostname) + self._get_escaped_root() + self._escape_path(formatted_corpus_path)
def create_corpus_path(self, adapter_path: str) -> Optional[str]:
if adapter_path:
start_index = len('https://')
end_index = adapter_path.find('/', start_index + 1)
if end_index < start_index:
                raise Exception('Unexpected adapter path: ' + adapter_path)
hostname = self._format_hostname(adapter_path[start_index:end_index])
if hostname == self._formatted_hostname_no_protocol and adapter_path[end_index:].startswith(self._get_escaped_root()):
escaped_corpus_path = adapter_path[end_index + len(self._get_escaped_root()):]
corpus_path = urllib.parse.unquote(escaped_corpus_path)
if corpus_path not in self._adapter_paths:
self._adapter_paths[corpus_path] = adapter_path
return corpus_path
# Signal that we did not recognize path as one for this adapter.
return None
async def fetch_all_files_async(self, folder_corpus_path: str) -> List[str]:
if folder_corpus_path is None:
return None
url = 'https://{}/{}'.format(self._formatted_hostname_no_protocol, self._root_blob_container)
escaped_folder_corpus_path = self._escape_path(folder_corpus_path)
directory = self._escaped_root_sub_path + self._format_corpus_path(escaped_folder_corpus_path)
if directory.startswith('/'):
directory = directory[1:]
continuation_token = None
results = []
while True:
if continuation_token is None:
request = self._build_request(
'{}?directory={}&maxResults={}&recursive=True&resource=filesystem'.format(url, directory,
self.http_max_results),
'GET')
else:
request = self._build_request(
'{}?continuation={}&directory={}&maxResults={}&recursive=True&resource=filesystem'.format(url,
urllib.parse.quote(
continuation_token),
directory,
self.http_max_results),
'GET')
cdm_response = await self._http_client._send_async(request, self.wait_time_callback, self.ctx)
if cdm_response.status_code == HTTPStatus.OK:
continuation_token = cdm_response.response_headers.get(self._http_xms_continuation)
data = json.loads(cdm_response.content)
for path in data['paths']:
if 'isDirectory' not in path or path['isDirectory'] != 'true':
name = path['name'] # type: str
name_without_root_sub_path = name[len(
self._unescaped_root_sub_path) + 1:] if self._unescaped_root_sub_path and name.startswith(
self._unescaped_root_sub_path) else name
filepath = self._format_corpus_path(name_without_root_sub_path)
results.append(filepath)
lastTimeString = path.get('lastModified')
if lastTimeString is not None and self._is_cache_enabled:
self._file_modified_time_cache[filepath] = dateutil.parser.parse(lastTimeString)
if continuation_token is None:
break
return results
def fetch_config(self) -> str:
result_config = {'type': self._type}
config_object = {
'hostname': self.hostname,
'root': self.root
}
# Check for clientId auth, we won't write shared key or secrets to JSON.
if self.client_id and self.tenant:
config_object['tenant'] = self.tenant
config_object['clientId'] = self.client_id
# Try constructing network configs.
config_object.update(self.fetch_network_config())
if self.location_hint:
config_object['locationHint'] = self.location_hint
if self.endpoint:
config_object['endpoint'] = StringUtils.snake_case_to_pascal_case(self.endpoint.name)
result_config['config'] = config_object
return json.dumps(result_config)
async def read_async(self, corpus_path: str) -> str:
url = self._create_formatted_adapter_path(corpus_path)
request = self._build_request(url, 'GET')
return await super()._read(request)
def update_config(self, config: str):
configs_json = json.loads(config)
if configs_json.get('root'):
self.root = configs_json['root']
else:
raise ValueError('Root has to be set for ADLS adapter.')
if configs_json.get('hostname'):
self.hostname = configs_json['hostname']
else:
raise ValueError('Hostname has to be set for ADLS adapter.')
self.update_network_config(config)
if configs_json.get('tenant') and configs_json.get('clientId'):
self._tenant = configs_json['tenant']
self.client_id = configs_json['clientId']
# To keep backwards compatibility with config files that were generated before the introduction of the `endpoint` property.
if not hasattr(self, 'endpoint') or not self.endpoint:
self.endpoint = AzureCloudEndpoint.AZURE_PUBLIC
if configs_json.get('locationHint'):
self.location_hint = configs_json['locationHint']
if configs_json.get('endpoint'):
endpoint_from_config = StringUtils.pascal_case_to_snake_case(configs_json['endpoint'])
if endpoint_from_config in AzureCloudEndpoint.__members__.keys():
self.endpoint = AzureCloudEndpoint[endpoint_from_config]
else:
raise ValueError('Endpoint value should be a string of an enumeration value from the class AzureCloudEndpoint in Pascal case.')
async def write_async(self, corpus_path: str, data: str) -> None:
url = self._create_formatted_adapter_path(corpus_path)
request = self._build_request(url + '?resource=file', 'PUT')
await self._http_client._send_async(request, self.wait_time_callback, self.ctx)
request = self._build_request(url + '?action=append&position=0', 'PATCH', data,
'application/json; charset=utf-8')
await self._http_client._send_async(request, self.wait_time_callback, self.ctx)
request = self._build_request(url + '?action=flush&position=' + str(len(data)), 'PATCH')
await self._http_client._send_async(request, self.wait_time_callback, self.ctx)
def _apply_shared_key(self, shared_key: str, url: str, method: str, content: Optional[str] = None,
content_type: Optional[str] = None):
headers = OrderedDict()
headers[self._http_xms_date] = format_date_time(mktime(datetime.now().timetuple()))
headers[self._http_xms_version] = '2018-06-17'
content_length = 0
if content is not None:
content_length = len(content)
uri = urllib.parse.urlparse(url)
builder = []
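        # Assemble the Azure Storage "string to sign": the HTTP verb, a fixed
        # sequence of (mostly blank) standard headers, the canonicalized
        # x-ms-* headers, and finally the canonicalized resource and query
        # parameters, all newline-separated and HMAC-SHA256 signed below.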
builder.append(method) # Verb.
        builder.append('\n')  # End of verb line.
builder.append('\n') # Content-Encoding.
builder.append('\n') # Content-Language.
builder.append(str(content_length) + '\n' if content_length else '\n') # Content length.
builder.append('\n') # Content-md5.
builder.append(content_type + '\n' if content_type else '\n') # Content-type.
builder.append('\n') # Date.
builder.append('\n') # If-modified-since.
builder.append('\n') # If-match.
builder.append('\n') # If-none-match.
builder.append('\n') # If-unmodified-since.
builder.append('\n') # Range.
for key, value in headers.items():
builder.append('{0}:{1}\n'.format(key, value))
# append canonicalized resource.
account_name = uri.netloc.split('.')[0]
builder.append('/')
builder.append(account_name)
builder.append(uri.path)
# append canonicalized queries.
if uri.query:
query_parameters = uri.query.split('&') # type: List[str]
for parameter in query_parameters:
key_value_pair = parameter.split('=')
builder.append('\n{}:{}'.format(key_value_pair[0].lower(), urllib.parse.unquote(key_value_pair[1])))
# Hash the payload.
data_to_hash = ''.join(builder).rstrip()
shared_key_bytes = self._try_from_base64_string(shared_key)
if not shared_key_bytes:
raise Exception('Couldn\'t encode the shared key.')
message = base64.b64encode(
hmac.new(shared_key_bytes, msg=data_to_hash.encode('utf-8'), digestmod=hashlib.sha256).digest()).decode(
'utf-8')
signed_string = 'SharedKey {}:{}'.format(account_name, message)
headers[self._http_authorization] = signed_string
return headers
def _apply_sas_token(self, url: str) -> str:
"""
Appends SAS token to the given URL.
:param url: URL to be appended with the SAS token
:return: URL with the SAS token appended
"""
return '{}{}{}'.format(url, '?' if '?' not in url else '&', self.sas_token)
def _build_request(self, url: str, method: str = 'GET', content: Optional[str] = None,
content_type: Optional[str] = None):
if self.shared_key is not None:
request = self._set_up_cdm_request(url, self._apply_shared_key(self.shared_key, url, method, content,
content_type), method)
elif self.sas_token is not None:
request = self._set_up_cdm_request(self._apply_sas_token(url), None, method)
elif self.tenant is not None and self.client_id is not None and self.secret is not None:
token = self._generate_bearer_token()
headers = {'Authorization': token['token_type'] + ' ' + token['access_token']}
request = self._set_up_cdm_request(url, headers, method)
elif self.token_provider is not None:
headers = {'Authorization': self.token_provider.get_token()}
request = self._set_up_cdm_request(url, headers, method)
else:
raise Exception('ADLS adapter is not configured with any auth method')
if content is not None:
request.content = content
request.content_type = content_type
return request
def _create_formatted_adapter_path(self, corpus_path: str) -> str:
adapter_path = self.create_adapter_path(corpus_path)
if adapter_path is None:
return None
return adapter_path.replace(self.hostname, self._formatted_hostname)
def _escape_path(self, unescaped_path: str):
return urllib.parse.quote(unescaped_path).replace('%2F', '/')
def _extract_root_blob_container_and_sub_path(self, root: str) -> str:
# No root value was set
if not root:
self._root_blob_container = ''
self._update_root_sub_path('')
return ''
# Remove leading and trailing /
prep_root = root[1:] if root[0] == '/' else root
prep_root = prep_root[0: len(prep_root) - 1] if prep_root[len(prep_root) - 1] == '/' else prep_root
# Root contains only the file-system name, e.g. "fs-name"
if prep_root.find('/') == -1:
self._root_blob_container = prep_root
self._update_root_sub_path('')
return '/{}'.format(self._root_blob_container)
# Root contains file-system name and folder, e.g. "fs-name/folder/folder..."
prep_root_array = prep_root.split('/')
self._root_blob_container = prep_root_array[0]
self._update_root_sub_path('/'.join(prep_root_array[1:]))
return '/{}/{}'.format(self._root_blob_container, self._unescaped_root_sub_path)
def _format_corpus_path(self, corpus_path: str) -> Optional[str]:
path_tuple = StorageUtils.split_namespace_path(corpus_path)
if not path_tuple:
return None
corpus_path = path_tuple[1]
if corpus_path and corpus_path[0] != '/':
corpus_path = '/' + corpus_path
return corpus_path
def _format_hostname(self, hostname: str) -> str:
hostname = hostname.replace('.blob.', '.dfs.')
port = ':443'
if port in hostname:
hostname = hostname[0:-len(port)]
return hostname
def _generate_bearer_token(self) -> Optional[dict]:
self._build_context()
result = self._auth_context.acquire_token_for_client(scopes=self._scope)
if result and 'error' in result:
error_description = result['error'] + ' error_description: ' + result['error_description'] \
if 'error_description' in result else result['error']
raise Exception('There was an error while acquiring ADLS Adapter\'s Token with '
'client ID/secret authentication. Exception: ' + error_description)
if result is None or 'access_token' not in result or 'token_type' not in result:
            raise Exception('Received invalid ADLS Adapter\'s authentication result. The result may be None, or missing'
                            ' access_token and/or token_type in the authentication result.')
return result
def _get_escaped_root(self):
return '/' + self._root_blob_container + '/' + self._escaped_root_sub_path if self._escaped_root_sub_path else '/' + self._root_blob_container
def _try_from_base64_string(self, content: str) -> Optional[bytes]:
try:
return base64.b64decode(content)
except Exception:
return None
def _update_root_sub_path(self, value: str):
self._unescaped_root_sub_path = value
self._escaped_root_sub_path = self._escape_path(value)
def _build_context(self):
"""Build context when users make the first call. Also need to ensure client Id, tenant and secret are not null."""
if self._auth_context is None:
self._auth_context = msal.ConfidentialClientApplication(
self.client_id, authority=self.endpoint.value + self.tenant, client_credential=self.secret)
def _remove_protocol_from_hostname(self, hostname: str) -> str:
"""
        Check whether the hostname has a leading protocol.
        If it does not, return the hostname unchanged.
        If the leading protocol is not "https://", raise an error.
        Otherwise, return the hostname without the leading protocol.
"""
if hostname.find('://') == -1:
return hostname
try:
url = urllib.parse.urlsplit(hostname)
if url.scheme == 'https':
return hostname[len('https://'):]
except Exception:
raise ValueError('Please provide a valid hostname.')
raise ValueError('ADLS Adapter only supports HTTPS, please provide a leading \"https://\" hostname or a non-protocol-relative hostname.')
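if __name__ == '__main__':
    # A minimal sketch, assuming the placeholder hostname, root, and key below
    # are replaced with real values; it only exercises the local path-mapping
    # logic defined above and performs no network calls.
    adapter = ADLSAdapter(
        hostname='myaccount.dfs.core.windows.net',  # hypothetical account
        root='/my-filesystem/powerbi',              # hypothetical container/sub-path
        shared_key='c2hhcmVkLWtleQ==',              # hypothetical base64 key
    )
    adapter_path = adapter.create_adapter_path('/folder/doc.cdm.json')
    print(adapter_path)
    print(adapter.create_corpus_path(adapter_path))  # round-trips to the corpus path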
|
blogs/parser.py | ewjoachim/pythondotorg | 911 | 11065491 | import datetime
import feedparser
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.timezone import make_aware, utc
from boxes.models import Box
from .models import BlogEntry, Feed
def get_all_entries(feed_url):
""" Retrieve all entries from a feed URL """
d = feedparser.parse(feed_url)
entries = []
for e in d['entries']:
published = make_aware(
            datetime.datetime(*e['published_parsed'][:6]), timezone=utc
)
entry = {
'title': e['title'],
'summary': e.get('summary', ''),
'pub_date': published,
'url': e['link'],
}
entries.append(entry)
return entries
def _render_blog_supernav(entry):
""" Utility to make testing update_blogs management command easier """
return render_to_string('blogs/supernav.html', {'entry': entry})
def update_blog_supernav():
"""Retrieve latest entry and update blog supernav item """
try:
latest_entry = BlogEntry.objects.filter(
feed=Feed.objects.get(
feed_url=settings.PYTHON_BLOG_FEED_URL,
)
).latest()
except (BlogEntry.DoesNotExist, Feed.DoesNotExist):
pass
else:
rendered_box = _render_blog_supernav(latest_entry)
box, _ = Box.objects.update_or_create(
label='supernav-python-blog',
defaults={
'content': rendered_box,
'content_markup_type': 'html',
}
)
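if __name__ == '__main__':
    # A minimal sketch, assuming this runs inside the configured Django
    # project (the model imports above require Django settings); it prints
    # the first three parsed entries from the configured blog feed.
    for e in get_all_entries(settings.PYTHON_BLOG_FEED_URL)[:3]:
        print(e['pub_date'], e['title'])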
|
autokeras/adapters/input_adapters.py | lc0/autokeras | 4,704 | 11065501 | # Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import tensorflow as tf
from autokeras.engine import adapter as adapter_module
class InputAdapter(adapter_module.Adapter):
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data to Input to be numpy.ndarray or "
"tf.data.Dataset, but got {type}.".format(type=type(x))
)
if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.number):
raise TypeError(
"Expect the data to Input to be numerical, but got "
"{type}.".format(type=x.dtype)
)
class ImageAdapter(adapter_module.Adapter):
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data to ImageInput to be numpy.ndarray or "
"tf.data.Dataset, but got {type}.".format(type=type(x))
)
if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.number):
raise TypeError(
"Expect the data to ImageInput to be numerical, but got "
"{type}.".format(type=x.dtype)
)
class TextAdapter(adapter_module.Adapter):
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data to TextInput to be numpy.ndarray or "
"tf.data.Dataset, but got {type}.".format(type=type(x))
)
class StructuredDataAdapter(adapter_module.Adapter):
def check(self, x):
if not isinstance(x, (pd.DataFrame, np.ndarray, tf.data.Dataset)):
raise TypeError(
"Unsupported type {type} for "
"{name}.".format(type=type(x), name=self.__class__.__name__)
)
def convert_to_dataset(self, dataset, batch_size):
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
        if isinstance(dataset, np.ndarray) and dataset.dtype == object:
            dataset = dataset.astype(np.str_)
return super().convert_to_dataset(dataset, batch_size)
class TimeseriesAdapter(adapter_module.Adapter):
def __init__(self, lookback=None, **kwargs):
super().__init__(**kwargs)
self.lookback = lookback
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (pd.DataFrame, np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data in TimeseriesInput to be numpy.ndarray"
" or tf.data.Dataset or pd.DataFrame, but got {type}.".format(
type=type(x)
)
)
def convert_to_dataset(self, dataset, batch_size):
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
return super().convert_to_dataset(dataset, batch_size)
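if __name__ == '__main__':
    # A minimal sketch of the dtype test the adapters above rely on:
    # np.issubdtype(..., np.number) is what separates numerical arrays
    # from, e.g., string arrays.
    print(np.issubdtype(np.zeros(3).dtype, np.number))           # True
    print(np.issubdtype(np.array(['a', 'b']).dtype, np.number))  # False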
|
5_pytorch_retinanet/lib/train_detector.py | deepchatterjeevns/Monk_Object_Detection | 549 | 11065503 | import collections
import os
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
from retinanet import model
from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, \
Normalizer
from torch.utils.data import DataLoader
from retinanet import coco_eval
from retinanet import csv_eval
assert torch.__version__.split('.')[0] == '1'
class Detector():
'''
Class to train a detector
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
def __init__(self, verbose=1):
self.system_dict = {};
self.system_dict["verbose"] = verbose;
self.system_dict["local"] = {};
self.system_dict["dataset"] = {};
self.system_dict["dataset"]["train"] = {};
self.system_dict["dataset"]["val"] = {};
self.system_dict["dataset"]["val"]["status"] = False;
self.system_dict["params"] = {};
self.system_dict["params"]["batch_size"] = 8;
self.system_dict["params"]["num_workers"] = 3;
self.system_dict["params"]["use_gpu"] = True;
self.system_dict["params"]["lr"] = 0.0001;
self.system_dict["params"]["gpu_devices"] = [0];
self.system_dict["params"]["num_epochs"] = 10;
self.system_dict["params"]["val_interval"] = 1;
self.system_dict["params"]["print_interval"] = 20;
self.system_dict["output"] = {};
self.system_dict["output"]["saved_model"] = "final_model.pt";
def Train_Dataset(self, root_dir, coco_dir, img_dir, set_dir, batch_size=8, image_size=512, use_gpu=True, num_workers=3):
'''
User function: Set training dataset parameters
Dataset Directory Structure
root_dir
|
|------coco_dir
| |
| |----img_dir
| |
| |------<set_dir_train> (set_dir) (Train)
| |
| |---------img1.jpg
| |---------img2.jpg
| |---------..........(and so on)
|
|
| |---annotations
| |----|
| |--------------------instances_Train.json (instances_<set_dir_train>.json)
| |--------------------classes.txt
- instances_Train.json -> In proper COCO format
- classes.txt -> A list of classes in alphabetical order
For TrainSet
- root_dir = "../sample_dataset";
- coco_dir = "kangaroo";
- img_dir = "images";
- set_dir = "Train";
        Note: The annotation file name must match the set_dir name, i.e. instances_<set_dir>.json
Args:
root_dir (str): Path to root directory containing coco_dir
coco_dir (str): Name of coco_dir containing image folder and annotation folder
img_dir (str): Name of folder containing all training and validation folders
set_dir (str): Name of folder containing all training images
batch_size (int): Mini batch sampling size for training epochs
image_size (int): Either of [512, 300]
use_gpu (bool): If True use GPU else run on CPU
num_workers (int): Number of parallel processors for data loader
Returns:
None
'''
self.system_dict["dataset"]["train"]["root_dir"] = root_dir;
self.system_dict["dataset"]["train"]["coco_dir"] = coco_dir;
self.system_dict["dataset"]["train"]["img_dir"] = img_dir;
self.system_dict["dataset"]["train"]["set_dir"] = set_dir;
self.system_dict["params"]["batch_size"] = batch_size;
self.system_dict["params"]["image_size"] = image_size;
self.system_dict["params"]["use_gpu"] = use_gpu;
self.system_dict["params"]["num_workers"] = num_workers;
self.system_dict["local"]["dataset_train"] = CocoDataset(self.system_dict["dataset"]["train"]["root_dir"] + "/" + self.system_dict["dataset"]["train"]["coco_dir"],
img_dir=self.system_dict["dataset"]["train"]["img_dir"],
set_dir=self.system_dict["dataset"]["train"]["set_dir"],
transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
self.system_dict["local"]["sampler"] = AspectRatioBasedSampler(self.system_dict["local"]["dataset_train"],
batch_size=self.system_dict["params"]["batch_size"], drop_last=False)
self.system_dict["local"]["dataloader_train"] = DataLoader(self.system_dict["local"]["dataset_train"],
num_workers=self.system_dict["params"]["num_workers"],
collate_fn=collater,
batch_sampler=self.system_dict["local"]["sampler"])
print('Num training images: {}'.format(len(self.system_dict["local"]["dataset_train"])))
def Val_Dataset(self, root_dir, coco_dir, img_dir, set_dir):
'''
        User function: Set validation dataset parameters
Dataset Directory Structure
root_dir
|
|------coco_dir
| |
| |----img_dir
| |
| |------<set_dir_val> (set_dir) (Validation)
| |
| |---------img1.jpg
| |---------img2.jpg
| |---------..........(and so on)
|
|
| |---annotations
| |----|
| |--------------------instances_Val.json (instances_<set_dir_val>.json)
| |--------------------classes.txt
        - instances_Val.json -> In proper COCO format
- classes.txt -> A list of classes in alphabetical order
For ValSet
- root_dir = "..sample_dataset";
- coco_dir = "kangaroo";
- img_dir = "images";
- set_dir = "Val";
        Note: The annotation file name must match the set_dir name, i.e. instances_<set_dir>.json
Args:
root_dir (str): Path to root directory containing coco_dir
coco_dir (str): Name of coco_dir containing image folder and annotation folder
img_dir (str): Name of folder containing all training and validation folders
set_dir (str): Name of folder containing all validation images
Returns:
None
'''
self.system_dict["dataset"]["val"]["status"] = True;
self.system_dict["dataset"]["val"]["root_dir"] = root_dir;
self.system_dict["dataset"]["val"]["coco_dir"] = coco_dir;
self.system_dict["dataset"]["val"]["img_dir"] = img_dir;
self.system_dict["dataset"]["val"]["set_dir"] = set_dir;
self.system_dict["local"]["dataset_val"] = CocoDataset(self.system_dict["dataset"]["val"]["root_dir"] + "/" + self.system_dict["dataset"]["val"]["coco_dir"],
img_dir=self.system_dict["dataset"]["val"]["img_dir"],
set_dir=self.system_dict["dataset"]["val"]["set_dir"],
transform=transforms.Compose([Normalizer(), Resizer()]))
self.system_dict["local"]["sampler_val"] = AspectRatioBasedSampler(self.system_dict["local"]["dataset_val"],
batch_size=self.system_dict["params"]["batch_size"], drop_last=False)
self.system_dict["local"]["dataloader_val"] = DataLoader(self.system_dict["local"]["dataset_val"],
num_workers=self.system_dict["params"]["num_workers"],
collate_fn=collater,
batch_sampler=self.system_dict["local"]["sampler_val"])
print('Num validation images: {}'.format(len(self.system_dict["local"]["dataset_val"])))
def Model(self, model_name="resnet18",gpu_devices=[0]):
'''
User function: Set Model parameters
Available Models
resnet18
resnet34
resnet50
resnet101
resnet152
Args:
model_name (str): Select model from available models
gpu_devices (list): List of GPU Device IDs to be used in training
Returns:
None
'''
num_classes = self.system_dict["local"]["dataset_train"].num_classes();
if model_name == "resnet18":
retinanet = model.resnet18(num_classes=num_classes, pretrained=True)
elif model_name == "resnet34":
retinanet = model.resnet34(num_classes=num_classes, pretrained=True)
elif model_name == "resnet50":
retinanet = model.resnet50(num_classes=num_classes, pretrained=True)
elif model_name == "resnet101":
retinanet = model.resnet101(num_classes=num_classes, pretrained=True)
elif model_name == "resnet152":
retinanet = model.resnet152(num_classes=num_classes, pretrained=True)
if self.system_dict["params"]["use_gpu"]:
self.system_dict["params"]["gpu_devices"] = gpu_devices
if len(self.system_dict["params"]["gpu_devices"])==1:
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.system_dict["params"]["gpu_devices"][0])
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(id) for id in self.system_dict["params"]["gpu_devices"]])
self.system_dict["local"]["device"] = 'cuda' if torch.cuda.is_available() else 'cpu'
retinanet = retinanet.to(self.system_dict["local"]["device"])
retinanet = torch.nn.DataParallel(retinanet).to(self.system_dict["local"]["device"])
retinanet.training = True
retinanet.train()
retinanet.module.freeze_bn()
self.system_dict["local"]["model"] = retinanet;
def Set_Hyperparams(self, lr=0.0001, val_interval=1, print_interval=20):
'''
User function: Set hyper parameters
Args:
lr (float): Initial learning rate for training
val_interval (int): Post specified number of training epochs, a validation epoch will be carried out
print_interval (int): Post every specified iteration the training losses and accuracies will be printed
Returns:
None
'''
self.system_dict["params"]["lr"] = lr;
self.system_dict["params"]["val_interval"] = val_interval;
self.system_dict["params"]["print_interval"] = print_interval;
self.system_dict["local"]["optimizer"] = torch.optim.Adam(self.system_dict["local"]["model"].parameters(),
self.system_dict["params"]["lr"]);
self.system_dict["local"]["scheduler"] = torch.optim.lr_scheduler.ReduceLROnPlateau(self.system_dict["local"]["optimizer"],
patience=3, verbose=True)
self.system_dict["local"]["loss_hist"] = collections.deque(maxlen=500)
def Train(self, num_epochs=2, output_model_name="final_model.pt"):
'''
User function: Start training
Args:
num_epochs (int): Number of epochs to train for
output_model_name (str): Final model name for saving purposes, with extension ".pt"
Returns:
None
'''
self.system_dict["output"]["saved_model"] = output_model_name;
self.system_dict["params"]["num_epochs"] = num_epochs;
for epoch_num in range(num_epochs):
self.system_dict["local"]["model"].train()
self.system_dict["local"]["model"].module.freeze_bn()
epoch_loss = []
for iter_num, data in enumerate(self.system_dict["local"]["dataloader_train"]):
try:
self.system_dict["local"]["optimizer"].zero_grad()
classification_loss, regression_loss = self.system_dict["local"]["model"]([data['img'].to(self.system_dict["local"]["device"]).float(), data['annot'].to(self.system_dict["local"]["device"])])
classification_loss = classification_loss.mean()
regression_loss = regression_loss.mean()
loss = classification_loss + regression_loss
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(self.system_dict["local"]["model"].parameters(), 0.1)
self.system_dict["local"]["optimizer"].step()
self.system_dict["local"]["loss_hist"].append(float(loss))
epoch_loss.append(float(loss))
if(iter_num % self.system_dict["params"]["print_interval"] == 0):
print(
'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(self.system_dict["local"]["loss_hist"])))
del classification_loss
del regression_loss
except Exception as e:
print(e)
continue
if(self.system_dict["dataset"]["val"]["status"]):
print('Evaluating dataset')
coco_eval.evaluate_coco(self.system_dict["local"]["dataset_val"], self.system_dict["local"]["model"])
self.system_dict["local"]["scheduler"].step(np.mean(epoch_loss))
torch.save(self.system_dict["local"]["model"], 'resume.pt')
self.system_dict["local"]["model"].eval()
torch.save(self.system_dict["local"]["model"], output_model_name)
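if __name__ == '__main__':
    # A minimal end-to-end sketch following the docstrings above; the dataset
    # layout is the sample one described in Train_Dataset/Val_Dataset, and the
    # paths below are assumptions to be replaced with real data.
    gtf = Detector()
    gtf.Train_Dataset(root_dir='../sample_dataset', coco_dir='kangaroo',
                      img_dir='images', set_dir='Train', batch_size=8)
    gtf.Val_Dataset(root_dir='../sample_dataset', coco_dir='kangaroo',
                    img_dir='images', set_dir='Val')
    gtf.Model(model_name='resnet18', gpu_devices=[0])
    gtf.Set_Hyperparams(lr=0.0001, val_interval=1, print_interval=20)
    gtf.Train(num_epochs=2, output_model_name='final_model.pt')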
|
examples/rl/scripts/design-4/train.py | ONLYA/RoboGrammar | 156 | 11065504 | import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--save-dir', type = str, default = './trained_models/RobotLocomotion-v0/design-4/')
args = parser.parse_args()
cmd = 'python train/train.py --rule-sequence 0, 14, 2, 11, 4, 16, 6, 4, 19, 10, 20, 18, 11, 9, 10, 4, 5, 11, 19, 5 --save-dir {}'.format(args.save_dir)
os.system(cmd)
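# Example invocation (illustrative), relying on the default save directory above:
#   python train.py --save-dir ./trained_models/RobotLocomotion-v0/design-4/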
|
eeauditor/auditors/aws/AWS_IAM_Auditor.py | kbhagi/ElectricEye | 442 | 11065517 | #This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
import json
registry = CheckRegister()
# import boto3 clients
iam = boto3.client("iam")
# loop through IAM users
def list_users(cache):
response = cache.get("list_users")
if response:
return response
cache["list_users"] = iam.list_users(MaxItems=1000)
return cache["list_users"]
@registry.register_check("iam")
def iam_access_key_age_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.1] IAM Access Keys should be rotated every 90 days"""
user = list_users(cache=cache)
for users in user["Users"]:
userName = str(users["UserName"])
userArn = str(users["Arn"])
try:
response = iam.list_access_keys(UserName=userName)
for keys in response["AccessKeyMetadata"]:
keyUserName = str(keys["UserName"])
keyId = str(keys["AccessKeyId"])
keyStatus = str(keys["Status"])
# ISO Time
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
if keyStatus == "Active":
keyCreateDate = keys["CreateDate"]
todaysDatetime = datetime.datetime.now(datetime.timezone.utc)
keyAgeFinder = todaysDatetime - keyCreateDate
if keyAgeFinder <= datetime.timedelta(days=90):
# this is a passing check
finding = {
"SchemaVersion": "2018-10-08",
"Id": keyUserName + keyId + "/iam-access-key-age-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn + keyId,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.1] IAM Access Keys should be rotated every 90 days",
"Description": "IAM access key "
+ keyId
+ " for user "
+ keyUserName
+ " is not over 90 days old.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM access key rotation refer to the Rotating Access Keys section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_RotateAccessKey",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamAccessKey",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"AwsIamAccessKey": {
"PrincipalId": keyId,
"PrincipalName": keyUserName,
"Status": keyStatus,
}
},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": keyUserName + keyId + "/iam-access-key-age-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn + keyId,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[IAM.1] IAM Access Keys should be rotated every 90 days",
"Description": "IAM access key "
+ keyId
+ " for user "
+ keyUserName
+ " is over 90 days old. As a security best practice, AWS recommends that you regularly rotate (change) IAM user access keys. If your administrator granted you the necessary permissions, you can rotate your own access keys. Refer to the remediation section to remediate this behavior.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM access key rotation refer to the Rotating Access Keys section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_RotateAccessKey",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamAccessKey",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"AwsIamAccessKey": {
"PrincipalId": keyId,
"PrincipalName": keyUserName,
"Status": keyStatus,
}
},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
pass
except Exception as e:
print(e)
@registry.register_check("iam")
def user_permission_boundary_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""aaa"""
user = list_users(cache=cache)
for users in user["Users"]:
userName = str(users["UserName"])
userArn = str(users["Arn"])
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
permBoundaryArn = str(users["PermissionsBoundary"]["PermissionsBoundaryArn"])
# this is a passing check
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-permissions-boundary-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.2] IAM users should have permissions boundaries attached",
"Description": "IAM user " + userName + " has a permissions boundary attached.",
"Remediation": {
"Recommendation": {
"Text": "For information on permissions boundaries refer to the Permissions Boundaries for IAM Entities section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"PrincipalName": userName,
"permissionsBoundaryArn": permBoundaryArn,
}
},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-4",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 AC-3",
"NIST SP 800-53 AC-5",
"NIST SP 800-53 AC-6",
"NIST SP 800-53 AC-14",
"NIST SP 800-53 AC-16",
"NIST SP 800-53 AC-24",
"AICPA TSC CC6.3",
"ISO 27001:2013 A.6.1.2",
"ISO 27001:2013 A.9.1.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.4.1",
"ISO 27001:2013 A.9.4.4",
"ISO 27001:2013 A.9.4.5",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
if str(e) == "'PermissionsBoundary'":
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-permissions-boundary-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[IAM.2] IAM users should have permissions boundaries attached",
"Description": "IAM user "
+ userName
+ " does not have a permissions boundary attached. A permissions boundary is an advanced feature for using a managed policy to set the maximum permissions that an identity-based policy can grant to an IAM entity. A permissions boundary allows it to perform only the actions that are allowed by both its identity-based policies and its permissions boundaries. Refer to the remediation section to remediate this behavior.",
"Remediation": {
"Recommendation": {
"Text": "For information on permissions boundaries refer to the Permissions Boundaries for IAM Entities section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PrincipalName": userName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-4",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 AC-3",
"NIST SP 800-53 AC-5",
"NIST SP 800-53 AC-6",
"NIST SP 800-53 AC-14",
"NIST SP 800-53 AC-16",
"NIST SP 800-53 AC-24",
"AICPA TSC CC6.3",
"ISO 27001:2013 A.6.1.2",
"ISO 27001:2013 A.9.1.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.4.1",
"ISO 27001:2013 A.9.4.4",
"ISO 27001:2013 A.9.4.5",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
print(e)
@registry.register_check("iam")
def user_mfa_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.3] IAM users should have Multi-Factor Authentication (MFA) enabled"""
user = list_users(cache=cache)
for users in user["Users"]:
userName = str(users["UserName"])
userArn = str(users["Arn"])
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
response = iam.list_mfa_devices(UserName=userName)
if str(response["MFADevices"]) == "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-mfa-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[IAM.3] IAM users should have Multi-Factor Authentication (MFA) enabled",
"Description": "IAM user "
+ userName
+ " does not have MFA enabled. For increased security, AWS recommends that you configure multi-factor authentication (MFA) to help protect your AWS resources. Refer to the remediation section to remediate this behavior.",
"Remediation": {
"Recommendation": {
"Text": "For information on MFA refer to the Using Multi-Factor Authentication (MFA) in AWS section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PrincipalName": userName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-mfa-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.3] IAM users should have Multi-Factor Authentication (MFA) enabled",
"Description": "IAM user " + userName + " has MFA enabled.",
"Remediation": {
"Recommendation": {
"Text": "For information on MFA refer to the Using Multi-Factor Authentication (MFA) in AWS section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PrincipalName": userName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("iam")
def user_inline_policy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.4] IAM users should not have attached in-line policies"""
user = list_users(cache=cache)
allUsers = user["Users"]
for users in allUsers:
userName = str(users["UserName"])
userArn = str(users["Arn"])
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
response = iam.list_user_policies(UserName=userName)
if str(response["PolicyNames"]) != "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-attach-inline-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[IAM.4] IAM users should not have attached in-line policies",
"Description": "IAM user "
+ userName
+ " has an in-line policy attached. It is recommended that IAM policies be applied directly to groups and roles but not users. Refer to the remediation section to remediate this behavior.",
"Remediation": {
"Recommendation": {
"Text": "For information on user attached policies refer to the Managed Policies and Inline Policies section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PrincipalName": userName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-attach-inline-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.4] IAM users should not have attached in-line policies",
"Description": "IAM user "
+ userName
+ " does not have an in-line policy attached.",
"Remediation": {
"Recommendation": {
"Text": "For information on user attached policies refer to the Managed Policies and Inline Policies section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PrincipalName": userName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("iam")
def user_direct_attached_policy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.5] IAM users should not have attached managed policies"""
user = list_users(cache=cache)
allUsers = user["Users"]
for users in allUsers:
userName = str(users["UserName"])
userArn = str(users["Arn"])
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
response = iam.list_attached_user_policies(UserName=userName)
if str(response["AttachedPolicies"]) != "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-attach-managed-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[IAM.5] IAM users should not have attached managed policies",
"Description": "IAM user "
+ userName
+ " has a managed policy attached. It is recommended that IAM policies be applied directly to groups and roles but not users. Refer to the remediation section to remediate this behavior.",
"Remediation": {
"Recommendation": {
"Text": "For information on user attached policies refer to the Managed Policies and Inline Policies section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PrincipalName": userName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": userArn + "/iam-user-attach-managed-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": userArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.5] IAM users should not have attached managed policies",
"Description": "IAM user "
+ userName
+ " does not have a managed policy attached.",
"Remediation": {
"Recommendation": {
"Text": "For information on user attached policies refer to the Managed Policies and Inline Policies section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": userArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PrincipalName": userName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("iam")
def cis_aws_foundation_benchmark_pw_policy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.6] The IAM password policy should meet or exceed the AWS CIS Foundations Benchmark standard"""
try:
# TODO: if no policy is found, this will throw an exception in
# which case we need to create an ACTIVE finding
response = iam.get_account_password_policy()
pwPolicy = response["PasswordPolicy"]
minPwLength = int(pwPolicy["MinimumPasswordLength"])
symbolReq = str(pwPolicy["RequireSymbols"])
numberReq = str(pwPolicy["RequireNumbers"])
uppercaseReq = str(pwPolicy["RequireUppercaseCharacters"])
lowercaseReq = str(pwPolicy["RequireLowercaseCharacters"])
maxPwAge = int(pwPolicy["MaxPasswordAge"])
pwReuse = int(pwPolicy["PasswordReusePrevention"])
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
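        # These thresholds follow the CIS AWS Foundations Benchmark password controls:
        # minimum length >= 14 characters, rotation within 90 days, reuse prevention across
        # the last 24 passwords, and required symbol/number/uppercase/lowercase classes.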
if (
minPwLength >= 14
and maxPwAge <= 90
and pwReuse >= 24
and symbolReq == "True"
and numberReq == "True"
and uppercaseReq == "True"
and lowercaseReq == "True"
):
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cis-aws-foundations-benchmark-pw-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": awsAccountId + "iam-password-policy",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.6] The IAM password policy should meet or exceed the AWS CIS Foundations Benchmark standard",
"Description": "The IAM password policy for account "
+ awsAccountId
+ " meets or exceeds the AWS CIS Foundations Benchmark standard.",
"Remediation": {
"Recommendation": {
"Text": "For information on the CIS AWS Foundations Benchmark standard for the password policy refer to the linked Standard",
"Url": "https://d1.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAccount",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/cis-aws-foundations-benchmark-pw-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": awsAccountId + "iam-password-policy",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[IAM.6] The IAM password policy should meet or exceed the AWS CIS Foundations Benchmark standard",
"Description": "The IAM password policy for account "
+ awsAccountId
+ " does not meet the AWS CIS Foundations Benchmark standard. Refer to the remediation instructions if this configuration is not intended.",
"Remediation": {
"Recommendation": {
"Text": "For information on the CIS AWS Foundations Benchmark standard for the password policy refer to the linked Standard",
"Url": "https://d1.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAccount",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("iam")
def server_certs_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.7] There should not be any server certificates stored in AWS IAM"""
try:
response = iam.list_server_certificates()
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if str(response["ServerCertificateMetadataList"]) != "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/server-x509-certs-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": awsAccountId + "server-cert",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[IAM.7] There should not be any server certificates stored in AWS IAM",
"Description": "There are server certificates stored in AWS IAM for the account "
+ awsAccountId
+ ". ACM is the preferred tool to provision, manage, and deploy your server certificates. With ACM you can request a certificate or deploy an existing ACM or external certificate to AWS resources. Certificates provided by ACM are free and automatically renew. Refer to the remediation instructions if this configuration is not intended.",
"Remediation": {
"Recommendation": {
"Text": "For information on server certificates refer to the Working with Server Certificates section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAccount",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": awsAccountId + "/server-x509-certs-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": awsAccountId + "server-cert",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.7] There should not be any server certificates stored in AWS IAM",
"Description": "There are not server certificates stored in AWS IAM for the account "
+ awsAccountId
+ ".",
"Remediation": {
"Recommendation": {
"Text": "For information on server certificates refer to the Working with Server Certificates section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsAccount",
"Id": f"{awsPartition.upper()}::::Account:{awsAccountId}",
"Partition": awsPartition,
"Region": awsRegion,
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("iam")
def iam_mngd_policy_least_priv_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.8] Managed policies should follow least privilege principles"""
try:
policies = iam.list_policies(Scope='Local')
for mngd_policy in policies['Policies']:
policy_arn = mngd_policy['Arn']
version_id = mngd_policy['DefaultVersionId']
policy_doc = iam.get_policy_version(
PolicyArn=policy_arn,
VersionId=version_id
)['PolicyVersion']['Document']
            # handle policy docs returned as strings
if type(policy_doc) == str:
policy_doc = json.loads(policy_doc)
least_priv_rating = 'passing'
for statement in policy_doc['Statement']:
if statement["Effect"] == 'Allow':
if statement.get('Condition') == None:
# action structure could be a string or a list
if type(statement['Action']) == list:
if len(['True' for x in statement['Action'] if ":*" in x or '*' == x]) > 0:
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                    # Break so this high-severity failure is not overwritten by a lower finding later
                                    break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
# Single action in a statement
elif type(statement['Action']) == str:
if ":*" in statement['Action'] or statement['Action'] == '*':
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                    # Break so this high-severity failure is not overwritten by a lower finding later
                                    break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
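            # Illustrative (hypothetical) statements and the rating each receives:
            #   {"Effect": "Allow", "Action": "s3:*", "Resource": "*"}          -> failed_high
            #   {"Effect": "Allow", "Action": ["iam:*"], "Resource": [<arns>]}  -> failed_low
            #   any Allow statement carrying a Condition block                  -> stays passing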
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if least_priv_rating == 'passing':
finding = {
"SchemaVersion": "2018-10-08",
"Id": policy_arn + "/mngd_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": policy_arn + "mngd_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.8] Managed policies should follow least privilege principles",
"Description": f"The customer managed policy {policy_arn} is following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the Controlling access section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_controlling.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamPolicy",
"Id": policy_arn,
"Partition": awsPartition,
"Region": awsRegion
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
elif least_priv_rating == 'failed_low':
finding = {
"SchemaVersion": "2018-10-08",
"Id": policy_arn + "/mngd_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": policy_arn + "mngd_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[IAM.8] Managed policies should follow least privilege principles",
"Description": f"The customer managed policy {policy_arn} is not following least privilege principles and has been rated: {least_priv_rating}.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the Controlling access section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_controlling.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamPolicy",
"Id": policy_arn,
"Partition": awsPartition,
"Region": awsRegion
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
elif least_priv_rating == 'failed_high':
finding = {
"SchemaVersion": "2018-10-08",
"Id": policy_arn + "/mngd_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": policy_arn + "mngd_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[IAM.8] Managed policies should follow least privilege principles",
"Description": f"The customer managed policy {policy_arn} is not following least privilege principles and has been rated: {least_priv_rating}.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the Controlling access section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_controlling.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamPolicy",
"Id": policy_arn,
"Partition": awsPartition,
"Region": awsRegion
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
    except Exception as e:
        print(e)
@registry.register_check("iam")
def iam_user_policy_least_priv_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.9] User inline policies should follow least privilege principles"""
try:
Users = iam.list_users()
for user in Users['Users']:
user_arn = user['Arn']
UserName = user['UserName']
policy_names = iam.list_user_policies(
UserName=UserName
)['PolicyNames']
for policy_name in policy_names:
policy_doc = iam.get_user_policy(
UserName=UserName,
PolicyName=policy_name
)['PolicyDocument']
                # handle policy docs returned as strings
if type(policy_doc) == str:
policy_doc = json.loads(policy_doc)
least_priv_rating = 'passing'
for statement in policy_doc['Statement']:
if statement["Effect"] == 'Allow':
if statement.get('Condition') == None:
# action structure could be a string or a list
if type(statement['Action']) == list:
if len(['True' for x in statement['Action'] if ":*" in x or '*' == x]) > 0:
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                        # Break so this high-severity failure is not overwritten by a lower finding later
                                        break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
# Single action in a statement
elif type(statement['Action']) == str:
if ":*" in statement['Action'] or statement['Action'] == '*':
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                        # Break so this high-severity failure is not overwritten by a lower finding later
                                        break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if least_priv_rating == 'passing':
finding = {
"SchemaVersion": "2018-10-08",
"Id": user_arn + "/user_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": user_arn + "user_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.9] User inline policies should follow least privilege principles",
"Description": f"The user {user_arn} inline policy {policy_name} is following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": user_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"PrincipalName": UserName
}
},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
elif least_priv_rating == 'failed_low':
finding = {
"SchemaVersion": "2018-10-08",
"Id": user_arn + "/user_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": user_arn + "user_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[IAM.9] User inline policies should follow least privilege principles",
"Description": f"The user {user_arn} inline policy {policy_name} is not following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": user_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"PrincipalName": UserName
}
},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
elif least_priv_rating == 'failed_high':
finding = {
"SchemaVersion": "2018-10-08",
"Id": user_arn + "/user_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": user_arn + "user_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[IAM.9] User inline policies should follow least privilege principles",
"Description": f"The user {user_arn} inline policy {policy_name} is not following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamUser",
"Id": user_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"PrincipalName": UserName
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
    except Exception as e:
        print(e)
@registry.register_check("iam")
def iam_group_policy_least_priv_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.10] Group inline policies should follow least privilege principles"""
try:
Groups = iam.list_groups()
for group in Groups['Groups']:
group_arn = group['Arn']
GroupName = group['GroupName']
policy_names = iam.list_group_policies(
GroupName=GroupName
)['PolicyNames']
for policy_name in policy_names:
policy_doc = iam.get_group_policy(
GroupName=GroupName,
PolicyName=policy_name
)['PolicyDocument']
                # handle policy docs returned as strings
if type(policy_doc) == str:
policy_doc = json.loads(policy_doc)
least_priv_rating = 'passing'
for statement in policy_doc['Statement']:
if statement["Effect"] == 'Allow':
if statement.get('Condition') == None:
# action structure could be a string or a list
if type(statement['Action']) == list:
if len(['True' for x in statement['Action'] if ":*" in x or '*' == x]) > 0:
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                        # Break so this high-severity failure is not overwritten by a lower finding later
                                        break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
# Single action in a statement
elif type(statement['Action']) == str:
if ":*" in statement['Action'] or statement['Action'] == '*':
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                        # Break so this high-severity failure is not overwritten by a lower finding later
                                        break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if least_priv_rating == 'passing':
finding = {
"SchemaVersion": "2018-10-08",
"Id": group_arn + "/group_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": group_arn + "group_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.10] Group inline policies should follow least privilege principles",
"Description": f"The group {group_arn} inline policy {policy_name} is following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamGroup",
"Id": group_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PolicyName": policy_name}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
elif least_priv_rating == 'failed_low':
finding = {
"SchemaVersion": "2018-10-08",
"Id": group_arn + "/group_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": group_arn + "group_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[IAM.10] Group inline policies should follow least privilege principles",
"Description": f"The group {group_arn} inline policy {policy_name} is not following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamGroup",
"Id": group_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PolicyName": policy_name}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
elif least_priv_rating == 'failed_high':
finding = {
"SchemaVersion": "2018-10-08",
"Id": group_arn + "/group_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": group_arn + "group_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[IAM.10] Group inline policies should follow least privilege principles",
"Description": f"The group {group_arn} inline policy {policy_name} is not following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamGroup",
"Id": group_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"PolicyName": policy_name}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
    except Exception as e:
        print(e)
@registry.register_check("iam")
def iam_role_policy_least_priv_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[IAM.11] Role inline policies should follow least privilege principles"""
try:
Roles = iam.list_roles()
for role in Roles['Roles']:
role_arn = role['Arn']
RoleName = role['RoleName']
policy_names = iam.list_role_policies(
RoleName=RoleName
)['PolicyNames']
for policy_name in policy_names:
policy_doc = iam.get_role_policy(
RoleName=RoleName,
PolicyName=policy_name
)['PolicyDocument']
                # handle policy docs returned as strings
if type(policy_doc) == str:
policy_doc = json.loads(policy_doc)
least_priv_rating = 'passing'
for statement in policy_doc['Statement']:
if statement["Effect"] == 'Allow':
if statement.get('Condition') == None:
# action structure could be a string or a list
if type(statement['Action']) == list:
if len(['True' for x in statement['Action'] if ":*" in x or '*' == x]) > 0:
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                        # Break so this high-severity failure is not overwritten by a lower finding later
                                        break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
# Single action in a statement
elif type(statement['Action']) == str:
if ":*" in statement['Action'] or statement['Action'] == '*':
if type(statement['Resource']) == str and statement['Resource'] == '*':
least_priv_rating = 'failed_high'
                                        # Break so this high-severity failure is not overwritten by a lower finding later
                                        break
elif type(statement['Resource']) == list:
least_priv_rating = 'failed_low'
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if least_priv_rating == 'passing':
finding = {
"SchemaVersion": "2018-10-08",
"Id": role_arn + "/role_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": role_arn + "role_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[IAM.11] Role inline policies should follow least privilege principles",
"Description": f"The role {role_arn} inline policy {policy_name} is following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamRole",
"Id": role_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {
"PolicyName": policy_name}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
elif least_priv_rating == 'failed_low':
finding = {
"SchemaVersion": "2018-10-08",
"Id": role_arn + "/role_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": role_arn + "role_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[IAM.11] Role inline policies should follow least privilege principles",
"Description": f"The role {role_arn} inline policy {policy_name} is not following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamRole",
"Id": role_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {
"PolicyName": policy_name}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
elif least_priv_rating == 'failed_high':
finding = {
"SchemaVersion": "2018-10-08",
"Id": role_arn + "/role_policy_least_priv",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": role_arn + "role_policy_least_priv",
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[IAM.11] Role inline policies should follow least privilege principles",
"Description": f"The role {role_arn} inline policy {policy_name} is not following least privilege principles.",
"Remediation": {
"Recommendation": {
"Text": "For information on IAM least privilege refer to the inline policy section of the AWS IAM User Guide",
"Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsIamRole",
"Id": role_arn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {
"PolicyName": policy_name}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-3",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-17",
"NIST SP 800-53 AC-19",
"NIST SP 800-53 AC-20",
"NIST SP 800-53 SC-15",
"AICPA TSC CC6.6",
"ISO 27001:2013 A.6.2.1",
"ISO 27001:2013 A.6.2.2",
"ISO 27001:2013 A.11.2.6",
"ISO 27001:2013 A.13.1.1",
"ISO 27001:2013 A.13.2.1"
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
    except Exception as e:
        print(e)
corehq/apps/userreports/tests/test_columns.py | akashkj/commcare-hq | 471 | 11065518 | import uuid
from django.test import SimpleTestCase, TestCase
from sqlagg import SumWhen
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.util import post_case_blocks
from corehq.apps.userreports import tasks
from corehq.apps.userreports.app_manager.helpers import clean_table_name
from corehq.apps.userreports.columns import get_distinct_values
from corehq.apps.userreports.const import DEFAULT_MAXIMUM_EXPANSION
from corehq.apps.userreports.exceptions import BadSpecError
from corehq.apps.userreports.models import (
DataSourceConfiguration,
ReportConfiguration,
)
from corehq.apps.userreports.reports.data_source import (
ConfigurableReportDataSource,
)
from corehq.apps.userreports.reports.factory import ReportColumnFactory
from corehq.apps.userreports.reports.specs import (
AggregateDateColumn,
FieldColumn,
PercentageColumn,
)
from corehq.apps.userreports.sql.columns import expand_column
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.sql_db.connections import UCR_ENGINE_ID, connection_manager
class TestFieldColumn(SimpleTestCase):
def testColumnSetFromAlias(self):
field = ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"alias": "the_right_answer",
"type": "field",
}, is_static=False)
self.assertTrue(isinstance(field, FieldColumn))
self.assertEqual('the_right_answer', field.column_id)
def testColumnDefaultsToField(self):
field = ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"type": "field",
}, is_static=False)
self.assertEqual('doc_id', field.column_id)
def testBadAggregation(self):
with self.assertRaises(BadSpecError):
ReportColumnFactory.from_spec({
"aggregation": "simple_",
"field": "doc_id",
"type": "field",
}, is_static=False)
def testGoodFormat(self):
for format in [
'default',
'percent_of_total',
]:
self.assertEqual(FieldColumn, type(
ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"format": format,
"type": "field",
}, is_static=False)
))
def testBadFormat(self):
with self.assertRaises(BadSpecError):
ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"format": "default_",
"type": "field",
}, is_static=False)
class ChoiceListColumnDbTest(TestCase):
def test_column_uniqueness_when_truncated(self):
problem_spec = {
"display_name": "practicing_lessons",
"property_name": "long_column",
"choices": [
# test for regression:
# with sqlalchemy paramstyle='pyformat' (default)
# some queries that included columns with ')' in the column name
# would fail with a very cryptic message
"duplicate_choice_1(s)",
"duplicate_choice_2",
],
"select_style": "multiple",
"column_id": "a_very_long_base_selection_column_name_with_limited_room",
"type": "choice_list",
}
data_source_config = DataSourceConfiguration(
domain='test',
display_name='foo',
referenced_doc_type='CommCareCase',
table_id=uuid.uuid4().hex,
configured_filter={},
configured_indicators=[problem_spec],
)
adapter = get_indicator_adapter(data_source_config)
adapter.rebuild_table()
# ensure we can save data to the table.
adapter.save({
'_id': uuid.uuid4().hex,
'domain': 'test',
'doc_type': 'CommCareCase',
'long_column': 'duplicate_choice_1(s)',
})
# and query it back
q = adapter.get_query_object()
self.assertEqual(1, q.count())
class ArrayTypeColumnDbTest(TestCase):
def test_array_type_column(self):
problem_spec = {
"column_id": "referral_health_problem",
"datatype": "array",
"type": "expression",
"expression": {
"type": "split_string",
"string_expression": {
"type": "property_name",
"property_name": "referral_health_problem",
}
},
}
data_source_config = DataSourceConfiguration(
domain='test',
display_name='foo',
referenced_doc_type='CommCareCase',
table_id=uuid.uuid4().hex,
configured_filter={},
configured_indicators=[problem_spec],
)
adapter = get_indicator_adapter(data_source_config)
adapter.rebuild_table()
self.addCleanup(adapter.drop_table)
# ensure we can save data to the table.
adapter.save({
'_id': uuid.uuid4().hex,
'domain': 'test',
'doc_type': 'CommCareCase',
'referral_health_problem': 'bleeding convulsions',
})
# and query it back
qs = adapter.get_query_object()
self.assertEqual(1, qs.count())
self.assertEqual(qs.first().referral_health_problem, ['bleeding', 'convulsions'])
class TestExpandedColumn(TestCase):
domain = 'foo'
case_type = 'person'
def _new_case(self, properties):
id = uuid.uuid4().hex
case_block = CaseBlock.deprecated_init(
create=True,
case_id=id,
case_type=self.case_type,
update=properties,
).as_xml()
post_case_blocks([case_block], {'domain': self.domain})
return CaseAccessors(self.domain).get_case(id)
def _build_report(self, vals, field='my_field', build_data_source=True):
"""
Build a new report, and populate it with cases.
Return a ConfigurableReportDataSource and a FieldColumn
:param vals: List of values to populate the given report field with.
:param field: The name of a field in the data source/report
:return: Tuple containing a ConfigurableReportDataSource and FieldColumn.
The column is a column mapped to the given field.
"""
# Create Cases
for v in vals:
update_props = {field: v} if v is not None else {}
self._new_case(update_props).save()
if build_data_source:
tasks.rebuild_indicators(self.data_source_config._id)
report_config = ReportConfiguration(
domain=self.domain,
config_id=self.data_source_config._id,
title='foo',
aggregation_columns=['doc_id'],
columns=[{
"type": "expanded",
"field": field,
"display": field,
"format": "default",
}],
filters=[],
configured_charts=[]
)
report_config.save()
self.addCleanup(report_config.delete)
data_source = ConfigurableReportDataSource.from_spec(report_config)
return data_source, data_source.top_level_columns[0]
@classmethod
def setUpClass(cls):
super(TestExpandedColumn, cls).setUpClass()
cls.data_source_config = DataSourceConfiguration(
domain=cls.domain,
display_name='foo',
referenced_doc_type='CommCareCase',
table_id=clean_table_name(cls.domain, str(uuid.uuid4().hex)),
configured_filter={
"type": "boolean_expression",
"operator": "eq",
"expression": {
"type": "property_name",
"property_name": "type"
},
"property_value": cls.case_type,
},
configured_indicators=[{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": field
},
"column_id": field,
"display_name": field,
"datatype": "string"
} for field in ['my_field', 'field_name_with_CAPITAL_letters']],
)
cls.data_source_config.save()
@classmethod
def tearDownClass(cls):
cls.data_source_config.delete()
super(TestExpandedColumn, cls).tearDownClass()
def tearDown(self):
adapter = get_indicator_adapter(self.data_source_config)
adapter.drop_table()
connection_manager.dispose_engine(UCR_ENGINE_ID)
super(TestExpandedColumn, self).tearDown()
def test_getting_distinct_values(self):
data_source, column = self._build_report([
'apple',
'apple',
'banana',
'blueberry'
])
vals = get_distinct_values(data_source.config, column)[0]
self.assertListEqual(vals, ['apple', 'banana', 'blueberry'])
def test_no_distinct_values(self):
data_source, column = self._build_report([])
distinct_vals, too_many_values = get_distinct_values(data_source.config, column)
self.assertListEqual(distinct_vals, [])
def test_too_large_expansion(self):
vals = ['foo' + str(i) for i in range(DEFAULT_MAXIMUM_EXPANSION + 1)]
data_source, column = self._build_report(vals)
distinct_vals, too_many_values = get_distinct_values(data_source.config, column)
self.assertTrue(too_many_values)
self.assertEqual(len(distinct_vals), DEFAULT_MAXIMUM_EXPANSION)
def test_allowed_expansion(self):
num_columns = DEFAULT_MAXIMUM_EXPANSION + 1
vals = ['foo' + str(i) for i in range(num_columns)]
data_source, column = self._build_report(vals)
column.max_expansion = num_columns
distinct_vals, too_many_values = get_distinct_values(
data_source.config,
column,
expansion_limit=num_columns,
)
self.assertFalse(too_many_values)
self.assertEqual(len(distinct_vals), num_columns)
def test_unbuilt_data_source(self):
data_source, column = self._build_report(['apple'], build_data_source=False)
distinct_vals, too_many_values = get_distinct_values(data_source.config, column)
self.assertListEqual(distinct_vals, [])
self.assertFalse(too_many_values)
def test_expansion(self):
column = ReportColumnFactory.from_spec(dict(
type="expanded",
field="lab_result",
display="Lab Result",
format="default",
description="foo"
), is_static=False)
cols = expand_column(column, ["positive", "negative"], "en")
self.assertEqual(len(cols), 2)
self.assertEqual(type(cols[0].view), SumWhen)
self.assertEqual(cols[1].view.whens, [['negative', 1]])
def test_none_in_values(self):
"""
Confirm that expanded columns work when one of the distinct values is None.
This is an edge case because postgres uses different operators for comparing
columns to null than it does for comparing to non-null values. e.g.
"my_column = 4" vs "my_column is NULL"
"""
field_name = 'field_name_with_CAPITAL_letters'
submitted_vals = [None, None, 'foo']
data_source, _ = self._build_report(submitted_vals, field=field_name)
headers = [column.header for column in data_source.columns]
self.assertEqual(set(headers), {"{}-{}".format(field_name, x) for x in submitted_vals})
def get_expected_row(submitted_value, distinct_values):
            # The headers look like "my_field-foo", but the rows are dicts with
            # keys like "my_field-1". So, we need to use the index of the headers
            # to determine which keys in the rows correspond to which values.
row = {}
for value in distinct_values:
header_index = headers.index("{}-{}".format(field_name, value))
row_key = "{}-{}".format(field_name, header_index)
row[row_key] = 1 if submitted_value == value else 0
return row
expected_rows = [get_expected_row(v, set(submitted_vals)) for v in submitted_vals]
data = data_source.get_data()
        self.assertCountEqual(expected_rows, data)
class TestAggregateDateColumn(SimpleTestCase):
def setUp(self):
self._spec = {
'type': 'aggregate_date',
'column_id': 'a_date',
'field': 'a_date',
}
def test_wrap(self):
wrapped = ReportColumnFactory.from_spec(self._spec, is_static=False)
self.assertTrue(isinstance(wrapped, AggregateDateColumn))
self.assertEqual('a_date', wrapped.column_id)
def test_group_by(self):
wrapped = ReportColumnFactory.from_spec(self._spec, is_static=False)
self.assertEqual(['a_date_year', 'a_date_month'], wrapped.get_query_column_ids())
def test_format(self):
wrapped = ReportColumnFactory.from_spec(self._spec, is_static=False)
self.assertEqual('2015-03', wrapped.get_format_fn()({'year': 2015, 'month': 3}))
def test_custom_format(self):
self._spec.update({'format': '%b %Y'})
wrapped = ReportColumnFactory.from_spec(self._spec, is_static=False)
self.assertEqual('Mar 2015', wrapped.get_format_fn()({'year': 2015, 'month': 3}))
def test_format_missing(self):
wrapped = ReportColumnFactory.from_spec(self._spec, is_static=False)
self.assertEqual('Unknown Date', wrapped.get_format_fn()({'year': None, 'month': None}))
class TestPercentageColumn(SimpleTestCase):
def test_wrap(self):
wrapped = ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'numerator': {
"aggregation": "sum",
"field": "has_danger_signs",
"type": "field",
},
'denominator': {
"aggregation": "sum",
"field": "is_pregnant",
"type": "field",
},
}, is_static=False)
self.assertTrue(isinstance(wrapped, PercentageColumn))
self.assertEqual('pct', wrapped.column_id)
self.assertEqual('has_danger_signs', wrapped.numerator.field)
self.assertEqual('is_pregnant', wrapped.denominator.field)
self.assertEqual('percent', wrapped.format)
def test_missing_fields(self):
field_spec = {
"aggregation": "simple",
"field": "is_pregnant",
"type": "field",
}
with self.assertRaises(BadSpecError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
}, is_static=False)
with self.assertRaises(BadSpecError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'numerator': field_spec,
}, is_static=False)
with self.assertRaises(BadSpecError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'denominator': field_spec,
}, is_static=False)
def test_wrong_field_type(self):
# can't put a percent in another percent
field_spec = {
"aggregation": "simple",
"field": "is_pregnant",
"type": "percent",
}
with self.assertRaises(BadSpecError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'numerator': field_spec,
'denominator': field_spec,
}, is_static=False)
def test_format_pct(self):
spec = self._test_spec()
spec['format'] = 'percent'
wrapped = ReportColumnFactory.from_spec(spec, is_static=False)
self.assertEqual('33%', wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_pct_denom_0(self):
spec = self._test_spec()
spec['format'] = 'percent'
wrapped = ReportColumnFactory.from_spec(spec, is_static=False)
for empty_value in [0, 0.0, None, '']:
self.assertEqual('--', wrapped.get_format_fn()({'num': 1, 'denom': empty_value}))
def test_format_fraction(self):
spec = self._test_spec()
spec['format'] = 'fraction'
wrapped = ReportColumnFactory.from_spec(spec, is_static=False)
self.assertEqual('1/3', wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_both(self):
spec = self._test_spec()
spec['format'] = 'both'
wrapped = ReportColumnFactory.from_spec(spec, is_static=False)
self.assertEqual('33% (1/3)', wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_pct_non_numeric(self):
spec = self._test_spec()
spec['format'] = 'percent'
wrapped = ReportColumnFactory.from_spec(spec, is_static=False)
for unexpected_value in ['hello', object()]:
            self.assertEqual('?', wrapped.get_format_fn()({'num': 1, 'denom': unexpected_value}),
                             'non-numeric value failed for denominator {}'.format(unexpected_value))
self.assertEqual('?', wrapped.get_format_fn()({'num': unexpected_value, 'denom': 1}))
def test_format_numeric_pct(self):
spec = self._test_spec()
spec['format'] = 'numeric_percent'
wrapped = ReportColumnFactory.from_spec(spec, is_static=False)
self.assertEqual(33, wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_float(self):
spec = self._test_spec()
spec['format'] = 'decimal'
wrapped = ReportColumnFactory.from_spec(spec, is_static=False)
self.assertEqual(.333, wrapped.get_format_fn()({'num': 1, 'denom': 3}))
self.assertEqual(.25, wrapped.get_format_fn()({'num': 1, 'denom': 4}))
def _test_spec(self):
return {
'type': 'percent',
'column_id': 'pct',
'denominator': {
"aggregation": "simple",
"field": "is_pregnant",
"type": "field",
},
'numerator': {
"aggregation": "simple",
"field": "has_danger_signs",
"type": "field",
}
}
|
Contents/Libraries/Shared/rebulk/test/test_toposort.py | jippo015/Sub-Zero.bundle | 1,553 | 11065548 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 True Blade Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Original:
# - https://bitbucket.org/ericvsmith/toposort (1.4)
# Modifications:
# - port to pytest
# pylint: skip-file
import pytest
from ..toposort import toposort, toposort_flatten, CyclicDependency
class TestCase(object):
def test_simple(self):
results = list(toposort({2: set([11]), 9: set([11, 8]), 10: set([11, 3]), 11: set([7, 5]), 8: set([7, 3])}))
expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])]
assert results == expected
# make sure self dependencies are ignored
results = list(toposort({2: set([2, 11]), 9: set([11, 8]), 10: set([10, 11, 3]), 11: set([7, 5]), 8: set([7, 3])}))
expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])]
assert results == expected
assert list(toposort({1: set()})) == [set([1])]
assert list(toposort({1: set([1])})) == [set([1])]
def test_no_dependencies(self):
assert list(toposort({1: set([2]), 3: set([4]), 5: set([6])})) == [set([2, 4, 6]), set([1, 3, 5])]
assert list(toposort({1: set(), 3: set(), 5: set()})) == [set([1, 3, 5])]
def test_empty(self):
assert list(toposort({})) == []
def test_strings(self):
results = list(toposort({'2': set(['11']), '9': set(['11', '8']), '10': set(['11', '3']), '11': set(['7', '5']), '8': set(['7', '3'])}))
expected = [set(['3', '5', '7']), set(['8', '11']), set(['2', '9', '10'])]
assert results == expected
def test_objects(self):
o2 = object()
o3 = object()
o5 = object()
o7 = object()
o8 = object()
o9 = object()
o10 = object()
o11 = object()
results = list(toposort({o2: set([o11]), o9: set([o11, o8]), o10: set([o11, o3]), o11: set([o7, o5]), o8: set([o7, o3, o8])}))
expected = [set([o3, o5, o7]), set([o8, o11]), set([o2, o9, o10])]
assert results == expected
def test_cycle(self):
# a simple, 2 element cycle
with pytest.raises(CyclicDependency):
list(toposort({1: set([2]), 2: set([1])}))
# an indirect cycle
with pytest.raises(CyclicDependency):
list(toposort({1: set([2]), 2: set([3]), 3: set([1])}))
def test_input_not_modified(self):
data = {2: set([11]),
9: set([11, 8]),
10: set([11, 3]),
11: set([7, 5]),
8: set([7, 3, 8]), # includes something self-referential
}
orig = data.copy()
results = list(toposort(data))
assert data == orig
def test_input_not_modified_when_cycle_error(self):
data = {1: set([2]),
2: set([1]),
3: set([4]),
}
orig = data.copy()
with pytest.raises(CyclicDependency):
list(toposort(data))
assert data == orig
class TestCaseAll(object):
def test_sort_flatten(self):
data = {2: set([11]),
9: set([11, 8]),
10: set([11, 3]),
11: set([7, 5]),
8: set([7, 3, 8]), # includes something self-referential
}
expected = [set([3, 5, 7]), set([8, 11]), set([2, 9, 10])]
assert list(toposort(data)) == expected
# now check the sorted results
results = []
for item in expected:
results.extend(sorted(item))
assert toposort_flatten(data) == results
# and the unsorted results. break the results up into groups to compare them
actual = toposort_flatten(data, False)
results = [set([i for i in actual[0:3]]), set([i for i in actual[3:5]]), set([i for i in actual[5:8]])]
assert results == expected
|
utils/logger.py | mjlbach/3detr | 336 | 11065553 | <reponame>mjlbach/3detr
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
try:
from tensorboardX import SummaryWriter
except ImportError:
print("Cannot import tensorboard. Will log to txt files only.")
SummaryWriter = None
from utils.dist import is_primary
class Logger(object):
def __init__(self, log_dir=None) -> None:
self.log_dir = log_dir
if SummaryWriter is not None and is_primary():
self.writer = SummaryWriter(self.log_dir)
else:
self.writer = None
def log_scalars(self, scalar_dict, step, prefix=None):
if self.writer is None:
return
for k in scalar_dict:
v = scalar_dict[k]
if isinstance(v, torch.Tensor):
v = v.detach().cpu().item()
if prefix is not None:
k = prefix + k
self.writer.add_scalar(k, v, step)
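# Example usage (hypothetical values; requires tensorboardX on the primary rank):
#   logger = Logger(log_dir="./runs/exp1")
#   logger.log_scalars({"loss": torch.tensor(0.25)}, step=100, prefix="train/")
# On non-primary ranks, or without tensorboardX, log_scalars is a silent no-op.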
|
froide/account/migrations/0025_auto_20200812_1212.py | xenein/froide | 198 | 11065558 | # Generated by Django 3.0.8 on 2020-08-12 10:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("account", "0024_auto_20200608_1111"),
]
operations = [
migrations.AlterModelOptions(
name="accountblocklist",
options={
"verbose_name": "Blocklist entry",
"verbose_name_plural": "Blocklist",
},
),
migrations.RemoveField(
model_name="user",
name="newsletter",
),
]
|
torchplus/train/learning_schedules_fastai.py | jerry99s/second.pytorch | 1,541 | 11065561 | import numpy as np
import math
from functools import partial
import torch
class LRSchedulerStep(object):
def __init__(self, fai_optimizer, total_step, lr_phases, mom_phases):
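        # lr_phases / mom_phases are sequences of (start_fraction, func) pairs; each phase is
        # converted below to absolute (start_step, end_step, func), where func maps progress
        # within the phase (0.0-1.0) to a value. String funcs are eval'd into callables.
        # Example phase spec (hypothetical): ((0, lambda p: 1e-3), (0.8, lambda p: 1e-4)).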
self.optimizer = fai_optimizer
self.total_step = total_step
self.lr_phases = []
for i, (start, lambda_func) in enumerate(lr_phases):
if len(self.lr_phases) != 0:
assert self.lr_phases[-1][0] < int(start * total_step)
if isinstance(lambda_func, str):
lambda_func = eval(lambda_func)
if i < len(lr_phases) - 1:
self.lr_phases.append((int(start * total_step),
int(lr_phases[i + 1][0] * total_step),
lambda_func))
else:
self.lr_phases.append((int(start * total_step), total_step,
lambda_func))
assert self.lr_phases[0][0] == 0
self.mom_phases = []
for i, (start, lambda_func) in enumerate(mom_phases):
if len(self.mom_phases) != 0:
assert self.mom_phases[-1][0] < int(start * total_step)
if isinstance(lambda_func, str):
lambda_func = eval(lambda_func)
if i < len(mom_phases) - 1:
self.mom_phases.append((int(start * total_step),
int(mom_phases[i + 1][0] * total_step),
lambda_func))
else:
self.mom_phases.append((int(start * total_step), total_step,
lambda_func))
if len(mom_phases) > 0:
assert self.mom_phases[0][0] == 0
    def step(self, step):
        # The last phase whose start has been passed wins; earlier matching
        # phases are computed and then overwritten.
        lrs = []
        moms = []
        for start, end, func in self.lr_phases:
            if step >= start:
                lrs.append(func((step - start) / (end - start)))
        if len(lrs) > 0:
            self.optimizer.lr = lrs[-1]
        for start, end, func in self.mom_phases:
            if step >= start:
                moms.append(func((step - start) / (end - start)))
        if len(moms) > 0:
            self.optimizer.mom = moms[-1]
@property
def learning_rate(self):
return self.optimizer.lr
def annealing_cos(start, end, pct):
    """Cosine anneal from `start` to `end` as `pct` goes from 0.0 to 1.0."""
    cos_out = np.cos(np.pi * pct) + 1
    return end + (start - end) / 2 * cos_out
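# --- Illustrative addition, not part of the original file ---
# Sanity check of annealing_cos: at pct=0, cos_out == 2 and the function
# returns `start`; at pct=1, cos_out == 0 and it returns `end`; at pct=0.5
# it returns the midpoint of the two.
def _check_annealing_cos_endpoints():
    assert math.isclose(annealing_cos(1.0, 0.1, 0.0), 1.0)
    assert math.isclose(annealing_cos(1.0, 0.1, 1.0), 0.1)
    assert math.isclose(annealing_cos(1.0, 0.1, 0.5), 0.55)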
class OneCycle(LRSchedulerStep):
def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor,
pct_start):
self.lr_max = lr_max
self.moms = moms
self.div_factor = div_factor
self.pct_start = pct_start
a1 = int(total_step * self.pct_start)
a2 = total_step - a1
low_lr = self.lr_max / self.div_factor
lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
(self.pct_start,
partial(annealing_cos, self.lr_max, low_lr / 1e4)))
mom_phases = ((0, partial(annealing_cos, *self.moms)),
(self.pct_start, partial(annealing_cos,
*self.moms[::-1])))
fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0]
super().__init__(fai_optimizer, total_step, lr_phases, mom_phases)
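# --- Illustrative addition, not part of the original file ---
# Sketch of the phase layout OneCycle builds: with total_step=100 and
# pct_start=0.4, the LR warms up from lr_max/div_factor to lr_max over
# steps 0..40, then anneals down to lr_max/div_factor/1e4 over steps
# 40..100. FakeOptim is the stub optimizer defined later in this module.
def _check_one_cycle_phase_layout():
    opt = FakeOptim()
    sched = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.4)
    assert sched.lr_phases[0][:2] == (0, 40)
    assert sched.lr_phases[1][:2] == (40, 100)
    assert math.isclose(opt.lr, 3e-4) and math.isclose(opt.mom, 0.95)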
class ExponentialDecay(LRSchedulerStep):
def __init__(self,
fai_optimizer,
total_step,
initial_learning_rate,
decay_length,
decay_factor,
staircase=True):
"""
Args:
decay_length: must in (0, 1)
"""
assert decay_length > 0
assert decay_length < 1
self._decay_steps_unified = decay_length
self._decay_factor = decay_factor
self._staircase = staircase
step = 0
stage = 1
lr_phases = []
if staircase:
while step <= total_step:
func = lambda p, _d=initial_learning_rate * stage: _d
lr_phases.append((step / total_step, func))
stage *= decay_factor
step += int(decay_length * total_step)
        else:
            # Scale by the initial rate so the continuous mode matches the
            # staircase mode instead of starting at lr == 1.0.
            func = lambda p: initial_learning_rate * pow(decay_factor, p / decay_length)
            lr_phases.append((0, func))
super().__init__(fai_optimizer, total_step, lr_phases, [])
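# --- Illustrative addition, not part of the original file ---
# Because the continuous branch is scaled by initial_learning_rate, both
# modes start at the initial rate and the smooth curve follows
# lr(p) = initial_learning_rate * decay_factor ** (p / decay_length).
def _check_exponential_decay_start():
    opt = FakeOptim()  # stub optimizer defined later in this module
    sched = ExponentialDecay(opt, 100, 3e-4, 0.1, 0.8, staircase=False)
    sched.step(0)
    assert math.isclose(opt.lr, 3e-4)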
class ManualStepping(LRSchedulerStep):
def __init__(self, fai_optimizer, total_step, boundaries, rates):
        assert all(0 < b < 1 for b in boundaries)
assert len(boundaries) + 1 == len(rates)
boundaries.insert(0, 0.0)
lr_phases = []
for start, rate in zip(boundaries, rates):
func = lambda p, _d=rate: _d
lr_phases.append((start, func))
super().__init__(fai_optimizer, total_step, lr_phases, [])
class FakeOptim:
def __init__(self):
self.lr = 0
self.mom = 0
if __name__ == "__main__":
import matplotlib.pyplot as plt
    opt = FakeOptim()  # 3e-3, wd=0.4, div_factor=10
    # Pick one scheduler to visualize:
    # schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.4)
    # schd = ExponentialDecay(opt, 100, 3e-4, 0.1, 0.8, staircase=True)
    schd = ManualStepping(opt, 100, [0.8, 0.9], [0.001, 0.0001, 0.00005])
lrs = []
moms = []
for i in range(100):
schd.step(i)
lrs.append(opt.lr)
moms.append(opt.mom)
    plt.plot(lrs)
    # plt.plot(moms)
    plt.show()
|
scripts/__init__.py | bigblue/pynab | 657 | 11065579 |
__author__ = 'James'
|
tests/unit/gapic/aiplatform_v1/test_job_service.py | conankun/python-aiplatform | 180 | 11065584 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient
from google.cloud.aiplatform_v1.services.job_service import JobServiceClient
from google.cloud.aiplatform_v1.services.job_service import pagers
from google.cloud.aiplatform_v1.services.job_service import transports
from google.cloud.aiplatform_v1.types import accelerator_type
from google.cloud.aiplatform_v1.types import batch_prediction_job
from google.cloud.aiplatform_v1.types import (
batch_prediction_job as gca_batch_prediction_job,
)
from google.cloud.aiplatform_v1.types import completion_stats
from google.cloud.aiplatform_v1.types import custom_job
from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1.types import data_labeling_job
from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import env_var
from google.cloud.aiplatform_v1.types import explanation
from google.cloud.aiplatform_v1.types import explanation_metadata
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import (
hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
)
from google.cloud.aiplatform_v1.types import io
from google.cloud.aiplatform_v1.types import job_service
from google.cloud.aiplatform_v1.types import job_state
from google.cloud.aiplatform_v1.types import machine_resources
from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job
from google.cloud.aiplatform_v1.types import (
model_deployment_monitoring_job as gca_model_deployment_monitoring_job,
)
from google.cloud.aiplatform_v1.types import model_monitoring
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import study
from google.cloud.aiplatform_v1.types import unmanaged_container_model
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert JobServiceClient._get_default_mtls_endpoint(None) is None
assert (
JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
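# --- Illustrative addition, not part of the generated test suite ---
# A standalone re-derivation of the endpoint rewrite exercised above: for
# *.googleapis.com hosts, ".mtls" is inserted after the first label (unless
# already present); anything else passes through unchanged. This is only a
# sketch of the documented behavior, not the library's implementation.
def _sketch_default_mtls_endpoint(api_endpoint):
    if api_endpoint is None or not api_endpoint.endswith(".googleapis.com"):
        return api_endpoint
    name, _, rest = api_endpoint.partition(".")
    if rest.startswith("mtls."):
        return api_endpoint
    return name + ".mtls." + rest
def _check_sketch_default_mtls_endpoint():
    assert _sketch_default_mtls_endpoint("example.googleapis.com") == "example.mtls.googleapis.com"
    assert _sketch_default_mtls_endpoint("example.sandbox.googleapis.com") == "example.mtls.sandbox.googleapis.com"
    assert _sketch_default_mtls_endpoint("api.example.com") == "api.example.com"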
@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,])
def test_job_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.JobServiceGrpcTransport, "grpc"),
(transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_job_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient,])
def test_job_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_job_service_client_get_transport_class():
transport = JobServiceClient.get_transport_class()
available_transports = [
transports.JobServiceGrpcTransport,
]
assert transport in available_transports
transport = JobServiceClient.get_transport_class("grpc")
assert transport == transports.JobServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
JobServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(JobServiceAsyncClient),
)
def test_job_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
JobServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(JobServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_job_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_job_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_job_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_job_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = JobServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_create_custom_job(
transport: str = "grpc", request_type=job_service.CreateCustomJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_create_custom_job_from_dict():
test_create_custom_job(request_type=dict)
def test_create_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
client.create_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateCustomJobRequest()
@pytest.mark.asyncio
async def test_create_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.CreateCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_create_custom_job_async_from_dict():
await test_create_custom_job_async(request_type=dict)
def test_create_custom_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateCustomJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
call.return_value = gca_custom_job.CustomJob()
client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_custom_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateCustomJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_custom_job.CustomJob()
)
await client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_custom_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_custom_job.CustomJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_custom_job(
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].custom_job
mock_val = gca_custom_job.CustomJob(name="name_value")
assert arg == mock_val
def test_create_custom_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_custom_job(
job_service.CreateCustomJobRequest(),
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_custom_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_custom_job.CustomJob()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_custom_job(
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].custom_job
mock_val = gca_custom_job.CustomJob(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_custom_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_custom_job(
job_service.CreateCustomJobRequest(),
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
def test_get_custom_job(
transport: str = "grpc", request_type=job_service.GetCustomJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_get_custom_job_from_dict():
test_get_custom_job(request_type=dict)
def test_get_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
client.get_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetCustomJobRequest()
@pytest.mark.asyncio
async def test_get_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.GetCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_custom_job_async_from_dict():
await test_get_custom_job_async(request_type=dict)
def test_get_custom_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
call.return_value = custom_job.CustomJob()
client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_custom_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
custom_job.CustomJob()
)
await client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_custom_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = custom_job.CustomJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_custom_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_custom_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_custom_job(
job_service.GetCustomJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_custom_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            custom_job.CustomJob()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_custom_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_custom_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_custom_job(
job_service.GetCustomJobRequest(), name="name_value",
)
def test_list_custom_jobs(
transport: str = "grpc", request_type=job_service.ListCustomJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListCustomJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListCustomJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListCustomJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_custom_jobs_from_dict():
test_list_custom_jobs(request_type=dict)
def test_list_custom_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
client.list_custom_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListCustomJobsRequest()
@pytest.mark.asyncio
async def test_list_custom_jobs_async(
transport: str = "grpc_asyncio", request_type=job_service.ListCustomJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListCustomJobsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListCustomJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListCustomJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_custom_jobs_async_from_dict():
await test_list_custom_jobs_async(request_type=dict)
def test_list_custom_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListCustomJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
call.return_value = job_service.ListCustomJobsResponse()
client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_custom_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListCustomJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListCustomJobsResponse()
)
await client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_custom_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListCustomJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_custom_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_custom_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_custom_jobs(
job_service.ListCustomJobsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_custom_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListCustomJobsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_custom_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_custom_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_custom_jobs(
job_service.ListCustomJobsRequest(), parent="parent_value",
)
def test_list_custom_jobs_pager():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(custom_jobs=[], next_page_token="def",),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(),], next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(), custom_job.CustomJob(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_custom_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, custom_job.CustomJob) for i in results)
def test_list_custom_jobs_pages():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(custom_jobs=[], next_page_token="def",),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(),], next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(), custom_job.CustomJob(),],
),
RuntimeError,
)
pages = list(client.list_custom_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_custom_jobs_async_pager():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_custom_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(custom_jobs=[], next_page_token="def",),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(),], next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(), custom_job.CustomJob(),],
),
RuntimeError,
)
async_pager = await client.list_custom_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, custom_job.CustomJob) for i in responses)
@pytest.mark.asyncio
async def test_list_custom_jobs_async_pages():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_custom_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(custom_jobs=[], next_page_token="def",),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(),], next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[custom_job.CustomJob(), custom_job.CustomJob(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_custom_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_delete_custom_job(
transport: str = "grpc", request_type=job_service.DeleteCustomJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_custom_job_from_dict():
test_delete_custom_job(request_type=dict)
def test_delete_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
client.delete_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteCustomJobRequest()
@pytest.mark.asyncio
async def test_delete_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.DeleteCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_custom_job_async_from_dict():
await test_delete_custom_job_async(request_type=dict)
def test_delete_custom_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_custom_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_custom_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_custom_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_custom_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_custom_job(
job_service.DeleteCustomJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_custom_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_custom_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_custom_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_custom_job(
job_service.DeleteCustomJobRequest(), name="name_value",
)
def test_cancel_custom_job(
transport: str = "grpc", request_type=job_service.CancelCustomJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelCustomJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_cancel_custom_job_from_dict():
test_cancel_custom_job(request_type=dict)
def test_cancel_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
client.cancel_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelCustomJobRequest()
@pytest.mark.asyncio
async def test_cancel_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.CancelCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelCustomJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_custom_job_async_from_dict():
await test_cancel_custom_job_async(request_type=dict)
def test_cancel_custom_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
call.return_value = None
client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
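    # GAPIC clients mirror URI path fields into the x-goog-request-params
    # metadata entry so the backend can route the request.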
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_custom_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_cancel_custom_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_custom_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_custom_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_custom_job(
job_service.CancelCustomJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_custom_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_custom_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_custom_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_custom_job(
job_service.CancelCustomJobRequest(), name="name_value",
)
def test_create_data_labeling_job(
transport: str = "grpc", request_type=job_service.CreateDataLabelingJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
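        # The field values below are arbitrary sentinels; the assertions further
        # down only check that they round-trip through the client unchanged.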
call.return_value = gca_data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
response = client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
def test_create_data_labeling_job_from_dict():
test_create_data_labeling_job(request_type=dict)
def test_create_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
client.create_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_create_data_labeling_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateDataLabelingJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
)
response = await client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
@pytest.mark.asyncio
async def test_create_data_labeling_job_async_from_dict():
await test_create_data_labeling_job_async(request_type=dict)
def test_create_data_labeling_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateDataLabelingJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
call.return_value = gca_data_labeling_job.DataLabelingJob()
client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateDataLabelingJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_data_labeling_job.DataLabelingJob()
)
await client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_data_labeling_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_data_labeling_job.DataLabelingJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_data_labeling_job(
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].data_labeling_job
mock_val = gca_data_labeling_job.DataLabelingJob(name="name_value")
assert arg == mock_val
def test_create_data_labeling_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_data_labeling_job(
job_service.CreateDataLabelingJobRequest(),
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_data_labeling_job.DataLabelingJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_data_labeling_job(
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].data_labeling_job
mock_val = gca_data_labeling_job.DataLabelingJob(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_data_labeling_job(
job_service.CreateDataLabelingJobRequest(),
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
def test_get_data_labeling_job(
transport: str = "grpc", request_type=job_service.GetDataLabelingJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
response = client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
def test_get_data_labeling_job_from_dict():
test_get_data_labeling_job(request_type=dict)
def test_get_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
client.get_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_get_data_labeling_job_async(
transport: str = "grpc_asyncio", request_type=job_service.GetDataLabelingJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
)
response = await client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
@pytest.mark.asyncio
async def test_get_data_labeling_job_async_from_dict():
await test_get_data_labeling_job_async(request_type=dict)
def test_get_data_labeling_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
call.return_value = data_labeling_job.DataLabelingJob()
client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_job.DataLabelingJob()
)
await client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_data_labeling_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_job.DataLabelingJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_data_labeling_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_data_labeling_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_data_labeling_job(
job_service.GetDataLabelingJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_job.DataLabelingJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_data_labeling_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_data_labeling_job(
job_service.GetDataLabelingJobRequest(), name="name_value",
)
def test_list_data_labeling_jobs(
transport: str = "grpc", request_type=job_service.ListDataLabelingJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListDataLabelingJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListDataLabelingJobsRequest()
# Establish that the response is the type that we expect.
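    # list_* methods wrap the raw response in a pager that lazily fetches
    # subsequent pages as it is iterated.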
assert isinstance(response, pagers.ListDataLabelingJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_data_labeling_jobs_from_dict():
test_list_data_labeling_jobs(request_type=dict)
def test_list_data_labeling_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
client.list_data_labeling_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListDataLabelingJobsRequest()
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListDataLabelingJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListDataLabelingJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListDataLabelingJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async_from_dict():
await test_list_data_labeling_jobs_async(request_type=dict)
def test_list_data_labeling_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListDataLabelingJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
call.return_value = job_service.ListDataLabelingJobsResponse()
client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListDataLabelingJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListDataLabelingJobsResponse()
)
await client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_data_labeling_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListDataLabelingJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_data_labeling_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_data_labeling_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_data_labeling_jobs(
job_service.ListDataLabelingJobsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListDataLabelingJobsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_data_labeling_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_data_labeling_jobs(
job_service.ListDataLabelingJobsRequest(), parent="parent_value",
)
def test_list_data_labeling_jobs_pager():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[], next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[data_labeling_job.DataLabelingJob(),],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
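        # The trailing RuntimeError is a guard: the pager must stop at the page
        # with an empty next_page_token, so the sentinel is never raised.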
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
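        # The pager is expected to retain this metadata and resend it with
        # every subsequent page request.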
pager = client.list_data_labeling_jobs(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, data_labeling_job.DataLabelingJob) for i in results)
def test_list_data_labeling_jobs_pages():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[], next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[data_labeling_job.DataLabelingJob(),],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
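        # Each page exposes its underlying proto via .raw_page; the tokens here
        # line up with the side_effect responses, ending in the default "".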
pages = list(client.list_data_labeling_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async_pager():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
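    # new_callable=mock.AsyncMock makes the patched __call__ awaitable, which
    # the async pager requires when it fetches each page.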
with mock.patch.object(
type(client.transport.list_data_labeling_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[], next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[data_labeling_job.DataLabelingJob(),],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
async_pager = await client.list_data_labeling_jobs(request={},)
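        # Awaiting the list call performs the first page fetch, so the pager
        # already holds the first response's token.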
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, data_labeling_job.DataLabelingJob) for i in responses)
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async_pages():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[], next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[data_labeling_job.DataLabelingJob(),],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_data_labeling_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_delete_data_labeling_job(
transport: str = "grpc", request_type=job_service.DeleteDataLabelingJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteDataLabelingJobRequest()
# Establish that the response is the type that we expect.
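    # Delete is a long-running operation: the client wraps the returned
    # operations_pb2.Operation in a google.api_core future for polling.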
assert isinstance(response, future.Future)
def test_delete_data_labeling_job_from_dict():
test_delete_data_labeling_job(request_type=dict)
def test_delete_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
client.delete_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_delete_data_labeling_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteDataLabelingJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_data_labeling_job_async_from_dict():
await test_delete_data_labeling_job_async(request_type=dict)
def test_delete_data_labeling_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_data_labeling_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_data_labeling_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_data_labeling_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_data_labeling_job(
job_service.DeleteDataLabelingJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_data_labeling_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_data_labeling_job(
job_service.DeleteDataLabelingJobRequest(), name="name_value",
)
def test_cancel_data_labeling_job(
transport: str = "grpc", request_type=job_service.CancelDataLabelingJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_cancel_data_labeling_job_from_dict():
test_cancel_data_labeling_job(request_type=dict)
def test_cancel_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
client.cancel_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CancelDataLabelingJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_async_from_dict():
await test_cancel_data_labeling_job_async(request_type=dict)
def test_cancel_data_labeling_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
call.return_value = None
client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_cancel_data_labeling_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_data_labeling_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_data_labeling_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_data_labeling_job(
job_service.CancelDataLabelingJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_data_labeling_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_data_labeling_job(
job_service.CancelDataLabelingJobRequest(), name="name_value",
)
def test_create_hyperparameter_tuning_job(
transport: str = "grpc",
request_type=job_service.CreateHyperparameterTuningJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_create_hyperparameter_tuning_job_from_dict():
test_create_hyperparameter_tuning_job(request_type=dict)
def test_create_hyperparameter_tuning_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
client.create_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_async_from_dict():
await test_create_hyperparameter_tuning_job_async(request_type=dict)
def test_create_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateHyperparameterTuningJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateHyperparameterTuningJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_hyperparameter_tuning_job.HyperparameterTuningJob()
)
await client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_hyperparameter_tuning_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_hyperparameter_tuning_job(
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].hyperparameter_tuning_job
mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
)
assert arg == mock_val
def test_create_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_hyperparameter_tuning_job(
job_service.CreateHyperparameterTuningJobRequest(),
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_hyperparameter_tuning_job.HyperparameterTuningJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_hyperparameter_tuning_job(
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].hyperparameter_tuning_job
mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_hyperparameter_tuning_job(
job_service.CreateHyperparameterTuningJobRequest(),
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
def test_get_hyperparameter_tuning_job(
transport: str = "grpc", request_type=job_service.GetHyperparameterTuningJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_get_hyperparameter_tuning_job_from_dict():
test_get_hyperparameter_tuning_job(request_type=dict)
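# The request_type=dict variant above relies on proto-plus coercion: message
# constructors accept a mapping as well as a message instance. A small sketch:
def _example_dict_coercion():
    request = job_service.GetHyperparameterTuningJobRequest({"name": "name/value"})
    assert request.name == "name/value"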
def test_get_hyperparameter_tuning_job_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, still work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
client.get_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetHyperparameterTuningJobRequest()
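# The empty call works because the client coerces a missing request into a
# default instance of the request type; sketch of the equivalent coercion:
def _example_none_coercion():
    request = job_service.GetHyperparameterTuningJobRequest(None)
    assert request == job_service.GetHyperparameterTuningJobRequest()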
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.GetHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_async_from_dict():
await test_get_hyperparameter_tuning_job_async(request_type=dict)
def test_get_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
hyperparameter_tuning_job.HyperparameterTuningJob()
)
await client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_hyperparameter_tuning_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_hyperparameter_tuning_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_hyperparameter_tuning_job(
job_service.GetHyperparameterTuningJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            hyperparameter_tuning_job.HyperparameterTuningJob()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_hyperparameter_tuning_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_hyperparameter_tuning_job(
job_service.GetHyperparameterTuningJobRequest(), name="name_value",
)
def test_list_hyperparameter_tuning_jobs(
transport: str = "grpc",
request_type=job_service.ListHyperparameterTuningJobsRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListHyperparameterTuningJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
assert response.next_page_token == "next_page_token_value"
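# The pager asserted above proxies unknown attributes (next_page_token here)
# to the underlying raw response. A minimal sketch of that delegation
# pattern, not the real pagers.ListHyperparameterTuningJobsPager:
class _ExamplePagerProxy:
    def __init__(self, response):
        self._response = response

    def __getattr__(self, name):
        return getattr(self._response, name)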
def test_list_hyperparameter_tuning_jobs_from_dict():
test_list_hyperparameter_tuning_jobs(request_type=dict)
def test_list_hyperparameter_tuning_jobs_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, still work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
client.list_hyperparameter_tuning_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListHyperparameterTuningJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListHyperparameterTuningJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async_from_dict():
await test_list_hyperparameter_tuning_jobs_async(request_type=dict)
def test_list_hyperparameter_tuning_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListHyperparameterTuningJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
call.return_value = job_service.ListHyperparameterTuningJobsResponse()
client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListHyperparameterTuningJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListHyperparameterTuningJobsResponse()
)
await client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_hyperparameter_tuning_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListHyperparameterTuningJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_hyperparameter_tuning_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_hyperparameter_tuning_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_hyperparameter_tuning_jobs(
job_service.ListHyperparameterTuningJobsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListHyperparameterTuningJobsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_hyperparameter_tuning_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_hyperparameter_tuning_jobs(
job_service.ListHyperparameterTuningJobsRequest(), parent="parent_value",
)
def test_list_hyperparameter_tuning_jobs_pager():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[], next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_hyperparameter_tuning_jobs(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(
isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
for i in results
)
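# Hedged sketch of what iterating the pager does: yield each page's items,
# then follow next_page_token until it comes back empty. The trailing
# RuntimeError in side_effect above would flag any extra fetch.
def _example_page_loop(fetch, request):
    while True:
        response = fetch(request)
        for job in response.hyperparameter_tuning_jobs:
            yield job
        if not response.next_page_token:
            break
        request = dict(request, page_token=response.next_page_token)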
def test_list_hyperparameter_tuning_jobs_pages():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[], next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async_pager():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[], next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
async_pager = await client.list_hyperparameter_tuning_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
for i in responses
)
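# The async pager consumed above implements __aiter__, so `async for` drives
# the same token-following loop asynchronously; a minimal illustrative shape:
class _ExampleAsyncPager:
    def __init__(self, items):
        self._items = items

    async def __aiter__(self):
        for item in self._items:
            yield item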
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async_pages():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[], next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_hyperparameter_tuning_jobs(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_delete_hyperparameter_tuning_job(
transport: str = "grpc",
request_type=job_service.DeleteHyperparameterTuningJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
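# Delete returns a long-running operation future. Against a real backend you
# would block on completion; shown as an unexecuted sketch because calling
# result() on the mocked operation above would never resolve:
def _example_wait_for_delete(client):
    lro = client.delete_hyperparameter_tuning_job(name="name_value")
    lro.result(timeout=300)  # waits until the server finishes the deletion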
def test_delete_hyperparameter_tuning_job_from_dict():
test_delete_hyperparameter_tuning_job(request_type=dict)
def test_delete_hyperparameter_tuning_job_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, still work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
client.delete_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_async_from_dict():
await test_delete_hyperparameter_tuning_job_async(request_type=dict)
def test_delete_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_hyperparameter_tuning_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_hyperparameter_tuning_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_hyperparameter_tuning_job(
job_service.DeleteHyperparameterTuningJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_hyperparameter_tuning_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_hyperparameter_tuning_job(
job_service.DeleteHyperparameterTuningJobRequest(), name="name_value",
)
def test_cancel_hyperparameter_tuning_job(
transport: str = "grpc",
request_type=job_service.CancelHyperparameterTuningJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert response is None
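# Cancel maps the server's google.protobuf.Empty response to Python None, so
# success is signalled by the absence of an exception; hedged usage sketch:
def _example_cancel(client):
    assert client.cancel_hyperparameter_tuning_job(name="name_value") is None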
def test_cancel_hyperparameter_tuning_job_from_dict():
test_cancel_hyperparameter_tuning_job(request_type=dict)
def test_cancel_hyperparameter_tuning_job_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, still work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
client.cancel_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CancelHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_async_from_dict():
await test_cancel_hyperparameter_tuning_job_async(request_type=dict)
def test_cancel_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = None
client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_cancel_hyperparameter_tuning_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_hyperparameter_tuning_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_hyperparameter_tuning_job(
job_service.CancelHyperparameterTuningJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_hyperparameter_tuning_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_hyperparameter_tuning_job(
job_service.CancelHyperparameterTuningJobRequest(), name="name_value",
)
def test_create_batch_prediction_job(
transport: str = "grpc", request_type=job_service.CreateBatchPredictionJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
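# The state assertions compare proto enums, which behave like ints. Sketch,
# assuming the published proto numbering where JOB_STATE_QUEUED == 1:
def _example_enum_is_int():
    assert job_state.JobState.JOB_STATE_QUEUED == 1
    assert int(job_state.JobState.JOB_STATE_QUEUED) == 1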
def test_create_batch_prediction_job_from_dict():
test_create_batch_prediction_job(request_type=dict)
def test_create_batch_prediction_job_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, still work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
client.create_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_create_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_create_batch_prediction_job_async_from_dict():
await test_create_batch_prediction_job_async(request_type=dict)
def test_create_batch_prediction_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateBatchPredictionJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
call.return_value = gca_batch_prediction_job.BatchPredictionJob()
client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateBatchPredictionJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_batch_prediction_job.BatchPredictionJob()
)
await client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_batch_prediction_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_batch_prediction_job.BatchPredictionJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_batch_prediction_job(
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].batch_prediction_job
mock_val = gca_batch_prediction_job.BatchPredictionJob(name="name_value")
assert arg == mock_val
def test_create_batch_prediction_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_batch_prediction_job(
job_service.CreateBatchPredictionJobRequest(),
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_batch_prediction_job.BatchPredictionJob()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_batch_prediction_job(
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].batch_prediction_job
mock_val = gca_batch_prediction_job.BatchPredictionJob(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_batch_prediction_job(
job_service.CreateBatchPredictionJobRequest(),
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
def test_get_batch_prediction_job(
transport: str = "grpc", request_type=job_service.GetBatchPredictionJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_get_batch_prediction_job_from_dict():
test_get_batch_prediction_job(request_type=dict)
def test_get_batch_prediction_job_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, still work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
client.get_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_get_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.GetBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_batch_prediction_job_async_from_dict():
await test_get_batch_prediction_job_async(request_type=dict)
def test_get_batch_prediction_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
call.return_value = batch_prediction_job.BatchPredictionJob()
client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
batch_prediction_job.BatchPredictionJob()
)
await client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_batch_prediction_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = batch_prediction_job.BatchPredictionJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_batch_prediction_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_batch_prediction_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_batch_prediction_job(
job_service.GetBatchPredictionJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            batch_prediction_job.BatchPredictionJob()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_batch_prediction_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_batch_prediction_job(
job_service.GetBatchPredictionJobRequest(), name="name_value",
)
def test_list_batch_prediction_jobs(
transport: str = "grpc", request_type=job_service.ListBatchPredictionJobsRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListBatchPredictionJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListBatchPredictionJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBatchPredictionJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_batch_prediction_jobs_from_dict():
test_list_batch_prediction_jobs(request_type=dict)
def test_list_batch_prediction_jobs_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request is None and no flattened fields are passed, still work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
client.list_batch_prediction_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListBatchPredictionJobsRequest()
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListBatchPredictionJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListBatchPredictionJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListBatchPredictionJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async_from_dict():
await test_list_batch_prediction_jobs_async(request_type=dict)
def test_list_batch_prediction_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListBatchPredictionJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
call.return_value = job_service.ListBatchPredictionJobsResponse()
client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
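# Note: the ("x-goog-request-params", ...) entry asserted above is the gRPC
# routing header, built from the request's resource fields (here ``parent``)
# so the backend can route the call without parsing the request body. A rough
# sketch of how it is produced (hypothetical helper, not used by these tests):
def _example_routing_header():
    # to_grpc_metadata leaves "/" unescaped, yielding roughly
    # ("x-goog-request-params", "parent=parent/value").
    return gapic_v1.routing_header.to_grpc_metadata((("parent", "parent/value"),))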
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListBatchPredictionJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListBatchPredictionJobsResponse()
)
await client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_batch_prediction_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListBatchPredictionJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_batch_prediction_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_batch_prediction_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_batch_prediction_jobs(
job_service.ListBatchPredictionJobsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListBatchPredictionJobsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_batch_prediction_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_batch_prediction_jobs(
job_service.ListBatchPredictionJobsRequest(), parent="parent_value",
)
def test_list_batch_prediction_jobs_pager():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[], next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[batch_prediction_job.BatchPredictionJob(),],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_batch_prediction_jobs(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(
isinstance(i, batch_prediction_job.BatchPredictionJob) for i in results
)
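# Illustrative sketch only; the helper below is hypothetical and not used by
# the generated tests. The flattened iteration above is equivalent to walking
# the pager's public ``pages`` iterator and collecting each page's repeated
# field, which makes the page boundaries explicit:
def _collect_batch_prediction_jobs_by_page(pager):
    # Each page is a raw ListBatchPredictionJobsResponse; extend the results
    # with its repeated batch_prediction_jobs field.
    results = []
    for page in pager.pages:
        results.extend(page.batch_prediction_jobs)
    return results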
def test_list_batch_prediction_jobs_pages():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[], next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[batch_prediction_job.BatchPredictionJob(),],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
pages = list(client.list_batch_prediction_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
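# Note: ``pages`` yields the raw ListBatchPredictionJobsResponse messages, and
# ``raw_page`` is a generated property that returns the response itself, which
# is why the loop above can read each page's next_page_token directly.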
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async_pager():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[], next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[batch_prediction_job.BatchPredictionJob(),],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
async_pager = await client.list_batch_prediction_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, batch_prediction_job.BatchPredictionJob) for i in responses
)
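# Note: awaiting the paged method returns the AsyncPager only after the first
# page has been fetched, so ``async_pager.next_page_token`` above already
# reflects the first mocked response ("abc") before iteration begins.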
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async_pages():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[], next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[batch_prediction_job.BatchPredictionJob(),],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_batch_prediction_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_delete_batch_prediction_job(
transport: str = "grpc", request_type=job_service.DeleteBatchPredictionJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
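# Note: DeleteBatchPredictionJob is a long-running operation; the client wraps
# the raw operations_pb2.Operation in an operation future, so the test asserts
# future.Future rather than a concrete response message.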
def test_delete_batch_prediction_job_from_dict():
test_delete_batch_prediction_job(request_type=dict)
def test_delete_batch_prediction_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
client.delete_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_async_from_dict():
await test_delete_batch_prediction_job_async(request_type=dict)
def test_delete_batch_prediction_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_batch_prediction_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_batch_prediction_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_batch_prediction_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_batch_prediction_job(
job_service.DeleteBatchPredictionJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_batch_prediction_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_batch_prediction_job(
job_service.DeleteBatchPredictionJobRequest(), name="name_value",
)
def test_cancel_batch_prediction_job(
transport: str = "grpc", request_type=job_service.CancelBatchPredictionJobRequest
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert response is None
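# Note: CancelBatchPredictionJob returns google.protobuf.Empty on the wire,
# which the generated client surfaces as ``None``; the mocked stub mirrors
# that by returning None.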
def test_cancel_batch_prediction_job_from_dict():
test_cancel_batch_prediction_job(request_type=dict)
def test_cancel_batch_prediction_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
client.cancel_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CancelBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_async_from_dict():
await test_cancel_batch_prediction_job_async(request_type=dict)
def test_cancel_batch_prediction_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
call.return_value = None
client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_cancel_batch_prediction_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_batch_prediction_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_batch_prediction_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_batch_prediction_job(
job_service.CancelBatchPredictionJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_batch_prediction_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_batch_prediction_job(
job_service.CancelBatchPredictionJobRequest(), name="name_value",
)
def test_create_model_deployment_monitoring_job(
transport: str = "grpc",
request_type=job_service.CreateModelDeploymentMonitoringJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
response = client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
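# Note: the ``gca_`` alias (gca_model_deployment_monitoring_job) is the code
# generator's convention for the aiplatform types module as consumed by
# mutation (create/update) requests and responses; it typically aliases the
# same types module that is imported elsewhere without the prefix.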
def test_create_model_deployment_monitoring_job_from_dict():
test_create_model_deployment_monitoring_job(request_type=dict)
def test_create_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
client.create_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
)
response = await client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_async_from_dict():
await test_create_model_deployment_monitoring_job_async(request_type=dict)
def test_create_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateModelDeploymentMonitoringJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = (
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateModelDeploymentMonitoringJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
await client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_model_deployment_monitoring_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_model_deployment_monitoring_job(
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
def test_create_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_model_deployment_monitoring_job(
job_service.CreateModelDeploymentMonitoringJobRequest(),
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_model_deployment_monitoring_job(
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_model_deployment_monitoring_job(
job_service.CreateModelDeploymentMonitoringJobRequest(),
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
def test_search_model_deployment_monitoring_stats_anomalies(
transport: str = "grpc",
request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
next_page_token="next_page_token_value",
)
response = client.search_model_deployment_monitoring_stats_anomalies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0]
== job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(
response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager
)
assert response.next_page_token == "next_page_token_value"
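# Note: despite the "search" name, this RPC follows the standard list
# pagination contract (a next_page_token plus the repeated monitoring_stats
# field), so it is surfaced through the same pager machinery as the List
# methods above.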
def test_search_model_deployment_monitoring_stats_anomalies_from_dict():
test_search_model_deployment_monitoring_stats_anomalies(request_type=dict)
def test_search_model_deployment_monitoring_stats_anomalies_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
client.search_model_deployment_monitoring_stats_anomalies()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0]
== job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
)
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async(
transport: str = "grpc_asyncio",
request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_model_deployment_monitoring_stats_anomalies(
request
)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0]
== job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(
response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager
)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict():
await test_search_model_deployment_monitoring_stats_anomalies_async(
request_type=dict
)
def test_search_model_deployment_monitoring_stats_anomalies_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
request.model_deployment_monitoring_job = "model_deployment_monitoring_job/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
call.return_value = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
)
client.search_model_deployment_monitoring_stats_anomalies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job=model_deployment_monitoring_job/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
request.model_deployment_monitoring_job = "model_deployment_monitoring_job/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
)
await client.search_model_deployment_monitoring_stats_anomalies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job=model_deployment_monitoring_job/value",
) in kw["metadata"]
def test_search_model_deployment_monitoring_stats_anomalies_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = "model_deployment_monitoring_job_value"
assert arg == mock_val
arg = args[0].deployed_model_id
mock_val = "deployed_model_id_value"
assert arg == mock_val
def test_search_model_deployment_monitoring_stats_anomalies_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.search_model_deployment_monitoring_stats_anomalies(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = "model_deployment_monitoring_job_value"
assert arg == mock_val
arg = args[0].deployed_model_id
mock_val = "deployed_model_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.search_model_deployment_monitoring_stats_anomalies(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
def test_search_model_deployment_monitoring_stats_anomalies_pager():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[], next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata(
                (("model_deployment_monitoring_job", ""),)
            ),
        )
pager = client.search_model_deployment_monitoring_stats_anomalies(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(
isinstance(
i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies
)
for i in results
)
def test_search_model_deployment_monitoring_stats_anomalies_pages():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[], next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
pages = list(
client.search_model_deployment_monitoring_stats_anomalies(request={}).pages
)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async_pager():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[], next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
async_pager = await client.search_model_deployment_monitoring_stats_anomalies(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(
i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies
)
for i in responses
)
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async_pages():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[], next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.search_model_deployment_monitoring_stats_anomalies(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_get_model_deployment_monitoring_job(
transport: str = "grpc",
request_type=job_service.GetModelDeploymentMonitoringJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
response = client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
def test_get_model_deployment_monitoring_job_from_dict():
test_get_model_deployment_monitoring_job(request_type=dict)
def test_get_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
client.get_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.GetModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
)
response = await client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_async_from_dict():
await test_get_model_deployment_monitoring_job_async(request_type=dict)
def test_get_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = (
model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
await client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_model_deployment_monitoring_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_model_deployment_monitoring_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
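# Minimal sketch of the flattened-argument convenience exercised above:
# keyword arguments are copied onto a fresh request message (hypothetical
# helper, not the generated implementation).
def _example_flatten_get_request(name=None):
    """Build a get request from flattened keyword arguments."""
    request = job_service.GetModelDeploymentMonitoringJobRequest()
    if name is not None:
        request.name = name
    return request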
def test_get_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_model_deployment_monitoring_job(
job_service.GetModelDeploymentMonitoringJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_model_deployment_monitoring_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_model_deployment_monitoring_job(
job_service.GetModelDeploymentMonitoringJobRequest(), name="name_value",
)
def test_list_model_deployment_monitoring_jobs(
transport: str = "grpc",
request_type=job_service.ListModelDeploymentMonitoringJobsRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_model_deployment_monitoring_jobs_from_dict():
test_list_model_deployment_monitoring_jobs(request_type=dict)
def test_list_model_deployment_monitoring_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
client.list_model_deployment_monitoring_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListModelDeploymentMonitoringJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListModelDeploymentMonitoringJobsResponse(
next_page_token="<PASSWORD>",
)
)
response = await client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async_from_dict():
await test_list_model_deployment_monitoring_jobs_async(request_type=dict)
def test_list_model_deployment_monitoring_jobs_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListModelDeploymentMonitoringJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListModelDeploymentMonitoringJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListModelDeploymentMonitoringJobsResponse()
)
await client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_model_deployment_monitoring_jobs_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_model_deployment_monitoring_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_model_deployment_monitoring_jobs_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_model_deployment_monitoring_jobs(
job_service.ListModelDeploymentMonitoringJobsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            job_service.ListModelDeploymentMonitoringJobsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_model_deployment_monitoring_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_model_deployment_monitoring_jobs(
job_service.ListModelDeploymentMonitoringJobsRequest(),
parent="parent_value",
)
def test_list_model_deployment_monitoring_jobs_pager():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[], next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
pager = client.list_model_deployment_monitoring_jobs(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(
isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
for i in results
)
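# A minimal sketch of the page-flattening loop the pager test exercises:
# items are yielded page by page until next_page_token comes back empty
# (hypothetical stand-in for the generated pagers module).
def _example_iterate_jobs(fetch_page):
    """Yield monitoring jobs across pages, given a page-fetching callable."""
    token = ""
    while True:
        page = fetch_page(page_token=token)
        for job in page.model_deployment_monitoring_jobs:
            yield job
        token = page.next_page_token
        if not token:
            break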
def test_list_model_deployment_monitoring_jobs_pages():
    client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[], next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async_pager():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[], next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
async_pager = await client.list_model_deployment_monitoring_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
for i in responses
)
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async_pages():
    client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[], next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_model_deployment_monitoring_jobs(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
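# Async counterpart sketch: the async pager exposes ``pages`` as an async
# iterator, so both items and raw pages are consumed with ``async for``
# (hedged restatement of the two async pager tests above).
async def _example_collect_async_pages(async_pager):
    """Collect raw pages from an async pager."""
    return [page async for page in async_pager.pages]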
def test_update_model_deployment_monitoring_job(
transport: str = "grpc",
request_type=job_service.UpdateModelDeploymentMonitoringJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_model_deployment_monitoring_job_from_dict():
test_update_model_deployment_monitoring_job(request_type=dict)
def test_update_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
client.update_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.UpdateModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_async_from_dict():
await test_update_model_deployment_monitoring_job_async(request_type=dict)
def test_update_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.UpdateModelDeploymentMonitoringJobRequest()
request.model_deployment_monitoring_job.name = (
"model_deployment_monitoring_job.name/value"
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.UpdateModelDeploymentMonitoringJobRequest()
request.model_deployment_monitoring_job.name = (
"model_deployment_monitoring_job.name/value"
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value",
) in kw["metadata"]
def test_update_model_deployment_monitoring_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_model_deployment_monitoring_job(
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
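# For reference, FieldMask paths name the message fields an update should
# touch; a minimal sketch using the real google.protobuf API (the paths shown
# are illustrative, not taken from these tests).
def _example_update_mask():
    """Build a FieldMask selecting two fields for update."""
    return field_mask_pb2.FieldMask(paths=["display_name", "labels"])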
def test_update_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_model_deployment_monitoring_job(
job_service.UpdateModelDeploymentMonitoringJobRequest(),
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_model_deployment_monitoring_job(
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_model_deployment_monitoring_job(
job_service.UpdateModelDeploymentMonitoringJobRequest(),
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_delete_model_deployment_monitoring_job(
transport: str = "grpc",
request_type=job_service.DeleteModelDeploymentMonitoringJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_model_deployment_monitoring_job_from_dict():
test_delete_model_deployment_monitoring_job(request_type=dict)
def test_delete_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
client.delete_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_async_from_dict():
await test_delete_model_deployment_monitoring_job_async(request_type=dict)
def test_delete_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_model_deployment_monitoring_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_model_deployment_monitoring_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_model_deployment_monitoring_job(
job_service.DeleteModelDeploymentMonitoringJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_model_deployment_monitoring_job(
job_service.DeleteModelDeploymentMonitoringJobRequest(), name="name_value",
)
def test_pause_model_deployment_monitoring_job(
transport: str = "grpc",
request_type=job_service.PauseModelDeploymentMonitoringJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
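# Note: Pause (and Resume below) map to google.protobuf.Empty on the wire,
# which the generated client surfaces as a plain ``None`` return value, as
# the assertion above reflects.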
def test_pause_model_deployment_monitoring_job_from_dict():
test_pause_model_deployment_monitoring_job(request_type=dict)
def test_pause_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
client.pause_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.PauseModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_async_from_dict():
await test_pause_model_deployment_monitoring_job_async(request_type=dict)
def test_pause_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.PauseModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = None
client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.PauseModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_pause_model_deployment_monitoring_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.pause_model_deployment_monitoring_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_pause_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.pause_model_deployment_monitoring_job(
job_service.PauseModelDeploymentMonitoringJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.pause_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.pause_model_deployment_monitoring_job(
job_service.PauseModelDeploymentMonitoringJobRequest(), name="name_value",
)
def test_resume_model_deployment_monitoring_job(
transport: str = "grpc",
request_type=job_service.ResumeModelDeploymentMonitoringJobRequest,
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_resume_model_deployment_monitoring_job_from_dict():
test_resume_model_deployment_monitoring_job(request_type=dict)
def test_resume_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
client.resume_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.ResumeModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_async_from_dict():
await test_resume_model_deployment_monitoring_job_async(request_type=dict)
def test_resume_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ResumeModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = None
client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ResumeModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_resume_model_deployment_monitoring_job_flattened():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.resume_model_deployment_monitoring_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_resume_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.resume_model_deployment_monitoring_job(
job_service.ResumeModelDeploymentMonitoringJobRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.resume_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.resume_model_deployment_monitoring_job(
job_service.ResumeModelDeploymentMonitoringJobRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
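# Note: a fully constructed transport instance already carries its own
# credentials and scopes, so the client rejects credentials, a credentials
# file, or scopes supplied alongside one; the three ValueError cases above
# cover each option.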
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = JobServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.JobServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
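# A minimal sketch of the Application Default Credentials lookup that an
# unconfigured transport falls back on (google.auth.default is the real API;
# resolution spans env vars, gcloud config, and the metadata server).
def _example_default_credentials():
    """Resolve ADC the way an unconfigured transport does."""
    credentials, project_id = google.auth.default()
    return credentials, project_id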
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = JobServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.JobServiceGrpcTransport,)
def test_job_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.JobServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_job_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.JobServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_custom_job",
"get_custom_job",
"list_custom_jobs",
"delete_custom_job",
"cancel_custom_job",
"create_data_labeling_job",
"get_data_labeling_job",
"list_data_labeling_jobs",
"delete_data_labeling_job",
"cancel_data_labeling_job",
"create_hyperparameter_tuning_job",
"get_hyperparameter_tuning_job",
"list_hyperparameter_tuning_jobs",
"delete_hyperparameter_tuning_job",
"cancel_hyperparameter_tuning_job",
"create_batch_prediction_job",
"get_batch_prediction_job",
"list_batch_prediction_jobs",
"delete_batch_prediction_job",
"cancel_batch_prediction_job",
"create_model_deployment_monitoring_job",
"search_model_deployment_monitoring_stats_anomalies",
"get_model_deployment_monitoring_job",
"list_model_deployment_monitoring_jobs",
"update_model_deployment_monitoring_job",
"delete_model_deployment_monitoring_job",
"pause_model_deployment_monitoring_job",
"resume_model_deployment_monitoring_job",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
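# Sketch of the abstract-transport pattern the loop above exercises: each RPC
# on the base class is a stub that raises until a concrete transport (gRPC or
# gRPC asyncio) overrides it.
class _ExampleBaseTransport:
    def get_model_deployment_monitoring_job(self, request):
        raise NotImplementedError()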
def test_job_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.JobServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_job_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.JobServiceTransport()
adc.assert_called_once()
def test_job_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
JobServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport,],
)
def test_job_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.JobServiceGrpcTransport, grpc_helpers),
(transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_job_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_job_service_host_no_port():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_job_service_host_with_port():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_job_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.JobServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_job_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.JobServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_job_service_grpc_lro_client():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_job_service_grpc_lro_async_client():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_batch_prediction_job_path():
project = "squid"
location = "clam"
batch_prediction_job = "whelk"
expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(
project=project, location=location, batch_prediction_job=batch_prediction_job,
)
actual = JobServiceClient.batch_prediction_job_path(
project, location, batch_prediction_job
)
assert expected == actual
def test_parse_batch_prediction_job_path():
expected = {
"project": "octopus",
"location": "oyster",
"batch_prediction_job": "nudibranch",
}
path = JobServiceClient.batch_prediction_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_batch_prediction_job_path(path)
assert expected == actual
def test_custom_job_path():
project = "cuttlefish"
location = "mussel"
custom_job = "winkle"
expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
project=project, location=location, custom_job=custom_job,
)
actual = JobServiceClient.custom_job_path(project, location, custom_job)
assert expected == actual
def test_parse_custom_job_path():
expected = {
"project": "nautilus",
"location": "scallop",
"custom_job": "abalone",
}
path = JobServiceClient.custom_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_custom_job_path(path)
assert expected == actual
def test_data_labeling_job_path():
project = "squid"
location = "clam"
data_labeling_job = "whelk"
expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(
project=project, location=location, data_labeling_job=data_labeling_job,
)
actual = JobServiceClient.data_labeling_job_path(
project, location, data_labeling_job
)
assert expected == actual
def test_parse_data_labeling_job_path():
expected = {
"project": "octopus",
"location": "oyster",
"data_labeling_job": "nudibranch",
}
path = JobServiceClient.data_labeling_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_data_labeling_job_path(path)
assert expected == actual
def test_dataset_path():
project = "cuttlefish"
location = "mussel"
dataset = "winkle"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
actual = JobServiceClient.dataset_path(project, location, dataset)
assert expected == actual
def test_parse_dataset_path():
expected = {
"project": "nautilus",
"location": "scallop",
"dataset": "abalone",
}
path = JobServiceClient.dataset_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_dataset_path(path)
assert expected == actual
def test_endpoint_path():
project = "squid"
location = "clam"
endpoint = "whelk"
expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
project=project, location=location, endpoint=endpoint,
)
actual = JobServiceClient.endpoint_path(project, location, endpoint)
assert expected == actual
def test_parse_endpoint_path():
expected = {
"project": "octopus",
"location": "oyster",
"endpoint": "nudibranch",
}
path = JobServiceClient.endpoint_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_endpoint_path(path)
assert expected == actual
def test_hyperparameter_tuning_job_path():
project = "cuttlefish"
location = "mussel"
hyperparameter_tuning_job = "winkle"
expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(
project=project,
location=location,
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
actual = JobServiceClient.hyperparameter_tuning_job_path(
project, location, hyperparameter_tuning_job
)
assert expected == actual
def test_parse_hyperparameter_tuning_job_path():
expected = {
"project": "nautilus",
"location": "scallop",
"hyperparameter_tuning_job": "abalone",
}
path = JobServiceClient.hyperparameter_tuning_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path)
assert expected == actual
def test_model_path():
project = "squid"
location = "clam"
model = "whelk"
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
actual = JobServiceClient.model_path(project, location, model)
assert expected == actual
def test_parse_model_path():
expected = {
"project": "octopus",
"location": "oyster",
"model": "nudibranch",
}
path = JobServiceClient.model_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_model_path(path)
assert expected == actual
def test_model_deployment_monitoring_job_path():
project = "cuttlefish"
location = "mussel"
model_deployment_monitoring_job = "winkle"
expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(
project=project,
location=location,
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
actual = JobServiceClient.model_deployment_monitoring_job_path(
project, location, model_deployment_monitoring_job
)
assert expected == actual
def test_parse_model_deployment_monitoring_job_path():
expected = {
"project": "nautilus",
"location": "scallop",
"model_deployment_monitoring_job": "abalone",
}
path = JobServiceClient.model_deployment_monitoring_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path)
assert expected == actual
def test_network_path():
project = "squid"
network = "clam"
expected = "projects/{project}/global/networks/{network}".format(
project=project, network=network,
)
actual = JobServiceClient.network_path(project, network)
assert expected == actual
def test_parse_network_path():
expected = {
"project": "whelk",
"network": "octopus",
}
path = JobServiceClient.network_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_network_path(path)
assert expected == actual
def test_tensorboard_path():
project = "oyster"
location = "nudibranch"
tensorboard = "cuttlefish"
expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(
project=project, location=location, tensorboard=tensorboard,
)
actual = JobServiceClient.tensorboard_path(project, location, tensorboard)
assert expected == actual
def test_parse_tensorboard_path():
expected = {
"project": "mussel",
"location": "winkle",
"tensorboard": "nautilus",
}
path = JobServiceClient.tensorboard_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_tensorboard_path(path)
assert expected == actual
def test_trial_path():
project = "scallop"
location = "abalone"
study = "squid"
trial = "clam"
expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(
project=project, location=location, study=study, trial=trial,
)
actual = JobServiceClient.trial_path(project, location, study, trial)
assert expected == actual
def test_parse_trial_path():
expected = {
"project": "whelk",
"location": "octopus",
"study": "oyster",
"trial": "nudibranch",
}
path = JobServiceClient.trial_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_trial_path(path)
assert expected == actual
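# Note: the *_path helpers exercised above render fully qualified resource
# names, e.g. JobServiceClient.trial_path("squid", "clam", "whelk", "oyster")
# -> "projects/squid/locations/clam/studies/whelk/trials/oyster", and each
# parse_*_path classmethod inverts the corresponding template.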
def test_common_billing_account_path():
billing_account = "cut<PASSWORD>"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = JobServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = JobServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = JobServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = JobServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = JobServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = JobServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = JobServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = JobServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = JobServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = JobServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.JobServiceTransport, "_prep_wrapped_messages"
) as prep:
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.JobServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = JobServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
|
package.py | saimohithnaag/StockPredictor | 1,761 | 11065661 | <gh_stars>1000+
# Inspired by npm's package.json file
name = 'bulbea'
version = '0.1.0'
release = '0.1.0'
description = 'A neural stock market predictor and model builder'
long_description = ['README.md']
keywords = ['neural', 'network', 'machine', 'deep',
'learning', 'tensorflow', 'stock', 'market', 'prediction']
authors = [
{ 'name': '<NAME>', 'email': '<EMAIL>' }
]
maintainers = [
{ 'name': '<NAME>', 'email': '<EMAIL>' }
]
license = 'Apache 2.0'
modules = [
'bulbea',
'bulbea.config',
'bulbea._util',
'bulbea.entity',
'bulbea.learn',
'bulbea.learn.models',
'bulbea.learn.evaluation',
'bulbea.learn.sentiment',
'bulbea.app',
'bulbea.app.client',
'bulbea.app.server',
'bulbea.app.config'
]
test_modules = [
'bulbea._util.tests'
]
homepage = 'https://achillesrasquinha.github.io/bulbea'
github_username = 'achillesrasquinha'
github_repository = 'bulbea'
github_url = '{baseurl}/{username}/{repository}'.format(
baseurl = 'https://github.com',
username = github_username,
repository = github_repository)
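# e.g. github_url evaluates to 'https://github.com/achillesrasquinha/bulbea'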
|
oss_src/unity/python/sframe/test/test_sarray_builder.py | venkattgg/venkey | 493 | 11065665 | '''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from ..data_structures.sarray_builder import SArrayBuilder
import unittest
import array
import datetime as dt
from ..util.timezone import GMT
class SArrayBuilderTest(unittest.TestCase):
def __test_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype(), _type)
self.assertEqual(len(_sarray), len(_data))
self.assertSequenceEqual(list(_sarray.head(_sarray.size())), _data)
def __test_append(self, sb, data, dtype):
for i in data:
sb.append(i)
self.assertEquals(sb.get_type(), dtype)
sa = sb.close()
self.__test_equal(sa, data, dtype)
def __test_append_multiple(self, sb, data, dtype):
sb.append_multiple(data)
self.assertEquals(sb.get_type(), dtype)
sa = sb.close()
self.__test_equal(sa, data, dtype)
def test_basic(self):
data_to_test = [([1,-1,None,2],int),
([i for i in range(20000)], int),
([None, 1.0, -1.0, 2.3],float),
(["hi", None, "hello", "None"],str),
([dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0)),None],dt.datetime),
([["hi",1],None,["hi",2,3],["hello"]],list),
([array.array('d',[1.0,2.0]),array.array('d',[3.0,4.0]),None],array.array),
([{'a':1,'b':2},{'c':3,'d':4},None],dict),
]
for i in data_to_test:
sb = SArrayBuilder(i[1])
self.__test_append(sb, i[0], i[1])
sb = SArrayBuilder(i[1])
self.__test_append_multiple(sb, i[0], i[1])
def test_history(self):
sb = SArrayBuilder(int, history_size=10)
sb.append_multiple((i for i in range(8)))
hist = sb.read_history(3)
self.assertEquals(hist,[5,6,7])
hist = sb.read_history(20)
self.assertEquals(hist, [i for i in range(8)])
hist = sb.read_history()
self.assertEquals(hist, [i for i in range(8)])
sb.append_multiple((i for i in range(5)))
hist = sb.read_history(10)
self.assertEquals(hist, [3,4,5,6,7,0,1,2,3,4])
sb.append(50)
hist = sb.read_history(10)
self.assertEquals(hist, [4,5,6,7,0,1,2,3,4,50])
hist = sb.read_history(-1)
self.assertEquals(hist, [])
hist = sb.read_history(0)
self.assertEquals(hist, [])
sa = sb.close()
self.__test_equal(sa,[i for i in range(8)] + [i for i in range(5)] + [50],int)
def test_segments(self):
sb = SArrayBuilder(int, num_segments=4)
sb.append_multiple((i for i in range(20,30)), segment=2)
sb.append_multiple((i for i in range(10,20)), segment=1)
sb.append_multiple((i for i in range(30,40)), segment=3)
sb.append_multiple((i for i in range(0,10)), segment=0)
hist = sb.read_history(3, segment=0)
self.assertSequenceEqual(hist, [7,8,9])
hist = sb.read_history(3, segment=1)
self.assertSequenceEqual(hist, [17,18,19])
hist = sb.read_history(3, segment=2)
self.assertSequenceEqual(hist, [27,28,29])
hist = sb.read_history(3, segment=3)
self.assertSequenceEqual(hist, [37,38,39])
with self.assertRaises(RuntimeError):
sb.read_history(3, segment=99)
sa = sb.close()
self.__test_equal(sa, range(40), int)
|
SPOJ/Week 1/rpn/reverse.py | VastoLorde95/Competitive-Programming | 170 | 11065667 | no_of_iter = input()
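# This appears to be SPOJ ADDREV ("Adding Reversed Numbers"): each number's
# digits arrive least-significant-first, so schoolbook addition can scan both
# strings left to right while carrying forward, and printing via int() strips
# the leading zeros a reversed sum must drop. E.g. "24 1" -> 42 + 1 = 43 -> 34.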
N = 0
count = 0
lis = []
while count < no_of_iter:
num1, num2 = raw_input().split()
lis.append((num1,num2))
count+=1
for sample in lis:
num1, num2 = sample
l1, l2 = len(num1), len(num2)
number = ''
l, i, carry = min(l1, l2), 0, 0
while i < l:
x = int(num1[i]) + int(num2[i]) + carry
summ = x%10
number += str(summ)
carry, i = x/10, i + 1
if carry == 0:
if l1>l2:
number += num1[i:]
elif l2>l1:
number += num2[i:]
else:
if l1>l2:
while i <l1:
x = carry + int(num1[i])
summ = x % 10
number += str(summ)
carry, i = x/10, i + 1
elif l2>l1:
while i <l2:
x = carry + int(num2[i])
summ = x % 10
number += str(summ)
carry, i = x/10, i + 1
else:
number += str(carry)
carry = 0
if carry != 0:
number +=str(carry)
print int(number)
|
vunit/parsing/verilog/preprocess.py | eataesierp/vunit | 507 | 11065691 | <reponame>eataesierp/vunit<gh_stars>100-1000
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2021, <NAME> <EMAIL>
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
"""
Verilog parsing functionality
"""
from pathlib import Path
import logging
from vunit.parsing.tokenizer import (
TokenStream,
Token,
add_previous,
strip_previous,
EOFException,
LocationException,
)
from vunit.parsing.verilog.tokens import (
COMMA,
EQUAL,
IDENTIFIER,
LBRACE,
LBRACKET,
LPAR,
NEWLINE,
PREPROCESSOR,
RBRACE,
RBRACKET,
RPAR,
STRING,
WHITESPACE,
)
from vunit.ostools import read_file
LOGGER = logging.getLogger(__name__)
class VerilogPreprocessor(object):
"""
A Verilog preprocessor
"""
def __init__(self, tokenizer):
self._tokenizer = tokenizer
self._macro_trace = set()
self._include_trace = set()
def preprocess(self, tokens, defines=None, include_paths=None, included_files=None):
"""
Entry point of preprocessing
"""
self._include_trace = set()
self._macro_trace = set()
return self._preprocess(tokens, defines, include_paths, included_files)
def _preprocess(self, tokens, defines=None, include_paths=None, included_files=None):
"""
Pre-process tokens while filling in defines
"""
stream = TokenStream(tokens)
include_paths = [] if include_paths is None else include_paths
included_files = [] if included_files is None else included_files
defines = {} if defines is None else defines
result = []
while not stream.eof:
token = stream.pop()
if not token.kind == PREPROCESSOR:
result.append(token)
continue
try:
result += self.preprocessor(token, stream, defines, include_paths, included_files)
except LocationException as exc:
exc.log(LOGGER)
return result
def preprocessor( # pylint: disable=too-many-arguments,too-many-branches
self, token, stream, defines, include_paths, included_files
):
"""
Handle preprocessor token
"""
if token.value == "define":
macro = define(token, stream)
if macro is not None:
defines[macro.name] = macro
elif token.value == "undef":
undef(token, stream, defines)
elif token.value in ("undefineall", "resetall"):
defines.clear()
elif token.value == "include":
return self.include(token, stream, include_paths, included_files, defines)
elif token.value in ("ifdef", "ifndef"):
try:
tokens = self.if_statement(token, stream, defines)
return self._preprocess(
tokens,
defines=defines,
include_paths=include_paths,
included_files=included_files,
)
except EOFException as exe:
raise LocationException.warning(f"EOF reached when parsing `{token.value!s}", token.location) from exe
elif token.value in ("celldefine", "endcelldefine", "nounconnected_drive"):
# Ignored
pass
elif token.value in ("timescale", "default_nettype", "unconnected_drive"):
# Ignore directive and arguments
stream.skip_until(NEWLINE)
elif token.value == "pragma":
stream.skip_while(WHITESPACE)
pp_token = stream.pop()
if pp_token.value == "protect":
stream.skip_while(WHITESPACE)
token = stream.pop()
if token.value == "begin_protected":
self._skip_protected_region(stream)
elif token.value in defines:
return self.expand_macro(token, stream, defines, include_paths, included_files)
else:
raise LocationException.debug("Verilog undefined name", token.location)
return []
@staticmethod
def _skip_protected_region(stream):
"""
Skip a protected region
`pragma protect begin_protected
Skipped
`pragma protect end_protected
"""
while not stream.eof:
stream.skip_while(WHITESPACE)
token = stream.pop()
if token.kind == PREPROCESSOR and token.value == "pragma":
stream.skip_while(WHITESPACE)
token = stream.pop()
if token.value == "protect":
stream.skip_while(WHITESPACE)
token = stream.pop()
if token.value == "end_protected":
return
def expand_macro( # pylint: disable=too-many-arguments
self, macro_token, stream, defines, include_paths, included_files
):
"""
Expand a macro
"""
macro = defines[macro_token.value]
macro_point = (
strip_previous(macro_token.location),
hash(frozenset(defines.keys())),
)
if macro_point in self._macro_trace:
raise LocationException.error(
f"Circular macro expansion of {macro_token.value!s} detected",
macro_token.location,
)
self._macro_trace.add(macro_point)
tokens = self._preprocess(
macro.expand_from_stream(macro_token, stream, previous=macro_token.location),
defines=defines,
include_paths=include_paths,
included_files=included_files,
)
self._macro_trace.remove(macro_point)
return tokens
@staticmethod
def if_statement(if_token, stream, defines):
"""
Handle if statement
"""
def check_arg(if_token, arg):
"""
Check the define argument of an if statement
"""
if arg.kind != IDENTIFIER:
raise LocationException.warning(f"Bad argument to `{if_token.value!s}", arg.location)
stream.skip_while(NEWLINE)
def determine_if_taken(if_token, arg):
"""
Determine if the branch was taken
"""
if if_token.value in ("ifdef", "elsif"):
return arg.value in defines
if if_token.value == "ifndef":
return arg.value not in defines
raise ValueError(f"Invalid if token {if_token.value!r}")
result = []
stream.skip_while(WHITESPACE)
arg = stream.pop()
check_arg(if_token, arg)
taken = determine_if_taken(if_token, arg)
any_taken = taken
count = 1
while True:
token = stream.pop()
if token.kind == PREPROCESSOR:
if token.value in ("ifdef", "ifndef"):
count += 1
elif token.value == "endif":
count -= 1
if count == 0:
break
if count == 1 and (token.kind, token.value) == (PREPROCESSOR, "else"):
stream.skip_while(NEWLINE)
if not any_taken:
taken = True
any_taken = True
else:
taken = False
elif count == 1 and (token.kind, token.value) == (PREPROCESSOR, "elsif"):
stream.skip_while(WHITESPACE)
arg = stream.pop()
check_arg(token, arg)
stream.skip_while(NEWLINE)
if not any_taken:
taken = determine_if_taken(token, arg)
any_taken = taken
else:
taken = False
elif taken:
result.append(token)
stream.skip_while(NEWLINE)
return result
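    # e.g. `ifdef FOO / a / `elsif BAR / b / `else / c / `endif keeps exactly
    # the first branch whose condition holds (or the `else arm if none do).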
def include(self, token, stream, include_paths, included_files, defines): # pylint: disable=too-many-arguments
"""
Handle `include directive
"""
stream.skip_while(WHITESPACE)
try:
tok = stream.pop()
except EOFException as exe:
raise LocationException.warning("EOF reached when parsing `include argument", token.location) from exe
if tok.kind == PREPROCESSOR:
if tok.value in defines:
macro = defines[tok.value]
else:
raise LocationException.warning("Verilog `include argument not defined", tok.location)
expanded_tokens = self.expand_macro(tok, stream, defines, include_paths, included_files)
# pylint crashes when trying to fix the warning below
if len(expanded_tokens) == 0: # pylint: disable=len-as-condition
raise LocationException.warning(
f"Verilog `include has bad argument, empty define `{macro.name!s}",
tok.location,
)
if expanded_tokens[0].kind != STRING:
raise LocationException.warning("Verilog `include has bad argument", expanded_tokens[0].location)
file_name_tok = expanded_tokens[0]
elif tok.kind == STRING:
file_name_tok = tok
else:
raise LocationException.warning("Verilog `include bad argument", tok.location)
included_file = find_included_file(include_paths, file_name_tok.value)
included_files.append((file_name_tok.value, included_file))
if included_file is None:
            # Debug level only, since tools provide many builtin includes
raise LocationException.debug(
f"Could not find `include file {file_name_tok.value!s}",
file_name_tok.location,
)
include_point = (
strip_previous(token.location),
hash(frozenset(defines.keys())),
)
if include_point in self._include_trace:
raise LocationException.error(
f"Circular `include of {file_name_tok.value!s} detected",
file_name_tok.location,
)
self._include_trace.add(include_point)
included_tokens = self._tokenizer.tokenize(
read_file(included_file),
file_name=included_file,
previous_location=token.location,
)
included_tokens = self._preprocess(included_tokens, defines, include_paths, included_files)
self._include_trace.remove(include_point)
return included_tokens
def find_included_file(include_paths, file_name):
"""
Find the file to include given include_paths
"""
for include_path in include_paths:
full_name = str((Path(include_path) / file_name).resolve())
if Path(full_name).exists():
return full_name
return None
def undef(undef_token, stream, defines):
"""
Handles undef directive
"""
stream.skip_while(WHITESPACE, NEWLINE)
try:
name_token = stream.pop()
except EOFException as exe:
raise LocationException.warning("EOF reached when parsing `undef", undef_token.location) from exe
if name_token.kind != IDENTIFIER:
raise LocationException.warning("Bad argument to `undef", name_token.location)
if name_token.value not in defines:
raise LocationException.warning("`undef argument was not previously defined", name_token.location)
del defines[name_token.value]
def define(define_token, stream):
"""
Handle a `define directive
"""
stream.skip_while(WHITESPACE, NEWLINE)
try:
name_token = stream.pop()
except EOFException as exe:
raise LocationException.warning("Verilog `define without argument", define_token.location) from exe
if name_token.kind != IDENTIFIER:
raise LocationException.warning("Verilog `define invalid name", name_token.location)
name = name_token.value
try:
token = stream.pop()
except EOFException:
# Empty define
return Macro(name)
if token.kind in (NEWLINE,):
# Empty define
return Macro(name)
if token.kind in (WHITESPACE,):
# Define without arguments
args = tuple()
defaults = {}
elif token.kind == LPAR:
lpar_token = token
args = tuple()
defaults = {}
try:
while token.kind != RPAR:
if token.kind == IDENTIFIER:
argname = token.value
args = args + (argname,)
token = stream.pop()
if token.kind == EQUAL:
token = stream.pop()
defaults[argname] = [token]
token = stream.pop()
else:
token = stream.pop()
except EOFException as exe:
raise LocationException.warning(
"EOF reached when parsing `define argument list", lpar_token.location
) from exe
stream.skip_while(WHITESPACE)
start = stream.idx
end = stream.skip_until(NEWLINE)
if not stream.eof:
stream.pop()
return Macro(name, tokens=stream.slice(start, end), args=args, defaults=defaults)
class Macro(object):
"""
A `define macro with zero or more arguments
"""
def __init__(self, name, tokens=None, args=tuple(), defaults=None):
self.name = name
self.tokens = [] if tokens is None else tokens
self.args = args
self.defaults = {} if defaults is None else defaults
@property
def num_args(self):
return len(self.args)
def __repr__(self):
return f"Macro({self.name!r}, {self.tokens!r} {self.args!r}, {self.defaults!r})"
def expand(self, values, previous):
"""
Expand macro with actual values, returns a list of expanded tokens
"""
tokens = []
for token in self.tokens:
if token.kind == IDENTIFIER and token.value in self.args:
idx = self.args.index(token.value)
value = values[idx]
tokens += value
else:
tokens.append(token)
return [Token(tok.kind, tok.value, add_previous(tok.location, previous)) for tok in tokens]
def __eq__(self, other):
return (
(self.name == other.name)
and (self.tokens == other.tokens)
and (self.args == other.args)
and (self.defaults == other.defaults)
)
def expand_from_stream(self, token, stream, previous=None):
"""
Expand macro consuming arguments from the stream
returns the expanded tokens
"""
if self.num_args == 0:
values = []
else:
try:
values = self._parse_macro_actuals(token, stream)
except EOFException as exe:
raise LocationException.warning(
"EOF reached when parsing `define actuals", location=token.location
) from exe
# Bind defaults
if len(values) < len(self.args):
for i in range(len(values), len(self.args)):
name = self.args[i]
if name in self.defaults:
values.append(self.defaults[name])
else:
raise LocationException.warning(f"Missing value for argument {name!s}", token.location)
elif len(values) > len(self.args):
raise LocationException.warning(
f"Too many arguments got {len(values):d} expected {len(self.args):d}",
token.location,
)
return self.expand(values, previous)
@staticmethod
def _parse_macro_actuals(define_token, stream):
"""
Parse the actual values of macro call such as
1 2 in `macro(1, 2)
"""
stream.skip_while(WHITESPACE)
token = stream.pop()
if token.kind != LPAR:
raise LocationException.warning("Bad `define argument list", define_token.location)
token = stream.pop()
value = []
values = []
bracket_count = 0
brace_count = 0
par_count = 0
while not (token.kind == RPAR and par_count == 0):
if token.kind is LBRACKET:
bracket_count += 1
elif token.kind is RBRACKET:
bracket_count += -1
elif token.kind is LBRACE:
brace_count += 1
elif token.kind is RBRACE:
brace_count += -1
elif token.kind is LPAR:
par_count += 1
elif token.kind is RPAR:
par_count += -1
value_ok = token.kind == COMMA and bracket_count == 0 and brace_count == 0 and par_count == 0
if value_ok:
values.append(value)
value = []
else:
value.append(token)
token = stream.pop()
values.append(value)
return values
|
ext/totals.py | sskras/timewarrior | 810 | 11065733 | <gh_stars>100-1000
#!/usr/bin/env python3
###############################################################################
#
# Copyright 2016 - 2021, <NAME>, <NAME>, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import datetime
import json
import sys
from dateutil import tz
DATEFORMAT = "%Y%m%dT%H%M%SZ"
def format_seconds(seconds):
"""Convert seconds to a formatted string
Convert seconds: 3661
    To formatted: "   1:01:01"
"""
hours = seconds // 3600
minutes = seconds % 3600 // 60
seconds = seconds % 60
return "{:4d}:{:02d}:{:02d}".format(hours, minutes, seconds)
def calculate_totals(input_stream):
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
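    # timew feeds extensions a header of "key: value" lines, a blank line,
    # then the filtered intervals as a JSON array, all on stdin.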
# Extract the configuration settings.
header = 1
configuration = dict()
body = ""
for line in input_stream:
if header:
if line == "\n":
header = 0
else:
fields = line.strip().split(": ", 2)
if len(fields) == 2:
configuration[fields[0]] = fields[1]
else:
configuration[fields[0]] = ""
else:
body += line
# Sum the seconds tracked by tag.
totals = dict()
untagged = None
j = json.loads(body)
for object in j:
start = datetime.datetime.strptime(object["start"], DATEFORMAT)
if "end" in object:
end = datetime.datetime.strptime(object["end"], DATEFORMAT)
else:
end = datetime.datetime.utcnow()
tracked = end - start
if "tags" not in object or object["tags"] == []:
if untagged is None:
untagged = tracked
else:
untagged += tracked
else:
for tag in object["tags"]:
if tag in totals:
totals[tag] += tracked
else:
totals[tag] = tracked
# Determine largest tag width.
max_width = len("Total")
for tag in totals:
if len(tag) > max_width:
max_width = len(tag)
if "temp.report.start" not in configuration:
return ["There is no data in the database"]
start_utc = datetime.datetime.strptime(configuration["temp.report.start"], DATEFORMAT)
start_utc = start_utc.replace(tzinfo=from_zone)
start = start_utc.astimezone(to_zone)
if "temp.report.end" in configuration:
end_utc = datetime.datetime.strptime(configuration["temp.report.end"], DATEFORMAT)
end_utc = end_utc.replace(tzinfo=from_zone)
end = end_utc.astimezone(to_zone)
else:
end = datetime.datetime.now()
if len(totals) == 0 and untagged is None:
return ["No data in the range {:%Y-%m-%d %H:%M:%S} - {:%Y-%m-%d %H:%M:%S}".format(start, end)]
# Compose report header.
output = [
"",
"Total by Tag, for {:%Y-%m-%d %H:%M:%S} - {:%Y-%m-%d %H:%M:%S}".format(start, end),
""
]
# Compose table header.
if configuration["color"] == "on":
output.append("[4m{:{width}}[0m [4m{:>10}[0m".format("Tag", "Total", width=max_width))
else:
output.append("{:{width}} {:>10}".format("Tag", "Total", width=max_width))
output.append("{} {}".format("-" * max_width, "----------"))
# Compose table rows.
grand_total = 0
for tag in sorted(totals):
seconds = int(totals[tag].total_seconds())
formatted = format_seconds(seconds)
grand_total += seconds
output.append("{:{width}} {:10}".format(tag, formatted, width=max_width))
if untagged is not None:
seconds = int(untagged.total_seconds())
formatted = format_seconds(seconds)
grand_total += seconds
output.append("{:{width}} {:10}".format("", formatted, width=max_width))
# Compose total.
if configuration["color"] == "on":
output.append("{} {}".format(" " * max_width, "[4m [0m"))
else:
output.append("{} {}".format(" " * max_width, "----------"))
output.append("{:{width}} {:10}".format("Total", format_seconds(grand_total), width=max_width))
output.append("")
return output
if __name__ == "__main__":
for line in calculate_totals(sys.stdin):
print(line)
|
aif360/algorithms/inprocessing/art_classifier.py | sumacm/fairattr | 982 | 11065738 | import numpy as np
from aif360.datasets import BinaryLabelDataset
from aif360.algorithms import Transformer
class ARTClassifier(Transformer):
"""Wraps an instance of an :obj:`art.classifiers.Classifier` to extend
:obj:`~aif360.algorithms.Transformer`.
"""
def __init__(self, art_classifier):
"""Initialize ARTClassifier.
Args:
art_classifier (art.classifier.Classifier): A Classifier
object from the `adversarial-robustness-toolbox`_.
.. _adversarial-robustness-toolbox:
https://github.com/Trusted-AI/adversarial-robustness-toolbox
"""
super(ARTClassifier, self).__init__(art_classifier=art_classifier)
self._art_classifier = art_classifier
def fit(self, dataset, batch_size=128, nb_epochs=20):
"""Train a classifer on the input.
Args:
dataset (Dataset): Training dataset.
batch_size (int): Size of batches (passed through to ART).
nb_epochs (int): Number of epochs to use for training (passed
through to ART).
Returns:
ARTClassifier: Returns self.
"""
self._art_classifier.fit(dataset.features, dataset.labels,
batch_size=batch_size, nb_epochs=nb_epochs)
return self
def predict(self, dataset, logits=False):
"""Perform prediction for the input.
Args:
dataset (Dataset): Test dataset.
            logits (bool, optional): True if prediction should be done at the
                logits layer (passed through to ART).
Returns:
Dataset: Dataset with predicted labels in the `labels` field.
"""
pred_labels = self._art_classifier.predict(dataset.features,
dataset.labels, logits=logits)
if isinstance(dataset, BinaryLabelDataset):
pred_labels = np.argmax(pred_labels, axis=1).reshape((-1, 1))
pred_dataset = dataset.copy()
pred_dataset.labels = pred_labels
return pred_dataset
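# Minimal usage sketch (illustrative; the wrapped classifier construction is
# assumed and not part of this module):
#   from art.classifiers import KerasClassifier
#   clf = ARTClassifier(KerasClassifier(model=keras_model))
#   clf.fit(train_dataset)
#   preds = clf.predict(test_dataset)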
|
ClemBot.Bot/bot/cogs/eval_cog.py | Iapetus-11/ClemBot | 121 | 11065745 | import json
import logging
import typing as t
import aiohttp
import discord.ext.commands as commands
import discord.utils as utils
import bot.bot_secrets as bot_secrets
import bot.extensions as ext
log = logging.getLogger(__name__)
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json'
}
MAX_CONTENT_LENGTH = 1900
MAX_LINE_LENGTH = 15
EVAL_COMMAND_COOLDOWN = 2
class EvalCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command(aliases=['e'])
@commands.cooldown(1, EVAL_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
        'Allows for the evaluation of arbitrary python code directly in discord. '
        'Supports all internal standard libraries like json or re'
)
@ext.short_help('Runs arbitrary python code in discord')
@ext.example('eval print("hello world")')
async def eval(self, ctx, *, code) -> None:
code = code.replace('```python', '')
code = code.replace('```py', '')
code = code.replace('`', '')
code = utils.escape_mentions(code)
feedback_mes = await ctx.send('Code execution started')
log.info(f'Code: {code} sent for evaluation by author: {ctx.author.id} in guild: {ctx.guild.id}')
output = await self._post_eval(code)
stdout = output['stdout']
stdout = stdout.strip('`')
stdout = utils.escape_mentions(stdout)
await feedback_mes.delete()
if len(stdout) > MAX_CONTENT_LENGTH:
await ctx.send(f'{ctx.author.mention} Attempted output length exceeds 2000 characters, Please try again')
return
result_emoji = ':white_check_mark:' if output['returncode'] == 0 else ':warning:'
out = f'{ctx.author.mention} {result_emoji} Eval Completed with response code: {output["returncode"]}'
if stdout:
await ctx.send(f'{out}\n\n```{self._format(stdout)}```')
else:
await ctx.send(f'{out}\n\n```[No Output]```')
def _format(self, resp):
lines = [f'{(i + 1):03d} | {line}' for i, line in enumerate(resp.split('\n')) if line]
if len(lines) > MAX_LINE_LENGTH:
lines = lines[:MAX_LINE_LENGTH]
lines.append('... Output line limit exceeded, data truncated')
return '\n'.join(lines)
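    # Illustrative: _format("hi\nthere") ->
    #   001 | hi
    #   002 | there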
    async def _post_eval(self, code) -> t.Union[dict, None]:
data = {
"input": code
}
json_data = json.dumps(data)
async with aiohttp.ClientSession() as s:
async with s.post(bot_secrets.secrets.repl_url,
data=json_data,
headers=HEADERS) as resp:
if resp.status == 200:
return json.loads(await resp.text())
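    # Contract assumed by _post_eval: the repl service accepts POST {"input":
    # code} and returns JSON containing at least "stdout" and "returncode".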
def setup(bot):
bot.add_cog(EvalCog(bot))
|
statsmodels/genmod/tests/test_constrained.py | bukzor/statsmodels | 6,931 | 11065755 | # -*- coding: utf-8 -*-
"""
Unit tests for fit_constrained
Tests for Poisson and Binomial are in discrete
Created on Sun Jan 7 09:21:39 2018
Author: <NAME>
"""
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS, WLS
from statsmodels.tools.sm_exceptions import ValueWarning
from statsmodels.tools.tools import add_constant
class ConstrainedCompareMixin(object):
@classmethod
def setup_class(cls):
nobs, k_exog = 100, 5
np.random.seed(987125)
x = np.random.randn(nobs, k_exog - 1)
x = add_constant(x)
y_true = x.sum(1) / 2
y = y_true + 2 * np.random.randn(nobs)
cls.endog = y
cls.exog = x
cls.idx_uc = [0, 2, 3, 4]
cls.idx_p_uc = np.array(cls.idx_uc)
cls.idx_c = [1]
cls.exogc = xc = x[:, cls.idx_uc]
mod_ols_c = OLS(y - 0.5 * x[:, 1], xc)
mod_ols_c.exog_names[:] = ['const', 'x2', 'x3', 'x4']
cls.mod2 = mod_ols_c
cls.init()
def test_params(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1.params[self.idx_p_uc], res2.params, rtol=1e-10)
def test_se(self):
res1 = self.res1
res2 = self.res2
assert_equal(res1.df_resid, res2.df_resid)
assert_allclose(res1.scale, res2.scale, rtol=1e-10)
assert_allclose(res1.bse[self.idx_p_uc], res2.bse, rtol=1e-10)
assert_allclose(res1.cov_params()[self.idx_p_uc[:, None],
self.idx_p_uc], res2.cov_params(), rtol=5e-9, atol=1e-15)
def test_resid(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1.resid_response, res2.resid, rtol=1e-10)
class TestGLMGaussianOffset(ConstrainedCompareMixin):
@classmethod
def init(cls):
cls.res2 = cls.mod2.fit()
mod = GLM(cls.endog, cls.exogc,
offset=0.5 * cls.exog[:, cls.idx_c].squeeze())
mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']
cls.res1 = mod.fit()
cls.idx_p_uc = np.arange(cls.exogc.shape[1])
class TestGLMGaussianConstrained(ConstrainedCompareMixin):
@classmethod
def init(cls):
cls.res2 = cls.mod2.fit()
mod = GLM(cls.endog, cls.exog)
mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']
cls.res1 = mod.fit_constrained('x1=0.5')
class TestGLMGaussianOffsetHC(ConstrainedCompareMixin):
@classmethod
def init(cls):
cov_type = 'HC0'
cls.res2 = cls.mod2.fit(cov_type=cov_type)
mod = GLM(cls.endog, cls.exogc,
offset=0.5 * cls.exog[:, cls.idx_c].squeeze())
mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']
cls.res1 = mod.fit(cov_type=cov_type)
cls.idx_p_uc = np.arange(cls.exogc.shape[1])
class TestGLMGaussianConstrainedHC(ConstrainedCompareMixin):
@classmethod
def init(cls):
cov_type = 'HC0'
cls.res2 = cls.mod2.fit(cov_type=cov_type)
mod = GLM(cls.endog, cls.exog)
mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']
cls.res1 = mod.fit_constrained('x1=0.5', cov_type=cov_type)
class ConstrainedCompareWtdMixin(ConstrainedCompareMixin):
@classmethod
def setup_class(cls):
nobs, k_exog = 100, 5
np.random.seed(987125)
x = np.random.randn(nobs, k_exog - 1)
x = add_constant(x)
cls.aweights = np.random.randint(1, 10, nobs)
y_true = x.sum(1) / 2
y = y_true + 2 * np.random.randn(nobs)
cls.endog = y
cls.exog = x
cls.idx_uc = [0, 2, 3, 4]
cls.idx_p_uc = np.array(cls.idx_uc)
cls.idx_c = [1]
cls.exogc = xc = x[:, cls.idx_uc]
mod_ols_c = WLS(y - 0.5 * x[:, 1], xc, weights=cls.aweights)
mod_ols_c.exog_names[:] = ['const', 'x2', 'x3', 'x4']
cls.mod2 = mod_ols_c
cls.init()
class TestGLMWtdGaussianOffset(ConstrainedCompareWtdMixin):
@classmethod
def init(cls):
cls.res2 = cls.mod2.fit()
mod = GLM(cls.endog, cls.exogc,
offset=0.5 * cls.exog[:, cls.idx_c].squeeze(),
var_weights=cls.aweights)
mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']
cls.res1 = mod.fit()
cls.idx_p_uc = np.arange(cls.exogc.shape[1])
class TestGLMWtdGaussianConstrained(ConstrainedCompareWtdMixin):
@classmethod
def init(cls):
cls.res2 = cls.mod2.fit()
mod = GLM(cls.endog, cls.exog, var_weights=cls.aweights)
mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']
cls.res1 = mod.fit_constrained('x1=0.5')
class TestGLMWtdGaussianOffsetHC(ConstrainedCompareWtdMixin):
@classmethod
def init(cls):
cov_type = 'HC0'
cls.res2 = cls.mod2.fit(cov_type=cov_type)
mod = GLM(cls.endog, cls.exogc,
offset=0.5 * cls.exog[:, cls.idx_c].squeeze(),
var_weights=cls.aweights)
mod.exog_names[:] = ['const', 'x2', 'x3', 'x4']
cls.res1 = mod.fit(cov_type=cov_type)
cls.idx_p_uc = np.arange(cls.exogc.shape[1])
class TestGLMWtdGaussianConstrainedHC(ConstrainedCompareWtdMixin):
@classmethod
def init(cls):
cov_type = 'HC0'
cls.res2 = cls.mod2.fit(cov_type=cov_type)
mod = GLM(cls.endog, cls.exog, var_weights=cls.aweights)
mod.exog_names[:] = ['const', 'x1', 'x2', 'x3', 'x4']
cls.res1 = mod.fit_constrained('x1=0.5', cov_type=cov_type)
class TestGLMBinomialCountConstrained(ConstrainedCompareMixin):
@classmethod
def setup_class(cls):
from statsmodels.datasets.star98 import load
#from statsmodels.genmod.tests.results.results_glm import Star98
data = load()
data.exog = np.asarray(data.exog)
data.endog = np.asarray(data.endog)
exog = add_constant(data.exog, prepend=True)
offset = np.ones(len(data.endog))
exog_keep = exog[:, :-5]
cls.mod2 = GLM(data.endog, exog_keep, family=family.Binomial(),
offset=offset)
cls.mod1 = GLM(data.endog, exog, family=family.Binomial(),
offset=offset)
cls.init()
@classmethod
def init(cls):
cls.res2 = cls.mod2.fit()
k = cls.mod1.exog.shape[1]
cls.idx_p_uc = np.arange(k - 5)
constraints = np.eye(k)[-5:]
cls.res1 = cls.mod1.fit_constrained(constraints)
def test_resid(self):
# need to override because res2 does not have resid
res1 = self.res1
res2 = self.res2
assert_allclose(res1.resid_response, res2.resid_response, rtol=1e-8)
def test_glm_attr(self):
for attr in ['llf', 'null_deviance', 'aic', 'df_resid',
'df_model', 'pearson_chi2', 'scale']:
assert_allclose(getattr(self.res1, attr),
getattr(self.res2, attr), rtol=1e-10)
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
# FutureWarning to silence BIC warning
assert_allclose(self.res1.bic, self.res2.bic, rtol=1e-10)
def test_wald(self):
res1 = self.res1
res2 = self.res2
k1 = len(res1.params)
k2 = len(res2.params)
use_f = False
with warnings.catch_warnings():
warnings.simplefilter('ignore', ValueWarning)
wt2 = res2.wald_test(np.eye(k2)[1:], use_f=use_f)
wt1 = res1.wald_test(np.eye(k1)[1:], use_f=use_f)
assert_allclose(wt2.pvalue, wt1.pvalue, atol=1e-20) # pvalue = 0
assert_allclose(wt2.statistic, wt1.statistic, rtol=1e-8)
assert_equal(wt2.df_denom, wt1.df_denom)
use_f = True
with warnings.catch_warnings():
warnings.simplefilter('ignore', ValueWarning)
wt2 = res2.wald_test(np.eye(k2)[1:], use_f=use_f)
wt1 = res1.wald_test(np.eye(k1)[1:], use_f=use_f)
assert_allclose(wt2.pvalue, wt1.pvalue, rtol=1) # pvalue = 8e-273
assert_allclose(wt2.statistic, wt1.statistic, rtol=1e-8)
assert_equal(wt2.df_denom, wt1.df_denom)
assert_equal(wt2.df_num, wt1.df_num)
assert_equal(wt2.summary()[-30:], wt1.summary()[-30:])
# smoke
with warnings.catch_warnings():
# RuntimeWarnings because of truedivide and scipy distributions
# Future to silence BIC warning
warnings.simplefilter("ignore", FutureWarning)
warnings.simplefilter('ignore', ValueWarning)
warnings.simplefilter('ignore', RuntimeWarning)
self.res1.summary()
self.res1.summary2()
class TestGLMBinomialCountConstrainedHC(TestGLMBinomialCountConstrained):
@classmethod
def init(cls):
cls.res2 = cls.mod2.fit(cov_type='HC0')
k = cls.mod1.exog.shape[1]
cls.idx_p_uc = np.arange(k - 5)
constraints = np.eye(k)[-5:]
cls.res1 = cls.mod1.fit_constrained(constraints, cov_type='HC0')
|
netket/experimental/dynamics/__init__.py | NetKet/netket | 352 | 11065768 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"RungeKuttaIntegrator",
"RKIntegratorConfig",
"Euler",
"Heun",
"Midpoint",
"RK4",
"RK12",
"RK23",
"RK45",
]
from ._rk_solver import RungeKuttaIntegrator, RKIntegratorConfig
from ._rk_solver import Euler, Heun, Midpoint, RK4, RK12, RK23, RK45
from netket.utils import _hide_submodules
_hide_submodules(__name__)
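# Typical usage (sketch; exact keyword names assumed from the
# RKIntegratorConfig re-exports above):
#   from netket.experimental.dynamics import RK23
#   integrator = RK23(dt=0.01, adaptive=True)   # an RKIntegratorConfig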
|
models/vision/detection/configs/mask_rcnn/SM/CI/2/sagemaker_2x8.py | piyushghai/deep-learning-models | 129 | 11065773 | import os.path as osp
# date time settings to update paths for jobs
from datetime import datetime
now = datetime.now()
time_str = now.strftime("%d-%m-%Y-%H-%M")
date_str = now.strftime("%d-%m-%Y")
# sagemaker settings
sagemaker_user=dict(
user_id='CI',
s3_bucket='mzanur-sagemaker',
docker_image='578276202366.dkr.ecr.us-east-1.amazonaws.com/mzanur-awsdet-ecr:awsdet',
hvd_processes_per_host=8,
hvd_instance_type='ml.p3.16xlarge', #'ml.p3dn.24xlarge',
hvd_instance_count=2,
)
# settings for distributed training on sagemaker
mpi_options="\
-x OMPI_MCA_plm_rsh_no_tree_spawn=1 -bind-to none -map-by slot -x OMPI_MCA_pml=ob1 \
-x OMPI_MCA_btl_vader_single_copy_mechanism=none \
-x OMPI_MCA_btl=tcp,self \
-x NCCL_TREE_THRESHOLD=4294967296 \
-x HOROVOD_CYCLE_TIME=0.5 \
-x HOROVOD_FUSION_THRESHOLD=67108864"
distributions=dict(
mpi=dict(
enabled=True,
processes_per_host=sagemaker_user['hvd_processes_per_host'],
custom_mpi_options=mpi_options,
)
)
# sagemaker channels
channels=dict(
coco='s3://{}/awsdet/data/coco/'.format(sagemaker_user['s3_bucket']),
weights='s3://{}/awsdet/data/weights/'.format(sagemaker_user['s3_bucket'])
)
job_str='{}x{}-{}'.format(sagemaker_user['hvd_instance_count'], sagemaker_user['hvd_processes_per_host'], time_str)
sagemaker_job=dict(
s3_path='s3://{}/mask-rcnn/outputs/{}'.format(sagemaker_user['s3_bucket'], time_str),
job_name='{}-mrcnn-{}'.format(sagemaker_user['user_id'], job_str),
output_path='',
)
sagemaker_job['output_path']='{}/output/{}'.format(sagemaker_job['s3_path'], sagemaker_job['job_name'])
|
umap/tests/test_parametric_umap.py | worldbeater/umap | 5,537 | 11065795 | <gh_stars>1000+
import numpy as np
import tempfile
import pytest
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
try:
import tensorflow as tf
IMPORT_TF = True
except ImportError:
IMPORT_TF = False
else:
from umap.parametric_umap import ParametricUMAP, load_ParametricUMAP
tf_only = pytest.mark.skipif(not IMPORT_TF, reason="TensorFlow >= 2.0 is not installed")
@pytest.fixture(scope="session")
def moon_dataset():
X, _ = make_moons(200)
return X
@tf_only
def test_create_model(moon_dataset):
"""test a simple parametric UMAP network"""
embedder = ParametricUMAP()
embedding = embedder.fit_transform(moon_dataset)
# completes successfully
assert embedding is not None
assert embedding.shape == (moon_dataset.shape[0], 2)
@tf_only
def test_global_loss(moon_dataset):
"""test a simple parametric UMAP network"""
embedder = ParametricUMAP(global_correlation_loss_weight=1.0)
embedding = embedder.fit_transform(moon_dataset)
# completes successfully
assert embedding is not None
assert embedding.shape == (moon_dataset.shape[0], 2)
@tf_only
def test_inverse_transform(moon_dataset):
"""tests inverse_transform"""
def norm(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
X = norm(moon_dataset)
embedder = ParametricUMAP(parametric_reconstruction=True)
Z = embedder.fit_transform(X)
X_r = embedder.inverse_transform(Z)
# completes successfully
assert X_r is not None
assert X_r.shape == X.shape
@tf_only
def test_nonparametric(moon_dataset):
"""test nonparametric embedding"""
embedder = ParametricUMAP(parametric_embedding=False)
embedding = embedder.fit_transform(moon_dataset)
# completes successfully
assert embedding is not None
assert embedding.shape == (moon_dataset.shape[0], 2)
@tf_only
def test_custom_encoder_decoder(moon_dataset):
"""test using a custom encoder / decoder"""
dims = (2,)
n_components = 2
encoder = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=dims),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=100, activation="relu"),
tf.keras.layers.Dense(units=100, activation="relu"),
tf.keras.layers.Dense(units=100, activation="relu"),
tf.keras.layers.Dense(units=n_components, name="z"),
]
)
decoder = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=n_components),
tf.keras.layers.Dense(units=100, activation="relu"),
tf.keras.layers.Dense(units=100, activation="relu"),
tf.keras.layers.Dense(units=100, activation="relu"),
tf.keras.layers.Dense(
                units=np.prod(dims), name="recon", activation=None  # np.product was removed in NumPy 2.0
),
tf.keras.layers.Reshape(dims),
]
)
embedder = ParametricUMAP(
encoder=encoder,
decoder=decoder,
dims=dims,
parametric_reconstruction=True,
verbose=True,
)
embedding = embedder.fit_transform(moon_dataset)
# completes successfully
assert embedding is not None
assert embedding.shape == (moon_dataset.shape[0], 2)
@tf_only
def test_validation(moon_dataset):
"""tests adding a validation dataset"""
X_train, X_valid = train_test_split(moon_dataset, train_size=0.5)
embedder = ParametricUMAP(
parametric_reconstruction=True, reconstruction_validation=X_valid, verbose=True
)
embedding = embedder.fit_transform(X_train)
# completes successfully
assert embedding is not None
assert embedding.shape == (X_train.shape[0], 2)
@tf_only
def test_save_load(moon_dataset):
"""tests saving and loading"""
embedder = ParametricUMAP()
embedding = embedder.fit_transform(moon_dataset)
# completes successfully
assert embedding is not None
assert embedding.shape == (moon_dataset.shape[0], 2)
# if platform.system() != "Windows":
# Portable tempfile
model_path = tempfile.mkdtemp(suffix="_umap_model")
embedder.save(model_path)
loaded_model = load_ParametricUMAP(model_path)
assert loaded_model is not None
|
DQMServices/FwkIO/test/create_lumi_only_file_cfg.py | ckamtsikis/cmssw | 852 | 11065820 |
from builtins import range
import FWCore.ParameterSet.Config as cms
process =cms.Process("TEST")
process.source = cms.Source("EmptySource", numberEventsInRun = cms.untracked.uint32(1))
elements = list()
for i in range(0,10):
elements.append(cms.untracked.PSet(lowX=cms.untracked.double(0),
highX=cms.untracked.double(10),
nchX=cms.untracked.int32(10),
name=cms.untracked.string("Foo"+str(i)),
title=cms.untracked.string("Foo"+str(i)),
value=cms.untracked.double(i)))
process.filler = cms.EDProducer("DummyFillDQMStore",
elements=cms.untracked.VPSet(*elements),
fillRuns = cms.untracked.bool(False),
fillLumis = cms.untracked.bool(True))
process.out = cms.OutputModule("DQMRootOutputModule",
fileName = cms.untracked.string("dqm_lumi_only.root"))
process.p = cms.Path(process.filler)
process.o = cms.EndPath(process.out)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.add_(cms.Service("DQMStore"))
|
test/test_protocol.py | informatique-cdc/kafka-python | 4,389 | 11065822 |
#pylint: skip-file
import io
import struct
import pytest
from kafka.protocol.api import RequestHeader
from kafka.protocol.commit import GroupCoordinatorRequest
from kafka.protocol.fetch import FetchRequest, FetchResponse
from kafka.protocol.message import Message, MessageSet, PartialMessage
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.types import Int16, Int32, Int64, String, UnsignedVarInt32, CompactString, CompactArray, CompactBytes
def test_create_message():
payload = b'test'
key = b'key'
msg = Message(payload, key=key)
assert msg.magic == 0
assert msg.attributes == 0
assert msg.key == key
assert msg.value == payload
def test_encode_message_v0():
message = Message(b'test', key=b'key')
encoded = message.encode()
expect = b''.join([
struct.pack('>i', -1427009701), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 3), # Length of key
b'key', # key
struct.pack('>i', 4), # Length of value
b'test', # value
])
assert encoded == expect
def test_encode_message_v1():
message = Message(b'test', key=b'key', magic=1, timestamp=1234)
encoded = message.encode()
expect = b''.join([
struct.pack('>i', 1331087195), # CRC
struct.pack('>bb', 1, 0), # Magic, flags
struct.pack('>q', 1234), # Timestamp
struct.pack('>i', 3), # Length of key
b'key', # key
struct.pack('>i', 4), # Length of value
b'test', # value
])
assert encoded == expect
def test_decode_message():
encoded = b''.join([
struct.pack('>i', -1427009701), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 3), # Length of key
b'key', # key
struct.pack('>i', 4), # Length of value
b'test', # value
])
decoded_message = Message.decode(encoded)
msg = Message(b'test', key=b'key')
msg.encode() # crc is recalculated during encoding
assert decoded_message == msg
def test_decode_message_validate_crc():
encoded = b''.join([
struct.pack('>i', -1427009701), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 3), # Length of key
b'key', # key
struct.pack('>i', 4), # Length of value
b'test', # value
])
decoded_message = Message.decode(encoded)
assert decoded_message.validate_crc() is True
encoded = b''.join([
struct.pack('>i', 1234), # Incorrect CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 3), # Length of key
b'key', # key
struct.pack('>i', 4), # Length of value
b'test', # value
])
decoded_message = Message.decode(encoded)
assert decoded_message.validate_crc() is False
def test_encode_message_set():
messages = [
Message(b'v1', key=b'k1'),
Message(b'v2', key=b'k2')
]
encoded = MessageSet.encode([(0, msg.encode())
for msg in messages])
expect = b''.join([
struct.pack('>q', 0), # MsgSet Offset
struct.pack('>i', 18), # Msg Size
struct.pack('>i', 1474775406), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k1', # Key
struct.pack('>i', 2), # Length of value
b'v1', # Value
struct.pack('>q', 0), # MsgSet Offset
struct.pack('>i', 18), # Msg Size
struct.pack('>i', -16383415), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k2', # Key
struct.pack('>i', 2), # Length of value
b'v2', # Value
])
expect = struct.pack('>i', len(expect)) + expect
assert encoded == expect
def test_decode_message_set():
encoded = b''.join([
struct.pack('>q', 0), # MsgSet Offset
struct.pack('>i', 18), # Msg Size
struct.pack('>i', 1474775406), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k1', # Key
struct.pack('>i', 2), # Length of value
b'v1', # Value
struct.pack('>q', 1), # MsgSet Offset
struct.pack('>i', 18), # Msg Size
struct.pack('>i', -16383415), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k2', # Key
struct.pack('>i', 2), # Length of value
b'v2', # Value
])
msgs = MessageSet.decode(encoded, bytes_to_read=len(encoded))
assert len(msgs) == 2
msg1, msg2 = msgs
returned_offset1, message1_size, decoded_message1 = msg1
returned_offset2, message2_size, decoded_message2 = msg2
assert returned_offset1 == 0
message1 = Message(b'v1', key=b'k1')
message1.encode()
assert decoded_message1 == message1
assert returned_offset2 == 1
message2 = Message(b'v2', key=b'k2')
message2.encode()
assert decoded_message2 == message2
def test_encode_message_header():
expect = b''.join([
struct.pack('>h', 10), # API Key
struct.pack('>h', 0), # API Version
struct.pack('>i', 4), # Correlation Id
struct.pack('>h', len('client3')), # Length of clientId
b'client3', # ClientId
])
req = GroupCoordinatorRequest[0]('foo')
header = RequestHeader(req, correlation_id=4, client_id='client3')
assert header.encode() == expect
def test_decode_message_set_partial():
encoded = b''.join([
struct.pack('>q', 0), # Msg Offset
struct.pack('>i', 18), # Msg Size
struct.pack('>i', 1474775406), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k1', # Key
struct.pack('>i', 2), # Length of value
b'v1', # Value
struct.pack('>q', 1), # Msg Offset
struct.pack('>i', 24), # Msg Size (larger than remaining MsgSet size)
struct.pack('>i', -16383415), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k2', # Key
struct.pack('>i', 8), # Length of value
b'ar', # Value (truncated)
])
msgs = MessageSet.decode(encoded, bytes_to_read=len(encoded))
assert len(msgs) == 2
msg1, msg2 = msgs
returned_offset1, message1_size, decoded_message1 = msg1
returned_offset2, message2_size, decoded_message2 = msg2
assert returned_offset1 == 0
message1 = Message(b'v1', key=b'k1')
message1.encode()
assert decoded_message1 == message1
assert returned_offset2 is None
assert message2_size is None
assert decoded_message2 == PartialMessage()
def test_decode_fetch_response_partial():
encoded = b''.join([
Int32.encode(1), # Num Topics (Array)
String('utf-8').encode('foobar'),
Int32.encode(2), # Num Partitions (Array)
Int32.encode(0), # Partition id
Int16.encode(0), # Error Code
Int64.encode(1234), # Highwater offset
Int32.encode(52), # MessageSet size
Int64.encode(0), # Msg Offset
Int32.encode(18), # Msg Size
struct.pack('>i', 1474775406), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k1', # Key
struct.pack('>i', 2), # Length of value
b'v1', # Value
Int64.encode(1), # Msg Offset
struct.pack('>i', 24), # Msg Size (larger than remaining MsgSet size)
struct.pack('>i', -16383415), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k2', # Key
struct.pack('>i', 8), # Length of value
b'ar', # Value (truncated)
Int32.encode(1),
Int16.encode(0),
Int64.encode(2345),
Int32.encode(52), # MessageSet size
Int64.encode(0), # Msg Offset
Int32.encode(18), # Msg Size
struct.pack('>i', 1474775406), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k1', # Key
struct.pack('>i', 2), # Length of value
b'v1', # Value
Int64.encode(1), # Msg Offset
struct.pack('>i', 24), # Msg Size (larger than remaining MsgSet size)
struct.pack('>i', -16383415), # CRC
struct.pack('>bb', 0, 0), # Magic, flags
struct.pack('>i', 2), # Length of key
b'k2', # Key
struct.pack('>i', 8), # Length of value
b'ar', # Value (truncated)
])
resp = FetchResponse[0].decode(io.BytesIO(encoded))
assert len(resp.topics) == 1
topic, partitions = resp.topics[0]
assert topic == 'foobar'
assert len(partitions) == 2
m1 = MessageSet.decode(
partitions[0][3], bytes_to_read=len(partitions[0][3]))
assert len(m1) == 2
assert m1[1] == (None, None, PartialMessage())
def test_struct_unrecognized_kwargs():
    with pytest.raises(ValueError):
        MetadataRequest[0](topicz='foo')
def test_struct_missing_kwargs():
fr = FetchRequest[0](max_wait_time=100)
assert fr.min_bytes is None
def test_unsigned_varint_serde():
pairs = {
0: [0],
-1: [0xff, 0xff, 0xff, 0xff, 0x0f],
1: [1],
63: [0x3f],
-64: [0xc0, 0xff, 0xff, 0xff, 0x0f],
64: [0x40],
8191: [0xff, 0x3f],
-8192: [0x80, 0xc0, 0xff, 0xff, 0x0f],
8192: [0x80, 0x40],
-8193: [0xff, 0xbf, 0xff, 0xff, 0x0f],
1048575: [0xff, 0xff, 0x3f],
}
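    # Kafka's unsigned varint: 7 payload bits per byte, with the MSB set when
    # another byte follows. Negative ints are masked to their 32-bit
    # two's-complement value below, hence the 5-byte encodings above.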
for value, expected_encoded in pairs.items():
value &= 0xffffffff
encoded = UnsignedVarInt32.encode(value)
assert encoded == b''.join(struct.pack('>B', x) for x in expected_encoded)
assert value == UnsignedVarInt32.decode(io.BytesIO(encoded))
def test_compact_data_structs():
cs = CompactString()
encoded = cs.encode(None)
assert encoded == struct.pack('B', 0)
decoded = cs.decode(io.BytesIO(encoded))
assert decoded is None
assert b'\x01' == cs.encode('')
assert '' == cs.decode(io.BytesIO(b'\x01'))
encoded = cs.encode("foobarbaz")
assert cs.decode(io.BytesIO(encoded)) == "foobarbaz"
arr = CompactArray(CompactString())
assert arr.encode(None) == b'\x00'
assert arr.decode(io.BytesIO(b'\x00')) is None
enc = arr.encode([])
assert enc == b'\x01'
assert [] == arr.decode(io.BytesIO(enc))
encoded = arr.encode(["foo", "bar", "baz", "quux"])
assert arr.decode(io.BytesIO(encoded)) == ["foo", "bar", "baz", "quux"]
enc = CompactBytes.encode(None)
assert enc == b'\x00'
assert CompactBytes.decode(io.BytesIO(b'\x00')) is None
enc = CompactBytes.encode(b'')
assert enc == b'\x01'
    assert CompactBytes.decode(io.BytesIO(b'\x01')) == b''
enc = CompactBytes.encode(b'foo')
assert CompactBytes.decode(io.BytesIO(enc)) == b'foo'
|
example/example/settings.py | tomchristie/django-vanilla-views | 476 | 11065827 | import os
import django
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "sqlite3.db",
}
}
STATIC_URL = "/static/"
STATICFILES_DIRS = [
"statics",
]
SECRET_KEY = "not-secret"
if django.VERSION >= (1, 10):
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
]
else:
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
]
ROOT_URLCONF = "example.urls"
WSGI_APPLICATION = "example.wsgi.application"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
},
]
INSTALLED_APPS = [
"django.contrib.staticfiles",
"example.notes",
]
|
docs/snippets/function_annotations_rst.py | WillDaSilva/mkdocstrings | 354 | 11065837 | from typing import Optional
def my_function(param1: int, param2: Optional[str] = None) -> str:
"""A short description of this function.
    Complex markup is supported in the main description section.

        I'm a code block!
:param param1: An integer?
:param param2: A string? If you have a long description,
you can split it on multiple lines.
"""
return f"{param2}{param1}"
|
common/src/stack/command/stack/commands/remove/repo/__init__.py | sammeidinger/stack | 123 | 11065842 |
from stack.argument_processors.repo import RepoArgProcessor
import stack.commands
import stack.deferable
from stack.exception import CommandError
class Command(RepoArgProcessor, stack.commands.remove.command):
"""
Remove remote software repositories from stacki.
<arg type='string' name='repo'>
A list of repo's to remove. This can be the repo name or alias.
</arg>
<example cmd='remove repo ceph_pkgs'>
Remove the 'ceph_pkgs' repository from stacki
</example>
"""
@stack.deferable.rewrite_frontend_repo_file
def run(self, params, args):
if not args:
raise CommandError(self, 'either a repo name or alias must be specified.')
for repo in self.get_repos(args):
self.delete_repo(repo.alias)
|
blender/hecl/sact/SACTSubtype.py | linkmauve/hecl | 267 | 11065843 | import bpy
# Subtype update (if anything important changes)
def active_subtype_update(self, context):
if context.scene.hecl_type == 'ACTOR' and context.scene.hecl_auto_select:
if SACTSubtype_load.poll(context):
bpy.ops.scene.sactsubtype_load()
# Actor subtype overlay class
class SACTSubtypeOverlay(bpy.types.PropertyGroup):
name: bpy.props.StringProperty(name="Overlay Name")
linked_mesh: bpy.props.StringProperty(name="Linked Mesh Object Source", update=active_subtype_update)
show_overlay: bpy.props.BoolProperty(name="Show Overlay Mesh", update=active_subtype_update)
# Actor attachment class
class SACTAttachment(bpy.types.PropertyGroup):
name: bpy.props.StringProperty(name="Attachment Name")
linked_armature: bpy.props.StringProperty(name="Linked Armature Object Source", update=active_subtype_update)
linked_mesh: bpy.props.StringProperty(name="Linked Mesh Object Source", update=active_subtype_update)
show_attachment: bpy.props.BoolProperty(name="Show Attachment Mesh", update=active_subtype_update)
# Actor subtype class
class SACTSubtype(bpy.types.PropertyGroup):
name: bpy.props.StringProperty(name="Actor Mesh Name")
linked_armature: bpy.props.StringProperty(name="Linked Armature Object Source", update=active_subtype_update)
linked_mesh: bpy.props.StringProperty(name="Linked Mesh Object Source", update=active_subtype_update)
show_mesh: bpy.props.BoolProperty(name="Show Mesh", default=True, update=active_subtype_update)
overlays: bpy.props.CollectionProperty(type=SACTSubtypeOverlay, name="Subtype Overlay List")
active_overlay: bpy.props.IntProperty(name="Active Subtype Overlay", default=0, update=active_subtype_update)
# Panel draw
def draw(layout, context):
actor_data = context.scene.hecl_sact_data
row = layout.row(align=True)
row.alignment = 'LEFT'
row.prop(actor_data, 'show_subtypes', text="Subtypes", icon='MESH_DATA', emboss=False)
if actor_data.show_subtypes:
row = layout.row()
row.template_list("UI_UL_list", "SCENE_UL_SACTSubtypes",
actor_data, 'subtypes', actor_data, 'active_subtype')
col = row.column(align=True)
col.operator("scene.sactsubtype_add", icon="ADD", text="")
col.operator("scene.sactsubtype_remove", icon="REMOVE", text="")
if len(actor_data.subtypes) and actor_data.active_subtype >= 0:
subtype = actor_data.subtypes[actor_data.active_subtype]
# Load subtype operator
if not bpy.context.scene.hecl_auto_select:
layout.operator("scene.sactsubtype_load", icon='FILE_TICK', text="Load Subtype")
# Name edit field
layout.prop(subtype, 'name', text="Name")
# Link external armature search
layout.prop_search(subtype, 'linked_armature', bpy.data, 'objects', text="Armature")
linked_armature = None
if subtype.linked_armature in bpy.data.objects:
linked_armature = bpy.data.objects[subtype.linked_armature]
# Validate
if linked_armature is None:
layout.label(text="Source armature not set", icon='ERROR')
elif linked_armature is not None and linked_armature.type != 'ARMATURE':
layout.label(text="Source armature is not an 'ARMATURE'", icon='ERROR')
# Link external mesh search
layout.prop_search(subtype, 'linked_mesh', bpy.data, 'objects', text="Mesh")
linked_mesh = None
if subtype.linked_mesh in bpy.data.objects:
linked_mesh = bpy.data.objects[subtype.linked_mesh]
layout.prop(subtype, 'show_mesh', text="Show Mesh")
# Mesh overlays
layout.label(text="Overlay Meshes:")
row = layout.row()
row.template_list("UI_UL_list", "SCENE_UL_SACTSubtypeOverlays",
subtype, 'overlays', subtype, 'active_overlay')
col = row.column(align=True)
col.operator("scene.sactsubtypeoverlay_add", icon="ADD", text="")
col.operator("scene.sactsubtypeoverlay_remove", icon="REMOVE", text="")
overlay_mesh = None
if len(subtype.overlays) and subtype.active_overlay >= 0:
overlay = subtype.overlays[subtype.active_overlay]
layout.prop(overlay, 'name', text="Name")
layout.prop_search(overlay, 'linked_mesh', bpy.data, 'objects', text="Mesh")
if overlay.linked_mesh in bpy.data.objects:
overlay_mesh = bpy.data.objects[overlay.linked_mesh]
layout.prop(overlay, 'show_overlay', text="Show Overlay")
# Mesh attachments
layout.label(text="Attachment Meshes:")
row = layout.row()
row.template_list("UI_UL_list", "SCENE_UL_SACTAttachments",
actor_data, 'attachments', actor_data, 'active_attachment')
col = row.column(align=True)
col.operator("scene.sactattachment_add", icon="ADD", text="")
col.operator("scene.sactattachment_remove", icon="REMOVE", text="")
attachment_armature = linked_armature
attachment_mesh = None
if len(actor_data.attachments) and actor_data.active_attachment >= 0:
attachment = actor_data.attachments[actor_data.active_attachment]
layout.prop(attachment, 'name', text="Name")
layout.prop_search(attachment, 'linked_armature', bpy.data, 'objects', text="Armature")
if attachment.linked_armature in bpy.data.objects:
attachment_armature = bpy.data.objects[attachment.linked_armature]
layout.prop_search(attachment, 'linked_mesh', bpy.data, 'objects', text="Mesh")
if attachment.linked_mesh in bpy.data.objects:
attachment_mesh = bpy.data.objects[attachment.linked_mesh]
layout.prop(attachment, 'show_attachment', text="Show Attachment")
# Validate
if linked_mesh is None:
layout.label(text="Source mesh not set", icon='ERROR')
elif linked_mesh.type != 'MESH':
layout.label(text="Source mesh not 'MESH'", icon='ERROR')
elif linked_armature is not None and linked_mesh not in linked_armature.children:
            layout.label(text=linked_mesh.name+" not a child of "+linked_armature.name, icon='ERROR')
elif linked_mesh.parent_type != 'ARMATURE':
layout.label(text="Source mesh not 'ARMATURE' parent type", icon='ERROR')
if overlay_mesh:
if overlay_mesh.type != 'MESH':
layout.label(text="Overlay mesh not 'MESH'", icon='ERROR')
elif overlay_mesh.parent_type != 'ARMATURE':
layout.label(text="Overlay mesh not 'ARMATURE' parent type", icon='ERROR')
if attachment_mesh:
if attachment_mesh.type != 'MESH':
layout.label(text="Attachment mesh not 'MESH'", icon='ERROR')
elif attachment_armature is not None and attachment_mesh not in attachment_armature.children:
            layout.label(text=attachment_mesh.name+" not a child of "+attachment_armature.name, icon='ERROR')
elif attachment_mesh.parent_type != 'ARMATURE':
layout.label(text="Attachment mesh not 'ARMATURE' parent type", icon='ERROR')
# Subtype 'add' operator
class SACTSubtype_add(bpy.types.Operator):
bl_idname = "scene.sactsubtype_add"
bl_label = "New HECL Actor Subtype"
bl_description = "Add New HECL Actor Subtype to active scene"
@classmethod
def poll(cls, context):
return (context.scene is not None and
not context.scene.library and
context.scene.hecl_type == 'ACTOR')
def execute(self, context):
actor_data = context.scene.hecl_sact_data
mesh_name = 'ActorMesh'
if mesh_name in actor_data.subtypes:
mesh_name = 'ActorMesh.001'
mesh_idx = 1
while mesh_name in actor_data.subtypes:
mesh_idx += 1
mesh_name = 'ActorMesh.{:0>3}'.format(mesh_idx)
mesh = actor_data.subtypes.add()
mesh.name = mesh_name
actor_data.active_subtype = len(actor_data.subtypes)-1
return {'FINISHED'}
# Subtype 'remove' operator
class SACTSubtype_remove(bpy.types.Operator):
bl_idname = "scene.sactsubtype_remove"
bl_label = "Remove HECL Actor Subtype"
bl_description = "Remove HECL Actor Subtype from active scene"
@classmethod
def poll(cls, context):
actor_data = context.scene.hecl_sact_data
return (context.scene is not None and
not context.scene.library and
context.scene.hecl_type == 'ACTOR' and
actor_data.active_subtype >= 0 and
len(actor_data.subtypes))
def execute(self, context):
actor_data = context.scene.hecl_sact_data
actor_data.subtypes.remove(actor_data.active_subtype)
actor_data.active_subtype -= 1
if actor_data.active_subtype == -1:
actor_data.active_subtype = 0
return {'FINISHED'}
def parent_armature(mesh_obj, arm_obj):
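    """Bind mesh_obj to arm_obj via an Armature modifier, reusing an
    existing modifier if one is present (instead of object parenting)."""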
mesh_obj.parent = None
for mod in mesh_obj.modifiers:
if mod.type == 'ARMATURE':
mod.object = arm_obj
return
mod = mesh_obj.modifiers.new('Parent', 'ARMATURE')
mod.object = arm_obj
#mesh_obj.parent = arm_obj
#mesh_obj.parent_type = 'ARMATURE'
# Subtype 'load' operator
class SACTSubtype_load(bpy.types.Operator):
bl_idname = "scene.sactsubtype_load"
bl_label = "Load HECL Actor Subtype"
bl_description = "Loads Subtype for viewing in active scene"
@classmethod
def poll(cls, context):
return (context.scene is not None and
context.scene.hecl_type == 'ACTOR' and
len(context.scene.hecl_sact_data.subtypes) and
context.scene.hecl_sact_data.active_subtype >= 0)
def execute(self, context):
actor_data = context.scene.hecl_sact_data
subtype = actor_data.subtypes[actor_data.active_subtype]
# Armature
linked_armature = None
if subtype.linked_armature in bpy.data.objects:
linked_armature = bpy.data.objects[subtype.linked_armature]
else:
return {'FINISHED'}
# Hide armature children
for object in linked_armature.children:
if object.name in context.scene.objects:
object.hide_set(True)
# Hide all meshes (incl overlays)
for subtype_data in actor_data.subtypes:
if subtype_data.linked_mesh in bpy.data.objects:
mesh = bpy.data.objects[subtype_data.linked_mesh]
if mesh.name in context.scene.objects:
mesh.hide_set(True)
for overlay in subtype_data.overlays:
if overlay.linked_mesh in bpy.data.objects:
mesh = bpy.data.objects[overlay.linked_mesh]
if mesh.name in context.scene.objects:
mesh.hide_set(True)
# Hide/Show selected attachment meshes
for attachment in actor_data.attachments:
if attachment.linked_mesh in bpy.data.objects:
mesh_obj = bpy.data.objects[attachment.linked_mesh]
if mesh_obj.name in context.scene.objects:
mesh_obj.hide_set(not attachment.show_attachment)
attachment_armature = linked_armature
if attachment.linked_armature in bpy.data.objects:
attachment_armature = bpy.data.objects[attachment.linked_armature]
if mesh_obj != attachment_armature:
parent_armature(mesh_obj, attachment_armature)
# Show only the chosen subtype (and selected overlays)
if subtype.linked_mesh in bpy.data.objects:
mesh_obj = bpy.data.objects[subtype.linked_mesh]
if subtype.show_mesh:
mesh_obj.hide_set(False)
if mesh_obj != linked_armature:
parent_armature(mesh_obj, linked_armature)
for overlay in subtype.overlays:
if overlay.linked_mesh in bpy.data.objects:
mesh_obj = bpy.data.objects[overlay.linked_mesh]
if overlay.show_overlay:
mesh_obj.hide_set(False)
if mesh_obj != linked_armature:
parent_armature(mesh_obj, linked_armature)
return {'FINISHED'}
# Subtype overlay 'add' operator
class SACTSubtypeOverlay_add(bpy.types.Operator):
bl_idname = "scene.sactsubtypeoverlay_add"
bl_label = "New HECL Actor Subtype Overlay"
bl_description = "Add New HECL Actor Subtype Overlay"
@classmethod
def poll(cls, context):
actor_data = context.scene.hecl_sact_data
return (context.scene is not None and
not context.scene.library and
context.scene.hecl_type == 'ACTOR' and
len(actor_data.subtypes) and actor_data.active_subtype >= 0)
def execute(self, context):
actor_data = context.scene.hecl_sact_data
subtype = actor_data.subtypes[actor_data.active_subtype]
overlay_name = 'ActorOverlay'
if overlay_name in subtype.overlays:
overlay_name = 'ActorOverlay.001'
overlay_idx = 1
while overlay_name in subtype.overlays:
overlay_idx += 1
overlay_name = 'ActorOverlay.{:0>3}'.format(overlay_idx)
overlay = subtype.overlays.add()
overlay.name = overlay_name
subtype.active_overlay = len(subtype.overlays)-1
return {'FINISHED'}
# Subtype overlay 'remove' operator
class SACTSubtypeOverlay_remove(bpy.types.Operator):
bl_idname = "scene.sactsubtypeoverlay_remove"
bl_label = "Remove HECL Actor Subtype Overlay"
bl_description = "Remove HECL Actor Subtype Overlay"
@classmethod
def poll(cls, context):
actor_data = context.scene.hecl_sact_data
return (context.scene is not None and
not context.scene.library and
context.scene.hecl_type == 'ACTOR' and
actor_data.active_subtype >= 0 and
len(actor_data.subtypes) and
actor_data.subtypes[actor_data.active_subtype].active_overlay >= 0 and
len(actor_data.subtypes[actor_data.active_subtype].overlays))
def execute(self, context):
actor_data = context.scene.hecl_sact_data
subtype = actor_data.subtypes[actor_data.active_subtype]
subtype.overlays.remove(subtype.active_overlay)
subtype.active_overlay -= 1
if subtype.active_overlay == -1:
subtype.active_overlay = 0
return {'FINISHED'}
# Subtype overlay 'add' operator
class SACTAttachment_add(bpy.types.Operator):
bl_idname = "scene.sactattachment_add"
bl_label = "New HECL Actor Attachment"
bl_description = "Add New HECL Actor Attachment"
@classmethod
def poll(cls, context):
actor_data = context.scene.hecl_sact_data
return (context.scene is not None and
not context.scene.library and
context.scene.hecl_type == 'ACTOR')
def execute(self, context):
actor_data = context.scene.hecl_sact_data
attachment_name = 'ActorAttachment'
if attachment_name in actor_data.attachments:
attachment_name = 'ActorAttachment.001'
attachment_idx = 1
while attachment_name in actor_data.attachments:
attachment_idx += 1
attachment_name = 'ActorAttachment.{:0>3}'.format(attachment_idx)
attachment = actor_data.attachments.add()
attachment.name = attachment_name
actor_data.active_attachment = len(actor_data.attachments)-1
return {'FINISHED'}
# Subtype overlay 'remove' operator
class SACTAttachment_remove(bpy.types.Operator):
bl_idname = "scene.sactattachment_remove"
bl_label = "Remove HECL Actor Attachment"
bl_description = "Remove HECL Actor Attachment"
@classmethod
def poll(cls, context):
actor_data = context.scene.hecl_sact_data
return (context.scene is not None and
not context.scene.library and
context.scene.hecl_type == 'ACTOR' and
actor_data.active_attachment >= 0 and
len(actor_data.attachments))
def execute(self, context):
actor_data = context.scene.hecl_sact_data
actor_data.attachments.remove(actor_data.active_attachment)
actor_data.active_attachment -= 1
if actor_data.active_attachment == -1:
actor_data.active_attachment = 0
return {'FINISHED'}
# Registration
def register():
bpy.utils.register_class(SACTSubtypeOverlay)
bpy.utils.register_class(SACTSubtypeOverlay_add)
bpy.utils.register_class(SACTSubtypeOverlay_remove)
bpy.utils.register_class(SACTAttachment)
bpy.utils.register_class(SACTAttachment_add)
bpy.utils.register_class(SACTAttachment_remove)
bpy.utils.register_class(SACTSubtype)
bpy.utils.register_class(SACTSubtype_add)
bpy.utils.register_class(SACTSubtype_remove)
bpy.utils.register_class(SACTSubtype_load)
def unregister():
bpy.utils.unregister_class(SACTSubtype)
bpy.utils.unregister_class(SACTSubtype_add)
bpy.utils.unregister_class(SACTSubtype_remove)
bpy.utils.unregister_class(SACTSubtype_load)
bpy.utils.unregister_class(SACTAttachment)
bpy.utils.unregister_class(SACTAttachment_add)
bpy.utils.unregister_class(SACTAttachment_remove)
bpy.utils.unregister_class(SACTSubtypeOverlay)
bpy.utils.unregister_class(SACTSubtypeOverlay_add)
bpy.utils.unregister_class(SACTSubtypeOverlay_remove)
|
third_party/blink/web_tests/http/tests/websocket/fragmented-binary-frames_wsh.py | zealoussnow/chromium | 14,668 | 11065851 |
from six.moves import range
from mod_pywebsocket import common
from mod_pywebsocket import stream
from mod_pywebsocket import util
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
# pyformat: disable
messages_to_send = [[b'Hello, ', b'world!'], [b'', b'Hello, ', b'', b'world!', b''], [b'', b'', b''],
[util.pack_byte(i) for i in range(256)]]
# pyformat: enable
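    # RFC 6455 fragmentation: the first frame of a message carries the real
    # opcode (BINARY here), every later frame uses OPCODE_CONTINUATION, and
    # only the final frame has the FIN bit set.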
for message_list in messages_to_send:
for index, message in enumerate(message_list):
# FIXME: Should use better API to send binary messages when
# pywebsocket supports it.
if index == 0:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_CONTINUATION
if index < len(message_list) - 1:
final = 0
else:
final = 1
header = stream.create_header(opcode,
len(message), final, 0, 0, 0, 0)
request.connection.write(header + message)
|
var/spack/repos/builtin/packages/py-py6s/package.py | LiamBindle/spack | 2,360 | 11065877 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPy6s(PythonPackage):
"""
A wrapper for the 6S Radiative Transfer Model to make it easy to run
simulations with a variety of input parameters, and to produce outputs in
an easily processable form.
"""
homepage = "https://py6s.rtwilson.com/"
pypi = "py6s/Py6S-1.8.0.tar.gz"
version('1.8.0', sha256='256162d2f1f558e601d4f79022c037a0051838ba307b9f4d1f5fcf0b46a0c277')
depends_on('python@3:', type=('build', 'run'), when='@1.8.0')
depends_on('py-setuptools', type='build')
depends_on('[email protected]', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
|
examples/python/elf_unstrip.py | rafael-santiago/LIEF | 2,999 | 11065882 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description
# -----------
# In this example, we assume that we found
# the ``main`` function at address 0x402A00
# and we add a static symbol to the binary
# so that we can do:
#
# (gdb) break main
# Breakpoint 1 at 0x402a00
from lief import ELF
import sys
binary = ELF.parse(sys.argv[1])
symtab_section = ELF.Section()
symtab_section.name = ""
symtab_section.type = ELF.SECTION_TYPES.SYMTAB
symtab_section.entry_size = 0x18
symtab_section.alignment = 8
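# An ELF SYMTAB's sh_link must hold the section index of its string table;
# the strtab added below lands right after this symtab, hence the +1.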
symtab_section.link = len(binary.sections) + 1
symtab_section.content = [0] * 100
symstr_section = ELF.Section()
symstr_section.name = ""
symstr_section.type = ELF.SECTION_TYPES.STRTAB
symstr_section.entry_size = 1
symstr_section.alignment = 1
symstr_section.content = [0] * 100
symtab_section = binary.add(symtab_section, loaded=False)
symstr_section = binary.add(symstr_section, loaded=False)
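# The ELF spec reserves symbol-table index 0 for a null (STN_UNDEF) entry,
# so an empty symbol is added before the real 'main' symbol.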
symbol = ELF.Symbol()
symbol.name = ""
symbol.type = ELF.SYMBOL_TYPES.NOTYPE
symbol.value = 0
symbol.binding = ELF.SYMBOL_BINDINGS.LOCAL
symbol.size = 0
symbol.shndx = 0
symbol = binary.add_static_symbol(symbol)
symbol = ELF.Symbol()
symbol.name = "main"
symbol.type = ELF.SYMBOL_TYPES.FUNC
symbol.value = 0x402A00
symbol.binding = ELF.SYMBOL_BINDINGS.LOCAL
symbol.shndx = 14
symbol = binary.add_static_symbol(symbol)
print(symbol)
binary.write(sys.argv[2])
|
neural_parts/models/resnet.py | naynasa/neural_parts_fork | 137 | 11065889 |
import torch
from torch import nn
class ResidualBlock(nn.Module):
def __init__(self, in_dims, out_dims, norm_method="batch_norm"):
super().__init__()
self.proj = (
nn.Sequential() if in_dims == out_dims else
nn.Linear(in_dims, out_dims)
)
self.fc1 = nn.Linear(out_dims, out_dims)
self.fc2 = nn.Linear(out_dims, out_dims)
if norm_method == "layer_norm":
self.norm1 = nn.LayerNorm(out_dims)
self.norm2 = nn.LayerNorm(out_dims)
elif norm_method == "no_norm":
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
else:
raise ValueError(("Invalid normalization method "
"{}").format(norm_method))
def _norm(self, norm_layer, x):
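        # Helper for channel-first norms (e.g. BatchNorm2d): permute
        # (B, N, M, D) -> (B, D, N, M), normalize, permute back. Note that
        # forward() applies norm1/norm2 directly, so only the shape-agnostic
        # layer_norm / no_norm options are actually wired up here.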
B, N, M, D = x.shape
x = x.permute(0, 3, 1, 2)
return norm_layer(x).permute(0, 2, 3, 1).contiguous()
def forward(self, x):
x = self.proj(x)
out = self.fc1(x)
out = self.norm1(out).relu()
out = self.fc2(out)
out = self.norm2(out).relu()
return out + x
|
neutron/agent/l3/dvr_fip_ns.py | congnt95/neutron | 1,080 | 11065899 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
from neutron_lib import constants as lib_constants
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.utils import runtime
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
from neutron._i18n import _
from neutron.agent.l3 import fip_rule_priority_allocator as frpa
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import utils as common_utils
from neutron.ipam import utils as ipam_utils
LOG = logging.getLogger(__name__)
FIP_NS_PREFIX = 'fip-'
FIP_EXT_DEV_PREFIX = 'fg-'
FIP_2_ROUTER_DEV_PREFIX = 'fpr-'
ROUTER_2_FIP_DEV_PREFIX = namespaces.ROUTER_2_FIP_DEV_PREFIX
# Route Table index for FIPs
FIP_RT_TBL = 16
# Rule priority range for FIPs
FIP_PR_START = 32768
FIP_PR_END = FIP_PR_START + 40000
# Fixed rule priority for Fast Path Exit rules
FAST_PATH_EXIT_PR = 80000
class FipNamespace(namespaces.Namespace):
def __init__(self, ext_net_id, agent_conf, driver, use_ipv6):
name = self._get_ns_name(ext_net_id)
super(FipNamespace, self).__init__(
name, agent_conf, driver, use_ipv6)
self._ext_net_id = ext_net_id
self.agent_conf = agent_conf
self.driver = driver
self.use_ipv6 = use_ipv6
self.agent_gateway_port = None
self._subscribers = set()
path = os.path.join(agent_conf.state_path, 'fip-priorities')
self._rule_priorities = frpa.FipRulePriorityAllocator(path,
FIP_PR_START,
FIP_PR_END)
self._iptables_manager = iptables_manager.IptablesManager(
namespace=self.get_name(),
use_ipv6=self.use_ipv6)
path = os.path.join(agent_conf.state_path, 'fip-linklocal-networks')
self.local_subnets = lla.LinkLocalAllocator(
path, lib_constants.DVR_FIP_LL_CIDR)
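        # Hands out link-local address pairs for the veth links that join
        # each router namespace to this FIP namespace.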
self.destroyed = False
self._stale_fips_checked = False
@classmethod
def _get_ns_name(cls, ext_net_id):
return namespaces.build_ns_name(FIP_NS_PREFIX, ext_net_id)
def get_name(self):
return self._get_ns_name(self._ext_net_id)
def get_ext_device_name(self, port_id):
return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_int_device_name(self, router_id):
return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
def get_rtr_ext_device_name(self, router_id):
return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
def has_subscribers(self):
return len(self._subscribers) != 0
def subscribe(self, external_net_id):
is_first = not self.has_subscribers()
self._subscribers.add(external_net_id)
return is_first
def unsubscribe(self, external_net_id):
self._subscribers.discard(external_net_id)
return not self.has_subscribers()
def lookup_rule_priority(self, floating_ip):
return self._rule_priorities.lookup(floating_ip)
def allocate_rule_priority(self, floating_ip):
return self._rule_priorities.allocate(floating_ip)
def deallocate_rule_priority(self, floating_ip):
self._rule_priorities.release(floating_ip)
@contextlib.contextmanager
def _fip_port_lock(self, interface_name):
# Use a namespace and port-specific lock semaphore to allow for
# concurrency
lock_name = 'port-lock-' + self.name + '-' + interface_name
with lockutils.lock(lock_name, runtime.SYNCHRONIZED_PREFIX):
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('DVR: FIP namespace config failure '
'for interface %s', interface_name)
def create_or_update_gateway_port(self, agent_gateway_port):
interface_name = self.get_ext_device_name(agent_gateway_port['id'])
# The lock is used to make sure another thread doesn't call to
# update the gateway port before we are done initializing things.
with self._fip_port_lock(interface_name):
is_first = self.subscribe(agent_gateway_port['network_id'])
if is_first:
# Check for subnets that are populated for the agent
# gateway port that was created on the server.
if 'subnets' not in agent_gateway_port:
self.unsubscribe(agent_gateway_port['network_id'])
LOG.debug('DVR: Missing subnet in agent_gateway_port: %s',
agent_gateway_port)
return
self._create_gateway_port(agent_gateway_port, interface_name)
else:
try:
self._update_gateway_port(
agent_gateway_port, interface_name)
except Exception:
# If an exception occurs at this point, then it is
# good to clean up the namespace that has been created
# and reraise the exception in order to resync the router
with excutils.save_and_reraise_exception():
self.unsubscribe(agent_gateway_port['network_id'])
self.delete()
LOG.exception('DVR: Gateway update in '
'FIP namespace failed')
def _create_gateway_port(self, ex_gw_port, interface_name):
"""Create namespace, request port creationg from Plugin,
then configure Floating IP gateway port.
"""
self.create()
LOG.debug("DVR: adding gateway interface: %s", interface_name)
ns_name = self.get_name()
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'],
interface_name,
ex_gw_port['mac_address'],
namespace=ns_name,
prefix=FIP_EXT_DEV_PREFIX,
mtu=ex_gw_port.get('mtu'))
# Remove stale fg devices
ip_wrapper = ip_lib.IPWrapper(namespace=ns_name)
devices = ip_wrapper.get_devices()
for device in devices:
name = device.name
if name.startswith(FIP_EXT_DEV_PREFIX) and name != interface_name:
LOG.debug('DVR: unplug: %s', name)
self.driver.unplug(name,
namespace=ns_name,
prefix=FIP_EXT_DEV_PREFIX)
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name,
clean_connections=True)
gw_cidrs = [sn['cidr'] for sn in ex_gw_port['subnets']
if sn.get('cidr')]
self.driver.set_onlink_routes(
interface_name, ns_name, ex_gw_port.get('extra_subnets', []),
preserve_ips=gw_cidrs, is_ipv6=False)
self.agent_gateway_port = ex_gw_port
cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name]
ip_wrapper.netns.execute(cmd, check_exit_code=False, privsep_exec=True)
def create(self):
LOG.debug("DVR: add fip namespace: %s", self.name)
# parent class will ensure the namespace exists and turn-on forwarding
super(FipNamespace, self).create()
ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1,
root_namespace=True)
# no connection tracking needed in fip namespace
self._iptables_manager.ipv4['raw'].add_rule('PREROUTING',
'-j CT --notrack')
self._iptables_manager.apply()
def delete(self):
self.destroyed = True
self._delete()
self.agent_gateway_port = None
@namespaces.check_ns_existence
def _delete(self):
ip_wrapper = ip_lib.IPWrapper(namespace=self.name)
for d in ip_wrapper.get_devices():
if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX):
# internal link between IRs and FIP NS
ip_wrapper.del_veth(d.name)
elif d.name.startswith(FIP_EXT_DEV_PREFIX):
# single port from FIP NS to br-ext
# TODO(carl) Where does the port get deleted?
LOG.debug('DVR: unplug: %s', d.name)
self.driver.unplug(d.name,
namespace=self.name,
prefix=FIP_EXT_DEV_PREFIX)
# TODO(mrsmith): add LOG warn if fip count != 0
LOG.debug('DVR: destroy fip namespace: %s', self.name)
super(FipNamespace, self).delete()
def _check_for_gateway_ip_change(self, new_agent_gateway_port):
def get_gateway_ips(gateway_port):
gw_ips = {}
if gateway_port:
for subnet in gateway_port.get('subnets', []):
gateway_ip = subnet.get('gateway_ip', None)
if gateway_ip:
ip_version = common_utils.get_ip_version(gateway_ip)
gw_ips[ip_version] = gateway_ip
return gw_ips
new_gw_ips = get_gateway_ips(new_agent_gateway_port)
old_gw_ips = get_gateway_ips(self.agent_gateway_port)
return new_gw_ips != old_gw_ips
def get_fip_table_indexes(self, ip_version):
ip_rules_list = ip_lib.list_ip_rules(self.get_name(), ip_version)
tbl_index_list = []
for ip_rule in ip_rules_list:
tbl_index = ip_rule['table']
if tbl_index in ['local', 'default', 'main']:
continue
tbl_index_list.append(tbl_index)
return tbl_index_list
def _add_default_gateway_for_fip(self, gw_ip, ip_device, tbl_index):
"""Adds default gateway for fip based on the tbl_index passed."""
if tbl_index is None:
ip_version = common_utils.get_ip_version(gw_ip)
tbl_index_list = self.get_fip_table_indexes(ip_version)
for tbl_index in tbl_index_list:
ip_device.route.add_gateway(gw_ip, table=tbl_index)
else:
ip_device.route.add_gateway(gw_ip, table=tbl_index)
def _add_rtr_ext_route_rule_to_route_table(self, ri, fip_2_rtr,
fip_2_rtr_name):
"""Creates external route table and adds routing rules."""
# TODO(Swami): Rename the _get_snat_idx function to some
# generic name that can be used for SNAT and FIP
rt_tbl_index = ri._get_snat_idx(fip_2_rtr)
interface_name = self.get_ext_device_name(
self.agent_gateway_port['id'])
try:
# The lock is used to make sure another thread doesn't call to
# update the gateway route before we are done initializing things.
with self._fip_port_lock(interface_name):
self._update_gateway_route(self.agent_gateway_port,
interface_name,
tbl_index=rt_tbl_index)
except Exception:
# If an exception occurs at this point, then it is
# good to unsubscribe this external network so that
# the next call will trigger the interface to be plugged.
# We reraise the exception in order to resync the router.
with excutils.save_and_reraise_exception():
self.unsubscribe(self.agent_gateway_port['network_id'])
self.agent_gateway_port = None
LOG.exception('DVR: Gateway setup in FIP namespace '
'failed')
# Now add the filter match rule for the table.
ip_lib.add_ip_rule(namespace=self.get_name(), ip=str(fip_2_rtr.ip),
iif=fip_2_rtr_name, table=rt_tbl_index,
priority=rt_tbl_index)
def _update_gateway_port(self, agent_gateway_port, interface_name):
if (not self.agent_gateway_port or
self._check_for_gateway_ip_change(agent_gateway_port)):
# Caller already holding lock
self._update_gateway_route(
agent_gateway_port, interface_name, tbl_index=None)
# Cache the agent gateway port after successfully updating
# the gateway route, so that checking on self.agent_gateway_port
# will be a valid check
self.agent_gateway_port = agent_gateway_port
gw_cidrs = [sn['cidr'] for sn in agent_gateway_port['subnets']
if sn.get('cidr')]
self.driver.set_onlink_routes(
interface_name, self.get_name(),
agent_gateway_port.get('extra_subnets', []), preserve_ips=gw_cidrs,
is_ipv6=False)
def _update_gateway_route(self, agent_gateway_port,
interface_name, tbl_index):
ns_name = self.get_name()
ipd = ip_lib.IPDevice(interface_name, namespace=ns_name)
# If the 'fg-' device doesn't exist in the namespace then trying
# to send advertisements or configure the default route will just
# throw exceptions. Unsubscribe this external network so that
# the next call will trigger the interface to be plugged.
if not ipd.exists():
LOG.warning('DVR: FIP gateway port with interface '
'name: %(device)s does not exist in the given '
'namespace: %(ns)s', {'device': interface_name,
'ns': ns_name})
msg = _('DVR: Gateway update route in FIP namespace failed, retry '
'should be attempted on next call')
raise l3_exc.FloatingIpSetupException(msg)
for fixed_ip in agent_gateway_port['fixed_ips']:
ip_lib.send_ip_addr_adv_notif(ns_name,
interface_name,
fixed_ip['ip_address'])
for subnet in agent_gateway_port['subnets']:
gw_ip = subnet.get('gateway_ip')
if gw_ip:
is_gateway_not_in_subnet = not ipam_utils.check_subnet_ip(
subnet.get('cidr'), gw_ip)
if is_gateway_not_in_subnet:
ipd.route.add_route(gw_ip, scope='link')
self._add_default_gateway_for_fip(gw_ip, ipd, tbl_index)
else:
current_gateway = ipd.route.get_gateway()
if current_gateway and current_gateway.get('gateway'):
ipd.route.delete_gateway(current_gateway.get('gateway'))
def _add_cidr_to_device(self, device, ip_cidr):
to = common_utils.cidr_to_ip(ip_cidr)
if not device.addr.list(to=to):
device.addr.add(ip_cidr, add_broadcast=False)
def delete_rtr_2_fip_link(self, ri):
"""Delete the interface between router and FloatingIP namespace."""
LOG.debug("Delete FIP link interfaces for router: %s", ri.router_id)
rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id)
fip_2_rtr_name = self.get_int_device_name(ri.router_id)
fip_ns_name = self.get_name()
# remove default route entry
if ri.rtr_fip_subnet is None:
# see if there is a local subnet in the cache
ri.rtr_fip_subnet = self.local_subnets.lookup(ri.router_id)
if ri.rtr_fip_subnet:
rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name)
if device.exists():
device.route.delete_gateway(str(fip_2_rtr.ip),
table=FIP_RT_TBL)
if self.agent_gateway_port:
interface_name = self.get_ext_device_name(
self.agent_gateway_port['id'])
fg_device = ip_lib.IPDevice(
interface_name, namespace=fip_ns_name)
if fg_device.exists():
# Remove the fip namespace rules and routes associated to
# fpr interface route table.
tbl_index = ri._get_snat_idx(fip_2_rtr)
# Flush the table
fg_device.route.flush(lib_constants.IP_VERSION_4,
table=tbl_index)
fg_device.route.flush(lib_constants.IP_VERSION_6,
table=tbl_index)
# Remove the rule lookup
# /0 addresses for IPv4 and IPv6 are used to pass
# IP protocol version information based on a
# link-local address IP version. Using any of those
# is equivalent to using 'from all' for iproute2.
rule_ip = lib_constants.IP_ANY[fip_2_rtr.ip.version]
ip_lib.delete_ip_rule(fip_ns_name, ip=rule_ip,
iif=fip_2_rtr_name, table=tbl_index,
priority=tbl_index)
self.local_subnets.release(ri.router_id)
ri.rtr_fip_subnet = None
# Check for namespace before deleting the device
if not self.destroyed:
fns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
if fns_ip.device(fip_2_rtr_name).exists():
fns_ip.del_veth(fip_2_rtr_name)
def create_rtr_2_fip_link(self, ri):
"""Create interface between router and Floating IP namespace."""
LOG.debug("Create FIP link interfaces for router %s", ri.router_id)
rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id)
fip_2_rtr_name = self.get_int_device_name(ri.router_id)
fip_ns_name = self.get_name()
# add link local IP to interface
if ri.rtr_fip_subnet is None:
ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id)
rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair()
rtr_2_fip_dev = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name)
fip_2_rtr_dev = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
if not rtr_2_fip_dev.exists():
ip_wrapper = ip_lib.IPWrapper(namespace=ri.ns_name)
rtr_2_fip_dev, fip_2_rtr_dev = ip_wrapper.add_veth(rtr_2_fip_name,
fip_2_rtr_name,
fip_ns_name)
rtr_2_fip_dev.link.set_up()
fip_2_rtr_dev.link.set_up()
mtu = ri.get_ex_gw_port().get('mtu')
if mtu:
rtr_2_fip_dev.link.set_mtu(mtu)
fip_2_rtr_dev.link.set_mtu(mtu)
self._add_cidr_to_device(rtr_2_fip_dev, str(rtr_2_fip))
self._add_cidr_to_device(fip_2_rtr_dev, str(fip_2_rtr))
        # Add permanent ARP entries on each side of the veth pair
rtr_2_fip_dev.neigh.add(common_utils.cidr_to_ip(fip_2_rtr),
fip_2_rtr_dev.link.address)
fip_2_rtr_dev.neigh.add(common_utils.cidr_to_ip(rtr_2_fip),
rtr_2_fip_dev.link.address)
self._add_rtr_ext_route_rule_to_route_table(ri, fip_2_rtr,
fip_2_rtr_name)
# add default route for the link local interface
rtr_2_fip_dev.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL)
def scan_fip_ports(self, ri):
# scan system for any existing fip ports
rtr_2_fip_interface = self.get_rtr_ext_device_name(ri.router_id)
device = ip_lib.IPDevice(rtr_2_fip_interface, namespace=ri.ns_name)
if device.exists():
if len(ri.get_router_cidrs(device)):
self.rtr_fip_connect = True
else:
self.rtr_fip_connect = False
# On upgrade, there could be stale IP addresses configured, check
# and remove them once.
# TODO(haleyb): this can go away after a cycle or two
if not self._stale_fips_checked:
stale_cidrs = (
ip for ip in router_info.RouterInfo.get_router_cidrs(
ri, device)
if common_utils.is_cidr_host(ip))
for ip_cidr in stale_cidrs:
LOG.debug("Removing stale floating ip %s from interface "
"%s in namespace %s",
ip_cidr, rtr_2_fip_interface, ri.ns_name)
device.delete_addr_and_conntrack_state(ip_cidr)
self._stale_fips_checked = True
|
tests/conftest.py | DK99/python-betterproto | 708 | 11065908 | import pytest
def pytest_addoption(parser):
parser.addoption(
"--repeat", type=int, default=1, help="repeat the operation multiple times"
)
@pytest.fixture(scope="session")
def repeat(request):
return request.config.getoption("repeat")
|
crawler/base_crawler.py | hridaydutta123/Bitcluster | 102 | 11065911 | import bitcoin
import bitcoin.rpc
import bitcoin.core.script
import socket
import binascii
import http.client
from crawler.address_utils import Addressutils
from bitcoin.core import CTransaction
from settings import settings
from bitcoin.core.script import OP_FALSE
class BaseCrawler:
def __init__(self):
self.block_id = -1
self.proxy = None
self.connect_to_bitcoind_rpc()
self.address_utils = Addressutils()
def connect_to_bitcoind_rpc(self):
for i in range(1,settings.rcp_reconnect_max_retry+1):
try:
self.proxy = bitcoin.rpc.Proxy()
return
except http.client.HTTPException:
print("Caught a connection error from Bitcoind RCP, Reconnecting...(%d/%d)" %(i,settings.rcp_reconnect_max_retry))
def crawl_block(self,block_id):
for i in range(1,settings.rcp_reconnect_max_retry+1):
try:
try:
self.block_id = block_id
block_hash = self.proxy.getblockhash(block_id)
except IndexError:
print("Block not found")
return False
block = self.proxy.getblock(block_hash)
                for tx in block.vtx[1:]:  # skip the coinbase (mining) tx
self.parse_transaction(tx,block)
return True
except socket.error:
print("Caught an error from Bitcoind RCP, Reconnecting and retrying...(%d/%d)" %(i,settings.rcp_reconnect_max_retry))
self.connect_to_bitcoind_rpc()
def parse_transaction(self,transaction,block):
assert isinstance(transaction,CTransaction)
input_addresses = set()
trx_hash = binascii.hexlify(transaction.GetHash()[::-1]).decode('utf-8')
for vin in transaction.vin:
try:
sign_script = vin.scriptSig
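                # A standard scriptSig is <push sig><push pubkey>: the first
                # byte is a push opcode equal to the signature length, so
                # skipping it plus the signature leaves the public-key part.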
push_data_sig = sign_script[0]
sign_script = sign_script[1:]
sign_script = sign_script[push_data_sig:]
if len(sign_script) > 0:
input_addresses.add(self.address_utils.convert_hash160_to_addr(self.address_utils.convert_public_key_to_hash160(sign_script)))
else:
prevtxout = self.proxy.getrawtransaction(vin.prevout.hash).vout[vin.prevout.n]
input_addresses.add(self.address_utils.get_hash160_from_cscript(prevtxout.scriptPubKey))
except Exception as ex:
if settings.debug:
print("Transaction %s Unable To Parse SigScript %s"%(trx_hash,binascii.hexlify(vin.scriptSig)))
print(ex)
self.do_work(input_addresses, transaction.vout,block,trx_hash)
def do_work(self,inputs_addresses,outputs_scripts,block,trx_hash):
raise NotImplementedError("Not implemented method do_work") |
tests/test_simplevardecl.py | rbarzic/PyCParser | 289 | 11065942 |
from pprint import pprint
import helpers_test
from cparser import *
def test_simplevardecl():
testcode = """
int16_t a;
int b = 42;
void* c = &b;
int* d = &b;
char e, *f = "abc", g, **h = &f;
"""
state = helpers_test.parse(testcode)
a = state.vars["a"]
b = state.vars["b"]
c = state.vars["c"]
d = state.vars["d"]
e = state.vars["e"]
f = state.vars["f"]
g = state.vars["g"]
h = state.vars["h"]
for v in "abcdefgh":
var = locals()[v]
assert state.vars[v] is var
assert var.name == v
assert a.type == CStdIntType("int16_t")
assert a.body is None
assert b.type == CBuiltinType(("int",))
assert b.body is not None
assert b.body.getConstValue(state) == 42
assert c.type == CBuiltinType(("void","*"))
#pprint(c.body) TODO: check <CStatement <COp '&'> <CStatement <CVarDecl 'b' ...
assert d.type == CPointerType(CBuiltinType(("int",)))
assert e.type == CBuiltinType(("char",))
assert f.type == CPointerType(e.type)
assert h.type == CPointerType(f.type)
assert f.body.getConstValue(state) == "abc"
#pprint(h.body)
|
tests/layers/attentive_gru_test.py | richarajpal/deep_qa | 459 | 11065990 | # pylint: disable=no-self-use
import numpy
from keras.layers import Input, Embedding, merge
from keras.models import Model
import keras.backend as K
from deep_qa.layers.encoders import AttentiveGru
class TestAttentiveGRU:
def test_on_unmasked_input(self):
sentence_length = 5
embedding_dim = 10
vocabulary_size = 15
input_layer = Input(shape=(sentence_length,), dtype='int32')
attention = Input(shape=(sentence_length,), dtype='float32')
# Embedding does not mask zeros
embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim)
attentive_gru = AttentiveGru(output_dim=embedding_dim,
input_length=sentence_length,
return_sequences=True,
name='attentive_gru_test')
embedded_input = embedding(input_layer)
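        # Prepend the per-timestep attention scalar as an extra feature
        # channel, giving each timestep a vector of size 1 + embedding_dim
        # (11 here); AttentiveGru consumes this combined representation.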
concat_mode = lambda layer_outs: K.concatenate([K.expand_dims(layer_outs[0], axis=2),
layer_outs[1]],
axis=2)
combined_sentence_with_attention = merge([attention, embedded_input],
mode=concat_mode,
output_shape=(5, 11))
sequence_of_outputs = attentive_gru(combined_sentence_with_attention)
model = Model(inputs=[input_layer, attention], outputs=sequence_of_outputs)
model.compile(loss="mse", optimizer="sgd") # Will not train this model
test_input = numpy.asarray([[0, 3, 1, 7, 10]], dtype='int32')
attention_input = numpy.asarray([[1., 0., 0., 0., 0.]], dtype='float32')
# To debug this model, we are going to check that if we pass an attention mask into
# the attentive_gru which has all zeros apart from the first element which is one,
# all the elements should be equal to the first output as the state won't change over
# time, as we add in none of the memory. This is not the intended use of this class,
# but if this works, the intended use will be correct.
actual_sequence_of_outputs = numpy.squeeze(model.predict([test_input, attention_input]))
for i in range(sentence_length - 1):
assert numpy.array_equal(actual_sequence_of_outputs[i, :], actual_sequence_of_outputs[i+1, :])
|
jsonrpc/tests/test_pep3107.py | massover/json-rpc | 409 | 11066000 |
<filename>jsonrpc/tests/test_pep3107.py<gh_stars>100-1000
from ..manager import JSONRPCResponseManager
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestJSONRPCResponseManager(unittest.TestCase):
@unittest.skipIf(sys.version_info < (3, 5), "Test Py3.5+ functionality")
def test_typeerror_with_annotations(self):
"""If a function has Python3 annotations and is called with improper
arguments, make sure the framework doesn't fail with inspect.getargspec
"""
from .py35_utils import distance
dispatcher = {
"distance": distance,
}
req = '{"jsonrpc": "2.0", "method": "distance", "params": [], "id": 1}'
result = JSONRPCResponseManager.handle(req, dispatcher)
# Make sure this returns JSONRPCInvalidParams rather than raising
# UnboundLocalError
self.assertEqual(result.error['code'], -32602)
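    # For reference, a hedged sketch of what py35_utils.distance is assumed to
    # look like (it lives in a separate module so this file still parses on
    # Python 2; the name is real, the body shown here is illustrative):
    #
    #     def distance(start: float, end: float) -> float:
    #         return end - start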
|
influxdb/tests/influxdb08/dataframe_client_test.py | timgates42/influxdb-python | 1,429 | 11066009 |
<reponame>timgates42/influxdb-python
# -*- coding: utf-8 -*-
"""Unit tests for misc module."""
from datetime import timedelta
import copy
import json
import unittest
import warnings
import requests_mock
from nose.tools import raises
from influxdb.tests import skip_if_pypy, using_pypy
from .client_test import _mocked_session
if not using_pypy:
import pandas as pd
from pandas.util.testing import assert_frame_equal
from influxdb.influxdb08 import DataFrameClient
@skip_if_pypy
class TestDataFrameClient(unittest.TestCase):
"""Define the DataFramClient test object."""
def setUp(self):
"""Set up an instance of TestDataFrameClient object."""
# By default, raise exceptions on warnings
warnings.simplefilter('error', FutureWarning)
def test_write_points_from_dataframe(self):
"""Test write points from dataframe."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_float_nan(self):
"""Test write points from dataframe with NaN float."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[[1, float("NaN"), 1.0], [2, 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
[1, None, 1.0, 0],
[2, 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_in_batches(self):
"""Test write points from dataframe in batches."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
self.assertTrue(cli.write_points({"foo": dataframe}, batch_size=1))
def test_write_points_from_dataframe_with_numeric_column_names(self):
"""Test write points from dataframe with numeric columns."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
# df with numeric column names
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ['0', '1', '2', "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_period_index(self):
"""Test write points from dataframe with period index."""
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[pd.Period('1970-01-01'),
pd.Period('1970-01-02')],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 86400]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_time_precision(self):
"""Test write points from dataframe with time precision."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
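        # Only the trailing time column scales with precision; note that this
        # client's 'm' flag means milliseconds (not minutes) and 'u' means
        # microseconds, as the expected payloads below show.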
points_ms = copy.deepcopy(points)
points_ms[0]["points"][1][-1] = 3600 * 1000
points_us = copy.deepcopy(points)
points_us[0]["points"][1][-1] = 3600 * 1000000
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe}, time_precision='s')
self.assertListEqual(json.loads(m.last_request.body), points)
cli.write_points({"foo": dataframe}, time_precision='m')
self.assertListEqual(json.loads(m.last_request.body), points_ms)
cli.write_points({"foo": dataframe}, time_precision='u')
self.assertListEqual(json.loads(m.last_request.body), points_us)
@raises(TypeError)
def test_write_points_from_dataframe_fails_without_time_index(self):
"""Test write points from dataframe that fails without time index."""
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
@raises(TypeError)
def test_write_points_from_dataframe_fails_with_series(self):
"""Test failed write points from dataframe with series."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.Series(data=[1.0, 2.0],
index=[now, now + timedelta(hours=1)])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
def test_query_into_dataframe(self):
"""Test query into a dataframe."""
data = [
{
"name": "foo",
"columns": ["time", "sequence_number", "column_one"],
"points": [
[3600, 16, 2], [3600, 15, 1],
[0, 14, 2], [0, 13, 1]
]
}
]
# dataframe sorted ascending by time first, then sequence_number
dataframe = pd.DataFrame(data=[[13, 1], [14, 2], [15, 1], [16, 2]],
index=pd.to_datetime([0, 0,
3600, 3600],
unit='s', utc=True),
columns=['sequence_number', 'column_one'])
with _mocked_session('get', 200, data):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query('select column_one from foo;')
assert_frame_equal(dataframe, result)
def test_query_multiple_time_series(self):
"""Test query for multiple time series."""
data = [
{
"name": "series1",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, 323048, 323048, 323048, 0]]
},
{
"name": "series2",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, -2.8233, -2.8503, -2.7832, 0.0173]]
},
{
"name": "series3",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, -0.01220, -0.01220, -0.01220, 0]]
}
]
dataframes = {
'series1': pd.DataFrame(data=[[323048, 323048, 323048, 0]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev']),
'series2': pd.DataFrame(data=[[-2.8233, -2.8503, -2.7832, 0.0173]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev']),
'series3': pd.DataFrame(data=[[-0.01220, -0.01220, -0.01220, 0]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev'])
}
with _mocked_session('get', 200, data):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query("""select mean(value), min(value), max(value),
stddev(value) from series1, series2, series3""")
self.assertEqual(dataframes.keys(), result.keys())
for key in dataframes.keys():
assert_frame_equal(dataframes[key], result[key])
def test_query_with_empty_result(self):
"""Test query with empty results."""
with _mocked_session('get', 200, []):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query('select column_one from foo;')
self.assertEqual(result, [])
def test_list_series(self):
"""Test list of series for dataframe object."""
response = [
{
'columns': ['time', 'name'],
'name': 'list_series_result',
'points': [[0, 'seriesA'], [0, 'seriesB']]
}
]
with _mocked_session('get', 200, response):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
series_list = cli.get_list_series()
self.assertEqual(series_list, ['seriesA', 'seriesB'])
def test_datetime_to_epoch(self):
"""Test convert datetime to epoch."""
timestamp = pd.Timestamp('2013-01-01 00:00:00.000+00:00')
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
self.assertEqual(
cli._datetime_to_epoch(timestamp),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='s'),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='m'),
1356998400000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='ms'),
1356998400000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='u'),
1356998400000000.0
)
|
v2/dataset.py | KevinJeon/DanceRevolution | 209 | 11066016 |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this open-source project.
""" Define the dance dataset. """
import numpy as np
import torch
import torch.utils.data
from torch.utils.data import Dataset
def paired_collate_fn(insts):
src_seq, tgt_seq = list(zip(*insts))
src_pos = np.array([
[pos_i + 1 for pos_i, v_i in enumerate(inst)] for inst in src_seq])
src_seq = torch.FloatTensor(src_seq)
src_pos = torch.LongTensor(src_pos)
tgt_seq = torch.FloatTensor(tgt_seq)
return src_seq, src_pos, tgt_seq
class DanceDataset(Dataset):
def __init__(self, musics, dances=None):
if dances is not None:
assert (len(musics) == len(dances)), \
'the number of dances should be equal to the number of musics'
self.musics = musics
self.dances = dances
def __len__(self):
return len(self.musics)
def __getitem__(self, index):
if self.dances is not None:
return self.musics[index], self.dances[index]
else:
return self.musics[index]
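if __name__ == '__main__':
    # Hedged usage sketch (shapes and sizes are made up): wire the dataset and
    # paired_collate_fn into a DataLoader and inspect one batch.
    musics = [np.random.rand(30, 20).astype('float32') for _ in range(4)]
    dances = [np.random.rand(30, 50).astype('float32') for _ in range(4)]
    loader = torch.utils.data.DataLoader(DanceDataset(musics, dances),
                                         batch_size=2,
                                         collate_fn=paired_collate_fn)
    src_seq, src_pos, tgt_seq = next(iter(loader))
    # Expect (2, 30, 20) music features, (2, 30) positions, (2, 30, 50) poses.
    print(src_seq.shape, src_pos.shape, tgt_seq.shape)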
|
hand_eye_calibration/python/hand_eye_calibration/extrinsic_calibration.py | Chatoyant19/handeye_calibration | 333 | 11066028 |
import os
import json
import numpy as np
from hand_eye_calibration.dual_quaternion import DualQuaternion
class ExtrinsicCalibration:
def __init__(self, time_offset, pose_dual_quat):
self.time_offset = time_offset
self.pose_dq = pose_dual_quat
def writeJson(self, out_file, switchConvention = False):
pose = self.pose_dq.to_pose()
if switchConvention:
            pose[3:6] *= -1.0  # convert to JPL quaternion convention
calib = {
'delay' : self.time_offset,
'rotation' : { name : float(pose[i + 3]) for i, name in enumerate('ijkw') },
'translation' : { name : float(pose[i]) for i, name in enumerate('xyz') }
}
with open(out_file, 'w') as f:
json.dump(calib, f, indent = 3, sort_keys=True)
@classmethod
def fromJson(cls, in_file, switchConvention = False):
with open(in_file, 'r') as f:
data = json.load(f)
p = [ float(data['translation'][name]) for name in 'xyz' ]
q = np.array([ float(data['rotation'][name]) for name in 'ijkw' ])
if switchConvention:
q[:3] *= -1.0
dq = DualQuaternion.from_pose_vector(np.hstack((p, q)))
return ExtrinsicCalibration(float(data['delay']), dq)
def __str__(self):
return "[delta_time: %f, delta_pose: %s]" %(self.time_offset, str(self.pose_dq.to_pose()))
def __mul__(self, other):
if not isinstance(other, ExtrinsicCalibration):
return NotImplemented
return ExtrinsicCalibration(self.time_offset + other.time_offset, self.pose_dq * other.pose_dq)
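if __name__ == '__main__':
    # Hedged round-trip sketch: an identity pose with a 5 ms time offset is
    # written to and re-read from JSON (the file path is illustrative).
    identity_dq = DualQuaternion.from_pose_vector(
        np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]))
    calib = ExtrinsicCalibration(0.005, identity_dq)
    calib.writeJson('/tmp/extrinsic_calibration_example.json')
    restored = ExtrinsicCalibration.fromJson('/tmp/extrinsic_calibration_example.json')
    print(restored)
    # Calibrations compose: delays add and poses chain via dual quaternion
    # multiplication.
    print(calib * restored)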
|
hl7apy/v2_5/groups.py | ryoung29/hl7apy | 163 | 11066034 |
<filename>hl7apy/v2_5/groups.py
from hl7apy.utils import iteritems
from .segments import SEGMENTS
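# Each entry maps a group name to a ('sequence', children) tuple; every child
# is [name, segment_definition_or_None, (min_rep, max_rep), 'SEG' or 'GRP'],
# where max_rep == -1 appears to mean unbounded repetition. For example,
# 'ADR_A19_PROCEDURE' below reads: exactly one PR1 segment followed by any
# number of optional ROL segments.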
GROUPS = {
'ADR_A19_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADR_A19_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADR_A19_QUERY_RESPONSE': ('sequence',
(['EVN', SEGMENTS['EVN'], (0, 1), 'SEG'],
['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],
['DB1', SEGMENTS['DB1'], (0, -1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['DRG', SEGMENTS['DRG'], (0, 1), 'SEG'],
['ADR_A19_PROCEDURE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['ADR_A19_INSURANCE', None, (0, -1), 'GRP'],
['ACC', SEGMENTS['ACC'], (0, 1), 'SEG'],
['UB1', SEGMENTS['UB1'], (0, 1), 'SEG'],
['UB2', SEGMENTS['UB2'], (0, 1), 'SEG'],)),
'ADT_A01_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A01_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A03_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A03_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A05_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A05_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A06_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A06_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A16_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A16_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'ADT_A39_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['MRG', SEGMENTS['MRG'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],)),
'ADT_A43_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['MRG', SEGMENTS['MRG'], (1, 1), 'SEG'],)),
'ADT_A45_MERGE_INFO': ('sequence',
(['MRG', SEGMENTS['MRG'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],)),
'BAR_P01_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'BAR_P01_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'BAR_P01_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],
['DB1', SEGMENTS['DB1'], (0, -1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['DRG', SEGMENTS['DRG'], (0, 1), 'SEG'],
['BAR_P01_PROCEDURE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['BAR_P01_INSURANCE', None, (0, -1), 'GRP'],
['ACC', SEGMENTS['ACC'], (0, 1), 'SEG'],
['UB1', SEGMENTS['UB1'], (0, 1), 'SEG'],
['UB2', SEGMENTS['UB2'], (0, 1), 'SEG'],)),
'BAR_P02_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['DB1', SEGMENTS['DB1'], (0, -1), 'SEG'],)),
'BAR_P05_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'BAR_P05_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'BAR_P05_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],
['DB1', SEGMENTS['DB1'], (0, -1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['DRG', SEGMENTS['DRG'], (0, 1), 'SEG'],
['BAR_P05_PROCEDURE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['BAR_P05_INSURANCE', None, (0, -1), 'GRP'],
['ACC', SEGMENTS['ACC'], (0, 1), 'SEG'],
['UB1', SEGMENTS['UB1'], (0, 1), 'SEG'],
['UB2', SEGMENTS['UB2'], (0, 1), 'SEG'],
['ABS', SEGMENTS['ABS'], (0, 1), 'SEG'],
['BLC', SEGMENTS['BLC'], (0, -1), 'SEG'],
['RMI', SEGMENTS['RMI'], (0, 1), 'SEG'],)),
'BAR_P06_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],)),
'BAR_P10_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['GP2', SEGMENTS['GP2'], (0, 1), 'SEG'],)),
'BAR_P12_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'BPS_O29_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['BPS_O29_TIMING', None, (0, -1), 'GRP'],
['BPO', SEGMENTS['BPO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['BPS_O29_PRODUCT', None, (0, -1), 'GRP'],)),
'BPS_O29_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['BPS_O29_PATIENT_VISIT', None, (0, 1), 'GRP'],)),
'BPS_O29_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'BPS_O29_PRODUCT': ('sequence',
(['BPX', SEGMENTS['BPX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'BPS_O29_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'BRP_O30_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['BRP_O30_TIMING', None, (0, -1), 'GRP'],
['BPO', SEGMENTS['BPO'], (0, 1), 'SEG'],
['BPX', SEGMENTS['BPX'], (0, -1), 'SEG'],)),
'BRP_O30_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['BRP_O30_ORDER', None, (0, -1), 'GRP'],)),
'BRP_O30_RESPONSE': ('sequence',
(['BRP_O30_PATIENT', None, (0, 1), 'GRP'],)),
'BRP_O30_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'BRT_O32_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['BRT_O32_TIMING', None, (0, -1), 'GRP'],
['BPO', SEGMENTS['BPO'], (0, 1), 'SEG'],
['BTX', SEGMENTS['BTX'], (0, -1), 'SEG'],)),
'BRT_O32_RESPONSE': ('sequence',
(['PID', SEGMENTS['PID'], (0, 1), 'SEG'],
['BRT_O32_ORDER', None, (0, -1), 'GRP'],)),
'BRT_O32_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'BTS_O31_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['BTS_O31_TIMING', None, (0, -1), 'GRP'],
['BPO', SEGMENTS['BPO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['BTS_O31_PRODUCT_STATUS', None, (0, -1), 'GRP'],)),
'BTS_O31_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['BTS_O31_PATIENT_VISIT', None, (0, 1), 'GRP'],)),
'BTS_O31_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'BTS_O31_PRODUCT_STATUS': ('sequence',
(['BTX', SEGMENTS['BTX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'BTS_O31_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'CRM_C01_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['CSR', SEGMENTS['CSR'], (1, 1), 'SEG'],
['CSP', SEGMENTS['CSP'], (0, -1), 'SEG'],)),
'CSU_C09_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CSU_C09_VISIT', None, (0, 1), 'GRP'],
['CSR', SEGMENTS['CSR'], (1, 1), 'SEG'],
['CSU_C09_STUDY_PHASE', None, (1, -1), 'GRP'],)),
'CSU_C09_RX_ADMIN': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, 1), 'SEG'],)),
'CSU_C09_STUDY_OBSERVATION': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['CSU_C09_TIMING_QTY', None, (0, -1), 'GRP'],
['OBX', SEGMENTS['OBX'], (1, -1), 'SEG'],)),
'CSU_C09_STUDY_PHARM': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['CSU_C09_RX_ADMIN', None, (1, -1), 'GRP'],)),
'CSU_C09_STUDY_PHASE': ('sequence',
(['CSP', SEGMENTS['CSP'], (0, 1), 'SEG'],
['CSU_C09_STUDY_SCHEDULE', None, (1, -1), 'GRP'],)),
'CSU_C09_STUDY_SCHEDULE': ('sequence',
(['CSS', SEGMENTS['CSS'], (0, 1), 'SEG'],
['CSU_C09_STUDY_OBSERVATION', None, (1, -1), 'GRP'],
['CSU_C09_STUDY_PHARM', None, (1, -1), 'GRP'],)),
'CSU_C09_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'CSU_C09_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'DFT_P03_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['DFT_P03_TIMING_QUANTITY', None, (0, -1), 'GRP'],
['DFT_P03_ORDER', None, (0, 1), 'GRP'],
['DFT_P03_OBSERVATION', None, (0, -1), 'GRP'],)),
'DFT_P03_FINANCIAL': ('sequence',
(['FT1', SEGMENTS['FT1'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, 1), 'SEG'],
['DFT_P03_FINANCIAL_PROCEDURE', None, (0, -1), 'GRP'],
['DFT_P03_FINANCIAL_COMMON_ORDER', None, (0, -1), 'GRP'],)),
'DFT_P03_FINANCIAL_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['DFT_P03_FINANCIAL_TIMING_QUANTITY', None, (0, -1), 'GRP'],
['DFT_P03_FINANCIAL_ORDER', None, (0, 1), 'GRP'],
['DFT_P03_FINANCIAL_OBSERVATION', None, (0, -1), 'GRP'],)),
'DFT_P03_FINANCIAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P03_FINANCIAL_ORDER': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P03_FINANCIAL_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'DFT_P03_FINANCIAL_TIMING_QUANTITY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'DFT_P03_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'DFT_P03_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P03_ORDER': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P03_TIMING_QUANTITY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'DFT_P11_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['DFT_P11_TIMING_QUANTITY', None, (0, -1), 'GRP'],
['DFT_P11_ORDER', None, (0, 1), 'GRP'],
['DFT_P11_OBSERVATION', None, (0, -1), 'GRP'],)),
'DFT_P11_FINANCIAL': ('sequence',
(['FT1', SEGMENTS['FT1'], (1, 1), 'SEG'],
['DFT_P11_FINANCIAL_PROCEDURE', None, (0, -1), 'GRP'],
['DFT_P11_FINANCIAL_COMMON_ORDER', None, (0, -1), 'GRP'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['DRG', SEGMENTS['DRG'], (0, 1), 'SEG'],
['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['DFT_P11_FINANCIAL_INSURANCE', None, (0, -1), 'GRP'],)),
'DFT_P11_FINANCIAL_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['DFT_P11_FINANCIAL_TIMING_QUANTITY', None, (0, -1), 'GRP'],
['DFT_P11_FINANCIAL_ORDER', None, (0, 1), 'GRP'],
['DFT_P11_FINANCIAL_OBSERVATION', None, (0, -1), 'GRP'],)),
'DFT_P11_FINANCIAL_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'DFT_P11_FINANCIAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P11_FINANCIAL_ORDER': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P11_FINANCIAL_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'DFT_P11_FINANCIAL_TIMING_QUANTITY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'DFT_P11_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, -1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'DFT_P11_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P11_ORDER': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'DFT_P11_TIMING_QUANTITY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'DOC_T12_RESULT': ('sequence',
(['EVN', SEGMENTS['EVN'], (0, 1), 'SEG'],
['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['TXA', SEGMENTS['TXA'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'EAC_U07_COMMAND': ('sequence',
(['ECD', SEGMENTS['ECD'], (1, 1), 'SEG'],
['TQ1', SEGMENTS['TQ1'], (0, 1), 'SEG'],
['EAC_U07_SPECIMEN_CONTAINER', None, (0, 1), 'GRP'],
['CNS', SEGMENTS['CNS'], (0, 1), 'SEG'],)),
'EAC_U07_SPECIMEN_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['SPM', SEGMENTS['SPM'], (0, -1), 'SEG'],)),
'EAN_U09_NOTIFICATION': ('sequence',
(['NDS', SEGMENTS['NDS'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, 1), 'SEG'],)),
'EAR_U08_COMMAND_RESPONSE': ('sequence',
(['ECD', SEGMENTS['ECD'], (1, 1), 'SEG'],
['EAR_U08_SPECIMEN_CONTAINER', None, (0, 1), 'GRP'],
['ECR', SEGMENTS['ECR'], (1, 1), 'SEG'],)),
'EAR_U08_SPECIMEN_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['SPM', SEGMENTS['SPM'], (0, -1), 'SEG'],)),
'MDM_T01_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['MDM_T01_TIMING', None, (0, -1), 'GRP'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'MDM_T01_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'MDM_T02_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['MDM_T02_TIMING', None, (0, -1), 'GRP'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'MDM_T02_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'MDM_T02_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'MFN_M01_MF': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (0, 1), 'SEG'],)),
'MFN_M02_MF_STAFF': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['STF', SEGMENTS['STF'], (1, 1), 'SEG'],
['PRA', SEGMENTS['PRA'], (0, -1), 'SEG'],
['ORG', SEGMENTS['ORG'], (0, -1), 'SEG'],
['AFF', SEGMENTS['AFF'], (0, -1), 'SEG'],
['LAN', SEGMENTS['LAN'], (0, -1), 'SEG'],
['EDU', SEGMENTS['EDU'], (0, -1), 'SEG'],
['CER', SEGMENTS['CER'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'MFN_M03_MF_TEST': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['OM1', SEGMENTS['OM1'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'MFN_M04_MF_CDM': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['CDM', SEGMENTS['CDM'], (1, 1), 'SEG'],
['PRC', SEGMENTS['PRC'], (0, -1), 'SEG'],)),
'MFN_M05_MF_LOCATION': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['LOC', SEGMENTS['LOC'], (1, 1), 'SEG'],
['LCH', SEGMENTS['LCH'], (0, -1), 'SEG'],
['LRL', SEGMENTS['LRL'], (0, -1), 'SEG'],
['MFN_M05_MF_LOC_DEPT', None, (1, -1), 'GRP'],)),
'MFN_M05_MF_LOC_DEPT': ('sequence',
(['LDP', SEGMENTS['LDP'], (1, 1), 'SEG'],
['LCH', SEGMENTS['LCH'], (0, -1), 'SEG'],
['LCC', SEGMENTS['LCC'], (0, -1), 'SEG'],)),
'MFN_M06_MF_CLIN_STUDY': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['CM0', SEGMENTS['CM0'], (1, 1), 'SEG'],
['MFN_M06_MF_PHASE_SCHED_DETAIL', None, (0, -1), 'GRP'],)),
'MFN_M06_MF_PHASE_SCHED_DETAIL': ('sequence',
(['CM1', SEGMENTS['CM1'], (1, 1), 'SEG'],
['CM2', SEGMENTS['CM2'], (0, -1), 'SEG'],)),
'MFN_M07_MF_CLIN_STUDY_SCHED': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['CM0', SEGMENTS['CM0'], (1, 1), 'SEG'],
['CM2', SEGMENTS['CM2'], (0, -1), 'SEG'],)),
'MFN_M08_MF_TEST_NUMERIC': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['OM1', SEGMENTS['OM1'], (1, 1), 'SEG'],
['OM2', SEGMENTS['OM2'], (0, 1), 'SEG'],
['OM3', SEGMENTS['OM3'], (0, 1), 'SEG'],
['OM4', SEGMENTS['OM4'], (0, 1), 'SEG'],)),
'MFN_M09_MF_TEST_CATEGORICAL': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['OM1', SEGMENTS['OM1'], (1, 1), 'SEG'],
['MFN_M09_MF_TEST_CAT_DETAIL', None, (0, 1), 'GRP'],)),
'MFN_M09_MF_TEST_CAT_DETAIL': ('sequence',
(['OM3', SEGMENTS['OM3'], (1, 1), 'SEG'],
['OM4', SEGMENTS['OM4'], (0, -1), 'SEG'],)),
'MFN_M10_MF_TEST_BATTERIES': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['OM1', SEGMENTS['OM1'], (1, 1), 'SEG'],
['MFN_M10_MF_TEST_BATT_DETAIL', None, (0, 1), 'GRP'],)),
'MFN_M10_MF_TEST_BATT_DETAIL': ('sequence',
(['OM5', SEGMENTS['OM5'], (1, 1), 'SEG'],
['OM4', SEGMENTS['OM4'], (0, -1), 'SEG'],)),
'MFN_M11_MF_TEST_CALCULATED': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['OM1', SEGMENTS['OM1'], (1, 1), 'SEG'],
['MFN_M11_MF_TEST_CALC_DETAIL', None, (0, 1), 'GRP'],)),
'MFN_M11_MF_TEST_CALC_DETAIL': ('sequence',
(['OM6', SEGMENTS['OM6'], (1, 1), 'SEG'],
['OM2', SEGMENTS['OM2'], (1, 1), 'SEG'],)),
'MFN_M12_MF_OBS_ATTRIBUTES': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['OM1', SEGMENTS['OM1'], (1, 1), 'SEG'],
['OM7', SEGMENTS['OM7'], (0, 1), 'SEG'],)),
'MFN_M15_MF_INV_ITEM': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['IIM', SEGMENTS['IIM'], (1, 1), 'SEG'],)),
'MFN_ZNN': ('sequence',
(['MSH', SEGMENTS['MSH'], (1, 1), 'SEG'],
['SFT', SEGMENTS['SFT'], (0, -1), 'SEG'],
['MFI', SEGMENTS['MFI'], (1, 1), 'SEG'],
['MFN_ZNN_MF_SITE_DEFINED', None, (1, -1), 'GRP'],)),
'MFN_ZNN_MF_SITE_DEFINED': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'MFR_M01_MF_QUERY': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (0, 1), 'SEG'],)),
'MFR_M04_MF_QUERY': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['CDM', SEGMENTS['CDM'], (1, 1), 'SEG'],
['LCH', SEGMENTS['LCH'], (0, -1), 'SEG'],
['PRC', SEGMENTS['PRC'], (0, -1), 'SEG'],)),
'MFR_M05_MF_QUERY': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['LOC', SEGMENTS['LOC'], (1, 1), 'SEG'],
['LCH', SEGMENTS['LCH'], (0, -1), 'SEG'],
['LRL', SEGMENTS['LRL'], (0, -1), 'SEG'],
['LDP', SEGMENTS['LDP'], (1, -1), 'SEG'],
['LCH', SEGMENTS['LCH'], (0, -1), 'SEG'],
['LCC', SEGMENTS['LCC'], (0, -1), 'SEG'],)),
'MFR_M06_MF_QUERY': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['CM0', SEGMENTS['CM0'], (1, 1), 'SEG'],
['CM1', SEGMENTS['CM1'], (0, -1), 'SEG'],
['CM2', SEGMENTS['CM2'], (0, -1), 'SEG'],)),
'MFR_M07_MF_QUERY': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['CM0', SEGMENTS['CM0'], (1, 1), 'SEG'],
['CM2', SEGMENTS['CM2'], (0, -1), 'SEG'],)),
'NMD_N02_APP_STATS': ('sequence',
(['NST', SEGMENTS['NST'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'NMD_N02_APP_STATUS': ('sequence',
(['NSC', SEGMENTS['NSC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'NMD_N02_CLOCK': ('sequence',
(['NCK', SEGMENTS['NCK'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'NMD_N02_CLOCK_AND_STATS_WITH_NOTES': ('sequence',
(['NMD_N02_CLOCK', None, (0, 1), 'GRP'],
['NMD_N02_APP_STATS', None, (0, 1), 'GRP'],
['NMD_N02_APP_STATUS', None, (0, 1), 'GRP'],)),
'NMQ_N01_CLOCK_AND_STATISTICS': ('sequence',
(['NCK', SEGMENTS['NCK'], (0, 1), 'SEG'],
['NST', SEGMENTS['NST'], (0, 1), 'SEG'],
['NSC', SEGMENTS['NSC'], (0, 1), 'SEG'],)),
'NMQ_N01_QRY_WITH_DETAIL': ('sequence',
(['QRD', SEGMENTS['QRD'], (1, 1), 'SEG'],
['QRF', SEGMENTS['QRF'], (0, 1), 'SEG'],)),
'NMR_N01_CLOCK_AND_STATS_WITH_NOTES_ALT': ('sequence',
(['NCK', SEGMENTS['NCK'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['NST', SEGMENTS['NST'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['NSC', SEGMENTS['NSC'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMB_O27_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OMB_O27_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMB_O27_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMB_O27_TIMING', None, (0, -1), 'GRP'],
['BPO', SEGMENTS['BPO'], (1, 1), 'SEG'],
['SPM', SEGMENTS['SPM'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['OMB_O27_OBSERVATION', None, (0, -1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OMB_O27_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMB_O27_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OMB_O27_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OMB_O27_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMB_O27_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMD_O03_DIET': ('sequence',
(['ODS', SEGMENTS['ODS'], (1, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMD_O03_OBSERVATION', None, (0, -1), 'GRP'],)),
'OMD_O03_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OMD_O03_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMD_O03_ORDER_DIET': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMD_O03_TIMING_DIET', None, (0, -1), 'GRP'],
['OMD_O03_DIET', None, (0, 1), 'GRP'],)),
'OMD_O03_ORDER_TRAY': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMD_O03_TIMING_TRAY', None, (0, -1), 'GRP'],
['ODT', SEGMENTS['ODT'], (1, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMD_O03_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMD_O03_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OMD_O03_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OMD_O03_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMD_O03_TIMING_DIET': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMD_O03_TIMING_TRAY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMG_O19_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'OMG_O19_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OMG_O19_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMG_O19_OBSERVATION_PRIOR': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMG_O19_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMG_O19_TIMING', None, (0, -1), 'GRP'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['OMG_O19_OBSERVATION', None, (0, -1), 'GRP'],
['OMG_O19_SPECIMEN', None, (0, -1), 'GRP'],
['OMG_O19_PRIOR_RESULT', None, (0, -1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OMG_O19_ORDER_PRIOR': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['OMG_O19_TIMING_PRIOR', None, (0, -1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['OMG_O19_OBSERVATION_PRIOR', None, (1, -1), 'GRP'],)),
'OMG_O19_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['OMG_O19_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OMG_O19_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OMG_O19_PATIENT_PRIOR': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],)),
'OMG_O19_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMG_O19_PATIENT_VISIT_PRIOR': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMG_O19_PRIOR_RESULT': ('sequence',
(['OMG_O19_PATIENT_PRIOR', None, (0, 1), 'GRP'],
['OMG_O19_PATIENT_VISIT_PRIOR', None, (0, 1), 'GRP'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['OMG_O19_ORDER_PRIOR', None, (1, -1), 'GRP'],)),
'OMG_O19_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['OMG_O19_CONTAINER', None, (0, -1), 'GRP'],)),
'OMG_O19_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMG_O19_TIMING_PRIOR': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMI_O23_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OMI_O23_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMI_O23_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMI_O23_TIMING', None, (0, -1), 'GRP'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['OMI_O23_OBSERVATION', None, (0, -1), 'GRP'],
['IPC', SEGMENTS['IPC'], (1, -1), 'SEG'],)),
'OMI_O23_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMI_O23_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OMI_O23_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OMI_O23_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMI_O23_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OML_O21_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'OML_O21_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OML_O21_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OML_O21_OBSERVATION_PRIOR': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OML_O21_OBSERVATION_REQUEST': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['OML_O21_OBSERVATION', None, (0, -1), 'GRP'],
['OML_O21_SPECIMEN', None, (0, -1), 'GRP'],
['OML_O21_PRIOR_RESULT', None, (0, -1), 'GRP'],)),
'OML_O21_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OML_O21_TIIMING', None, (0, -1), 'GRP'],
['OML_O21_OBSERVATION_REQUEST', None, (0, 1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OML_O21_ORDER_PRIOR': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OML_O21_TIMING_PRIOR', None, (0, -1), 'GRP'],
['OML_O21_OBSERVATION_PRIOR', None, (1, -1), 'GRP'],)),
'OML_O21_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['OML_O21_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OML_O21_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OML_O21_PATIENT_PRIOR': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],)),
'OML_O21_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OML_O21_PATIENT_VISIT_PRIOR': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OML_O21_PRIOR_RESULT': ('sequence',
(['OML_O21_PATIENT_PRIOR', None, (0, 1), 'GRP'],
['OML_O21_PATIENT_VISIT_PRIOR', None, (0, 1), 'GRP'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['OML_O21_ORDER_PRIOR', None, (1, -1), 'GRP'],)),
'OML_O21_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['OML_O21_CONTAINER', None, (0, -1), 'GRP'],)),
'OML_O21_TIIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OML_O21_TIMING_PRIOR': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OML_O33_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OML_O33_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OML_O33_OBSERVATION_PRIOR': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OML_O33_OBSERVATION_REQUEST': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['OML_O33_OBSERVATION', None, (0, -1), 'GRP'],
['OML_O33_PRIOR_RESULT', None, (0, -1), 'GRP'],)),
'OML_O33_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OML_O33_TIMING', None, (0, -1), 'GRP'],
['OML_O33_OBSERVATION_REQUEST', None, (0, 1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OML_O33_ORDER_PRIOR': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OML_O33_TIMING_PRIOR', None, (0, -1), 'GRP'],
['OML_O33_OBSERVATION_PRIOR', None, (1, -1), 'GRP'],)),
'OML_O33_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['OML_O33_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OML_O33_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OML_O33_PATIENT_PRIOR': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],)),
'OML_O33_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OML_O33_PATIENT_VISIT_PRIOR': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OML_O33_PRIOR_RESULT': ('sequence',
(['OML_O33_PATIENT_PRIOR', None, (0, 1), 'GRP'],
['OML_O33_PATIENT_VISIT_PRIOR', None, (0, 1), 'GRP'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['OML_O33_ORDER_PRIOR', None, (1, -1), 'GRP'],)),
'OML_O33_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['SAC', SEGMENTS['SAC'], (0, -1), 'SEG'],
['OML_O33_ORDER', None, (1, -1), 'GRP'],)),
'OML_O33_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OML_O33_TIMING_PRIOR': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OML_O35_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OML_O35_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OML_O35_OBSERVATION_PRIOR': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OML_O35_OBSERVATION_REQUEST': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['OML_O35_OBSERVATION', None, (0, -1), 'GRP'],
['OML_O35_PRIOR_RESULT', None, (0, -1), 'GRP'],)),
'OML_O35_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OML_O35_TIIMING', None, (0, -1), 'GRP'],
['OML_O35_OBSERVATION_REQUEST', None, (0, 1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OML_O35_ORDER_PRIOR': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OML_O35_TIMING_PRIOR', None, (0, -1), 'GRP'],
['OML_O35_OBSERVATION_PRIOR', None, (1, -1), 'GRP'],)),
'OML_O35_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['OML_O35_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OML_O35_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OML_O35_PATIENT_PRIOR': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],)),
'OML_O35_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OML_O35_PATIENT_VISIT_PRIOR': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OML_O35_PRIOR_RESULT': ('sequence',
(['OML_O35_PATIENT_PRIOR', None, (0, 1), 'GRP'],
['OML_O35_PATIENT_VISIT_PRIOR', None, (0, 1), 'GRP'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['OML_O35_ORDER_PRIOR', None, (1, -1), 'GRP'],)),
'OML_O35_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['OML_O35_SPECIMEN_CONTAINER', None, (1, -1), 'GRP'],)),
'OML_O35_SPECIMEN_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['OML_O35_ORDER', None, (1, -1), 'GRP'],)),
'OML_O35_TIIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OML_O35_TIMING_PRIOR': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMN_O07_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OMN_O07_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMN_O07_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMN_O07_TIMING', None, (0, -1), 'GRP'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMN_O07_OBSERVATION', None, (0, -1), 'GRP'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OMN_O07_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMN_O07_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OMN_O07_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OMN_O07_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMN_O07_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMP_O09_COMPONENT': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMP_O09_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OMP_O09_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMP_O09_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMP_O09_TIMING', None, (0, -1), 'GRP'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['OMP_O09_COMPONENT', None, (0, -1), 'GRP'],
['OMP_O09_OBSERVATION', None, (0, -1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OMP_O09_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMP_O09_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OMP_O09_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OMP_O09_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMP_O09_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OMS_O05_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'OMS_O05_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OMS_O05_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OMS_O05_TIMING', None, (0, -1), 'GRP'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMS_O05_OBSERVATION', None, (0, -1), 'GRP'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'OMS_O05_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OMS_O05_PATIENT_VISIT', None, (0, 1), 'GRP'],
['OMS_O05_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'OMS_O05_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OMS_O05_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORB_O28_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORB_O28_TIMING', None, (0, -1), 'GRP'],
['BPO', SEGMENTS['BPO'], (0, 1), 'SEG'],)),
'ORB_O28_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['ORB_O28_ORDER', None, (0, -1), 'GRP'],)),
'ORB_O28_RESPONSE': ('sequence',
(['ORB_O28_PATIENT', None, (0, 1), 'GRP'],)),
'ORB_O28_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORD_O04_ORDER_DIET': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORD_O04_TIMING_DIET', None, (0, -1), 'GRP'],
['ODS', SEGMENTS['ODS'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORD_O04_ORDER_TRAY': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORD_O04_TIMING_TRAY', None, (0, -1), 'GRP'],
['ODT', SEGMENTS['ODT'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORD_O04_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORD_O04_RESPONSE': ('sequence',
(['ORD_O04_PATIENT', None, (0, 1), 'GRP'],
['ORD_O04_ORDER_DIET', None, (1, -1), 'GRP'],
['ORD_O04_ORDER_TRAY', None, (0, -1), 'GRP'],)),
'ORD_O04_TIMING_DIET': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORD_O04_TIMING_TRAY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORF_R04_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORF_R04_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['ORF_R04_TIMING_QTY', None, (0, -1), 'GRP'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['ORF_R04_OBSERVATION', None, (1, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'ORF_R04_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORF_R04_QUERY_RESPONSE': ('sequence',
(['ORF_R04_PATIENT', None, (0, 1), 'GRP'],
['ORF_R04_ORDER', None, (1, -1), 'GRP'],)),
'ORF_R04_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORG_O20_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORG_O20_TIMING', None, (0, -1), 'GRP'],
['OBR', SEGMENTS['OBR'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['ORG_O20_SPECIMEN', None, (0, -1), 'GRP'],)),
'ORG_O20_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORG_O20_RESPONSE': ('sequence',
(['ORG_O20_PATIENT', None, (0, 1), 'GRP'],
['ORG_O20_ORDER', None, (1, -1), 'GRP'],)),
'ORG_O20_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['SAC', SEGMENTS['SAC'], (0, -1), 'SEG'],)),
'ORG_O20_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORI_O24_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORI_O24_TIMING', None, (0, -1), 'GRP'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['IPC', SEGMENTS['IPC'], (1, -1), 'SEG'],)),
'ORI_O24_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORI_O24_RESPONSE': ('sequence',
(['ORI_O24_PATIENT', None, (0, 1), 'GRP'],
['ORI_O24_ORDER', None, (1, -1), 'GRP'],)),
'ORI_O24_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORL_O22_OBSERVATION_REQUEST': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ORL_O22_SPECIMEN', None, (0, -1), 'GRP'],)),
'ORL_O22_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORL_O22_TIMING', None, (0, -1), 'GRP'],
['ORL_O22_OBSERVATION_REQUEST', None, (0, 1), 'GRP'],)),
'ORL_O22_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['ORL_O22_ORDER', None, (0, -1), 'GRP'],)),
'ORL_O22_RESPONSE': ('sequence',
(['ORL_O22_PATIENT', None, (0, 1), 'GRP'],)),
'ORL_O22_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['SAC', SEGMENTS['SAC'], (0, -1), 'SEG'],)),
'ORL_O22_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORL_O34_OBSERVATION_REQUEST': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ORL_O34_SPMSAC_SUPPGRP2', None, (0, -1), 'GRP'],)),
'ORL_O34_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORL_O34_TIMING', None, (0, -1), 'GRP'],
['ORL_O34_OBSERVATION_REQUEST', None, (0, 1), 'GRP'],)),
'ORL_O34_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['ORL_O34_SPECIMEN', None, (1, -1), 'GRP'],)),
'ORL_O34_RESPONSE': ('sequence',
(['ORL_O34_PATIENT', None, (0, 1), 'GRP'],)),
'ORL_O34_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['SAC', SEGMENTS['SAC'], (0, -1), 'SEG'],
['ORL_O34_ORDER', None, (0, -1), 'GRP'],)),
'ORL_O34_SPMSAC_SUPPGRP2': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['SAC', SEGMENTS['SAC'], (0, -1), 'SEG'],)),
'ORL_O34_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORL_O36_OBSERVATION_REQUEST': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],)),
'ORL_O36_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORL_O36_TIMING', None, (0, -1), 'GRP'],
['ORL_O36_OBSERVATION_REQUEST', None, (0, 1), 'GRP'],)),
'ORL_O36_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['ORL_O36_SPECIMEN', None, (1, -1), 'GRP'],)),
'ORL_O36_RESPONSE': ('sequence',
(['ORL_O36_PATIENT', None, (0, 1), 'GRP'],)),
'ORL_O36_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['ORL_O36_SPECIMEN_CONTAINER', None, (1, -1), 'GRP'],)),
'ORL_O36_SPECIMEN_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['ORL_O36_ORDER', None, (0, -1), 'GRP'],)),
'ORL_O36_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORM_O01_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
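# 'choice' groups such as the one below admit exactly one of the listed
# segments; the generated _SUPPGRP name is built by concatenating the
# alternatives (OBR/RQD/RQ1/RXO/ODS/ODT).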
'ORM_O01_OBRRQDRQ1RXOODSODT_SUPPGRP': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['ODS', SEGMENTS['ODS'], (1, 1), 'SEG'],
['ODT', SEGMENTS['ODT'], (1, 1), 'SEG'],)),
'ORM_O01_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORM_O01_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORM_O01_ORDER_DETAIL', None, (0, 1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],)),
'ORM_O01_ORDER_DETAIL': ('sequence',
(['ORM_O01_OBRRQDRQ1RXOODSODT_SUPPGRP', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],
['ORM_O01_OBSERVATION', None, (0, -1), 'GRP'],)),
'ORM_O01_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['ORM_O01_PATIENT_VISIT', None, (0, 1), 'GRP'],
['ORM_O01_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'ORM_O01_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'ORN_O08_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORN_O08_TIMING', None, (0, -1), 'GRP'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORN_O08_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORN_O08_RESPONSE': ('sequence',
(['ORN_O08_PATIENT', None, (0, 1), 'GRP'],
['ORN_O08_ORDER', None, (1, -1), 'GRP'],)),
'ORN_O08_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORP_O10_COMPONENT': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORP_O10_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORP_O10_TIMING', None, (0, -1), 'GRP'],
['ORP_O10_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'ORP_O10_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['ORP_O10_COMPONENT', None, (0, -1), 'GRP'],)),
'ORP_O10_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORP_O10_RESPONSE': ('sequence',
(['ORP_O10_PATIENT', None, (0, 1), 'GRP'],
['ORP_O10_ORDER', None, (1, -1), 'GRP'],)),
'ORP_O10_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORR_O02_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['ODS', SEGMENTS['ODS'], (1, 1), 'SEG'],
['ODT', SEGMENTS['ODT'], (1, 1), 'SEG'],)),
'ORR_O02_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORR_O02_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'ORR_O02_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORR_O02_RESPONSE': ('sequence',
(['ORR_O02_PATIENT', None, (0, 1), 'GRP'],
['ORR_O02_ORDER', None, (1, -1), 'GRP'],)),
'ORS_O06_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['ORS_O06_TIMING', None, (0, -1), 'GRP'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORS_O06_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORS_O06_RESPONSE': ('sequence',
(['ORS_O06_PATIENT', None, (0, 1), 'GRP'],
['ORS_O06_ORDER', None, (1, -1), 'GRP'],)),
'ORS_O06_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORU_R01_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORU_R01_ORDER_OBSERVATION': ('sequence',
(['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['ORU_R01_TIMING_QTY', None, (0, -1), 'GRP'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['ORU_R01_OBSERVATION', None, (0, -1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],
['ORU_R01_SPECIMEN', None, (0, -1), 'GRP'],)),
'ORU_R01_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['ORU_R01_VISIT', None, (0, 1), 'GRP'],)),
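# ORU_R01_PATIENT_RESULT below is the top-level repeating unit of an
# ORU^R01 result message: an optional patient header followed by one or
# more order/observation groups.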
'ORU_R01_PATIENT_RESULT': ('sequence',
(['ORU_R01_PATIENT', None, (0, 1), 'GRP'],
['ORU_R01_ORDER_OBSERVATION', None, (1, -1), 'GRP'],)),
'ORU_R01_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'ORU_R01_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORU_R01_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'ORU_R30_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'ORU_R30_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ORU_R30_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OSR_Q06_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['RQD', SEGMENTS['RQD'], (1, 1), 'SEG'],
['RQ1', SEGMENTS['RQ1'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['ODS', SEGMENTS['ODS'], (1, 1), 'SEG'],
['ODT', SEGMENTS['ODT'], (1, 1), 'SEG'],)),
'OSR_Q06_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['OSR_Q06_TIMING', None, (0, -1), 'GRP'],
['OSR_Q06_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'OSR_Q06_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OSR_Q06_RESPONSE': ('sequence',
(['OSR_Q06_PATIENT', None, (0, 1), 'GRP'],
['OSR_Q06_ORDER', None, (1, -1), 'GRP'],)),
'OSR_Q06_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OUL_R21_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['SID', SEGMENTS['SID'], (0, 1), 'SEG'],)),
'OUL_R21_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['SID', SEGMENTS['SID'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R21_ORDER_OBSERVATION': ('sequence',
(['OUL_R21_CONTAINER', None, (0, 1), 'GRP'],
['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OUL_R21_TIMING_QTY', None, (0, -1), 'GRP'],
['OUL_R21_OBSERVATION', None, (1, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'OUL_R21_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R21_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OUL_R21_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OUL_R22_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['INV', SEGMENTS['INV'], (0, 1), 'SEG'],)),
'OUL_R22_ORDER': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OUL_R22_TIMING_QTY', None, (0, -1), 'GRP'],
['OUL_R22_RESULT', None, (0, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'OUL_R22_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R22_RESULT': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['SID', SEGMENTS['SID'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R22_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['OUL_R22_CONTAINER', None, (0, -1), 'GRP'],
['OUL_R22_ORDER', None, (1, -1), 'GRP'],)),
'OUL_R22_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OUL_R22_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
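# Note the nesting chain in OUL_R23: SPECIMEN contains CONTAINER, which in
# turn contains ORDER groups, so consumers must recurse through 'GRP'
# entries rather than treating specs as flat segment lists.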
'OUL_R23_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['INV', SEGMENTS['INV'], (0, 1), 'SEG'],
['OUL_R23_ORDER', None, (1, -1), 'GRP'],)),
'OUL_R23_ORDER': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OUL_R23_TIMING_QTY', None, (0, -1), 'GRP'],
['OUL_R23_RESULT', None, (0, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'OUL_R23_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R23_RESULT': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['SID', SEGMENTS['SID'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R23_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['OUL_R23_CONTAINER', None, (1, -1), 'GRP'],)),
'OUL_R23_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OUL_R23_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'OUL_R24_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['INV', SEGMENTS['INV'], (0, 1), 'SEG'],)),
'OUL_R24_ORDER': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ORC', SEGMENTS['ORC'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['OUL_R24_TIMING_QTY', None, (0, -1), 'GRP'],
['OUL_R24_SPECIMEN', None, (0, -1), 'GRP'],
['OUL_R24_RESULT', None, (0, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'OUL_R24_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R24_RESULT': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['TCD', SEGMENTS['TCD'], (0, 1), 'SEG'],
['SID', SEGMENTS['SID'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'OUL_R24_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['OUL_R24_CONTAINER', None, (0, -1), 'GRP'],)),
'OUL_R24_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'OUL_R24_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PEX_P07_ASSOCIATED_PERSON': ('sequence',
(['NK1', SEGMENTS['NK1'], (1, 1), 'SEG'],
['PEX_P07_ASSOCIATED_RX_ORDER', None, (0, 1), 'GRP'],
['PEX_P07_ASSOCIATED_RX_ADMIN', None, (0, -1), 'GRP'],
['PRB', SEGMENTS['PRB'], (0, -1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'PEX_P07_ASSOCIATED_RX_ADMIN': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, 1), 'SEG'],)),
'PEX_P07_ASSOCIATED_RX_ORDER': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['PEX_P07_NK1_TIMING_QTY', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (0, -1), 'SEG'],)),
'PEX_P07_EXPERIENCE': ('sequence',
(['PES', SEGMENTS['PES'], (1, 1), 'SEG'],
['PEX_P07_PEX_OBSERVATION', None, (1, -1), 'GRP'],)),
'PEX_P07_NK1_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'PEX_P07_PEX_CAUSE': ('sequence',
(['PCR', SEGMENTS['PCR'], (1, 1), 'SEG'],
['PEX_P07_RX_ORDER', None, (0, 1), 'GRP'],
['PEX_P07_RX_ADMINISTRATION', None, (0, -1), 'GRP'],
['PRB', SEGMENTS['PRB'], (0, -1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['PEX_P07_ASSOCIATED_PERSON', None, (0, 1), 'GRP'],
['PEX_P07_STUDY', None, (0, -1), 'GRP'],)),
'PEX_P07_PEX_OBSERVATION': ('sequence',
(['PEO', SEGMENTS['PEO'], (1, 1), 'SEG'],
['PEX_P07_PEX_CAUSE', None, (1, -1), 'GRP'],)),
'PEX_P07_RX_ADMINISTRATION': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, 1), 'SEG'],)),
'PEX_P07_RX_ORDER': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['PEX_P07_TIMING_QTY', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (0, -1), 'SEG'],)),
'PEX_P07_STUDY': ('sequence',
(['CSR', SEGMENTS['CSR'], (1, 1), 'SEG'],
['CSP', SEGMENTS['CSP'], (0, -1), 'SEG'],)),
'PEX_P07_TIMING_QTY': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'PEX_P07_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
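# The patient-care definitions that follow (PGL_PC6, PPG_PCG, PPP_PCB,
# PPR_PC1, PPT_PCL, PPV_PCA, PRR_PC5, PTR_PCF) all pair OBR with the
# ANYHL7SEGMENT wildcard in their order-detail choice, so any segment may
# serve as the order-detail payload.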
'PGL_PC6_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PGL_PC6_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PGL_PC6_GOAL_ROLE', None, (0, -1), 'GRP'],
['PGL_PC6_PATHWAY', None, (0, -1), 'GRP'],
['PGL_PC6_OBSERVATION', None, (0, -1), 'GRP'],
['PGL_PC6_PROBLEM', None, (0, -1), 'GRP'],
['PGL_PC6_ORDER', None, (0, -1), 'GRP'],)),
'PGL_PC6_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PGL_PC6_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PGL_PC6_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PGL_PC6_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PGL_PC6_ORDER_DETAIL': ('sequence',
(['PGL_PC6_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PGL_PC6_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PGL_PC6_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PGL_PC6_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PGL_PC6_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PGL_PC6_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PGL_PC6_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PGL_PC6_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],)),
'PGL_PC6_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PGL_PC6_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PMU_B07_CERTIFICATE': ('sequence',
(['CER', SEGMENTS['CER'], (1, 1), 'SEG'],
['ROL', SEGMENTS['ROL'], (0, -1), 'SEG'],)),
'PPG_PCG_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PPG_PCG_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_GOAL_ROLE', None, (0, -1), 'GRP'],
['PPG_PCG_GOAL_OBSERVATION', None, (0, -1), 'GRP'],
['PPG_PCG_PROBLEM', None, (0, -1), 'GRP'],
['PPG_PCG_ORDER', None, (0, -1), 'GRP'],)),
'PPG_PCG_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPG_PCG_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPG_PCG_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PPG_PCG_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PPG_PCG_ORDER_DETAIL': ('sequence',
(['PPG_PCG_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPG_PCG_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPG_PCG_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_PATHWAY_ROLE', None, (0, -1), 'GRP'],
['PPG_PCG_GOAL', None, (0, -1), 'GRP'],)),
'PPG_PCG_PATHWAY_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPG_PCG_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PPG_PCG_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPG_PCG_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PPG_PCG_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPG_PCG_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPG_PCG_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPP_PCB_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PPP_PCB_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPP_PCB_GOAL_ROLE', None, (0, -1), 'GRP'],
['PPP_PCB_GOAL_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPP_PCB_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPP_PCB_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPP_PCB_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PPP_PCB_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PPP_PCB_ORDER_DETAIL': ('sequence',
(['PPP_PCB_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPP_PCB_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPP_PCB_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPP_PCB_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPP_PCB_PATHWAY_ROLE', None, (0, -1), 'GRP'],
['PPP_PCB_PROBLEM', None, (0, -1), 'GRP'],)),
'PPP_PCB_PATHWAY_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPP_PCB_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PPP_PCB_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPP_PCB_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PPP_PCB_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],
['PPP_PCB_GOAL', None, (0, -1), 'GRP'],
['PPP_PCB_ORDER', None, (0, -1), 'GRP'],)),
'PPP_PCB_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPP_PCB_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPR_PC1_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PPR_PC1_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPR_PC1_GOAL_ROLE', None, (0, -1), 'GRP'],
['PPR_PC1_GOAL_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPR_PC1_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPR_PC1_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPR_PC1_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PPR_PC1_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PPR_PC1_ORDER_DETAIL': ('sequence',
(['PPR_PC1_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPR_PC1_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPR_PC1_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPR_PC1_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPR_PC1_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PPR_PC1_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPR_PC1_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PPR_PC1_PATHWAY', None, (0, -1), 'GRP'],
['PPR_PC1_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],
['PPR_PC1_GOAL', None, (0, -1), 'GRP'],
['PPR_PC1_ORDER', None, (0, -1), 'GRP'],)),
'PPR_PC1_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPR_PC1_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPT_PCL_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PPT_PCL_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPT_PCL_GOAL_ROLE', None, (0, -1), 'GRP'],
['PPT_PCL_GOAL_OBSERVATION', None, (0, -1), 'GRP'],
['PPT_PCL_PROBLEM', None, (0, -1), 'GRP'],
['PPT_PCL_ORDER', None, (0, -1), 'GRP'],)),
'PPT_PCL_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPT_PCL_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPT_PCL_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PPT_PCL_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PPT_PCL_ORDER_DETAIL': ('sequence',
(['PPT_PCL_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPT_PCL_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPT_PCL_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPT_PCL_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPT_PCL_PATHWAY_ROLE', None, (0, -1), 'GRP'],
['PPT_PCL_GOAL', None, (0, -1), 'GRP'],)),
'PPT_PCL_PATHWAY_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPT_PCL_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PPT_PCL_PATIENT_VISIT', None, (0, 1), 'GRP'],
['PPT_PCL_PATHWAY', None, (1, -1), 'GRP'],)),
'PPT_PCL_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PPT_PCL_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPT_PCL_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PPT_PCL_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPT_PCL_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPT_PCL_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPV_PCA_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPV_PCA_GOAL_ROLE', None, (0, -1), 'GRP'],
['PPV_PCA_GOAL_PATHWAY', None, (0, -1), 'GRP'],
['PPV_PCA_GOAL_OBSERVATION', None, (0, -1), 'GRP'],
['PPV_PCA_PROBLEM', None, (0, -1), 'GRP'],
['PPV_PCA_ORDER', None, (0, -1), 'GRP'],)),
'PPV_PCA_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPV_PCA_GOAL_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPV_PCA_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPV_PCA_OBRANYHL7SEGMENT_SUPPGRP': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PPV_PCA_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PPV_PCA_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PPV_PCA_ORDER_DETAIL': ('sequence',
(['PPV_PCA_OBRANYHL7SEGMENT_SUPPGRP', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPV_PCA_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPV_PCA_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PPV_PCA_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PPV_PCA_PATIENT_VISIT', None, (0, 1), 'GRP'],
['PPV_PCA_GOAL', None, (1, -1), 'GRP'],)),
'PPV_PCA_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PPV_PCA_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PPV_PCA_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PPV_PCA_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],)),
'PPV_PCA_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PPV_PCA_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PRR_PC5_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PRR_PC5_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PRR_PC5_GOAL_ROLE', None, (0, -1), 'GRP'],
['PRR_PC5_GOAL_OBSERVATION', None, (0, -1), 'GRP'],)),
'PRR_PC5_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PRR_PC5_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PRR_PC5_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PRR_PC5_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PRR_PC5_ORDER_DETAIL': ('sequence',
(['PRR_PC5_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PRR_PC5_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PRR_PC5_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PRR_PC5_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PRR_PC5_PATIENT_VISIT', None, (0, 1), 'GRP'],
['PRR_PC5_PROBLEM', None, (1, -1), 'GRP'],)),
'PRR_PC5_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PRR_PC5_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PRR_PC5_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PRR_PC5_PROBLEM_PATHWAY', None, (0, -1), 'GRP'],
['PRR_PC5_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],
['PRR_PC5_GOAL', None, (0, -1), 'GRP'],
['PRR_PC5_ORDER', None, (0, -1), 'GRP'],)),
'PRR_PC5_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PRR_PC5_PROBLEM_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PRR_PC5_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PTR_PCF_CHOICE': ('choice',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],)),
'PTR_PCF_GOAL': ('sequence',
(['GOL', SEGMENTS['GOL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PTR_PCF_GOAL_ROLE', None, (0, -1), 'GRP'],
['PTR_PCF_GOAL_OBSERVATION', None, (0, -1), 'GRP'],)),
'PTR_PCF_GOAL_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PTR_PCF_GOAL_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PTR_PCF_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['PTR_PCF_ORDER_DETAIL', None, (0, 1), 'GRP'],)),
'PTR_PCF_ORDER_DETAIL': ('sequence',
(['PTR_PCF_CHOICE', None, (1, 1), 'GRP'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PTR_PCF_ORDER_OBSERVATION', None, (0, -1), 'GRP'],)),
'PTR_PCF_ORDER_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PTR_PCF_PATHWAY': ('sequence',
(['PTH', SEGMENTS['PTH'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PTR_PCF_PATHWAY_ROLE', None, (0, -1), 'GRP'],
['PTR_PCF_PROBLEM', None, (0, -1), 'GRP'],)),
'PTR_PCF_PATHWAY_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'PTR_PCF_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PTR_PCF_PATIENT_VISIT', None, (0, 1), 'GRP'],
['PTR_PCF_PATHWAY', None, (1, -1), 'GRP'],)),
'PTR_PCF_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'PTR_PCF_PROBLEM': ('sequence',
(['PRB', SEGMENTS['PRB'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],
['PTR_PCF_PROBLEM_ROLE', None, (0, -1), 'GRP'],
['PTR_PCF_PROBLEM_OBSERVATION', None, (0, -1), 'GRP'],
['PTR_PCF_GOAL', None, (0, -1), 'GRP'],
['PTR_PCF_ORDER', None, (0, -1), 'GRP'],)),
'PTR_PCF_PROBLEM_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'PTR_PCF_PROBLEM_ROLE': ('sequence',
(['ROL', SEGMENTS['ROL'], (1, 1), 'SEG'],
['VAR', SEGMENTS['VAR'], (0, -1), 'SEG'],)),
'QBP_K13_ROW_DEFINITION': ('sequence',
(['RDF', SEGMENTS['RDF'], (1, 1), 'SEG'],
['RDT', SEGMENTS['RDT'], (0, -1), 'SEG'],)),
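# QBP_QNN below is a complete query-by-parameter message skeleton rather
# than a sub-group: it begins with the MSH envelope and carries the
# QPD/RCP query segments directly.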
'QBP_QNN': ('sequence',
(['MSH', SEGMENTS['MSH'], (1, 1), 'SEG'],
['SFT', SEGMENTS['SFT'], (0, -1), 'SEG'],
['QPD', SEGMENTS['QPD'], (1, 1), 'SEG'],
['RDF', SEGMENTS['RDF'], (0, 1), 'SEG'],
['RCP', SEGMENTS['RCP'], (1, 1), 'SEG'],
['DSC', SEGMENTS['DSC'], (0, 1), 'SEG'],)),
'QVR_Q17_QBP': ('sequence',
(['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (0, 1), 'SEG'],)),
'RAR_RAR_DEFINITION': ('sequence',
(['QRD', SEGMENTS['QRD'], (1, 1), 'SEG'],
['QRF', SEGMENTS['QRF'], (0, 1), 'SEG'],
['RAR_RAR_PATIENT', None, (0, 1), 'GRP'],
['RAR_RAR_ORDER', None, (1, -1), 'GRP'],)),
'RAR_RAR_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RAR_RAR_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RAR_RAR_ENCODING', None, (0, 1), 'GRP'],
['RXA', SEGMENTS['RXA'], (1, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, 1), 'SEG'],)),
'RAR_RAR_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RAS_O17_ADMINISTRATION': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, 1), 'SEG'],
['RAS_O17_OBSERVATION', None, (0, -1), 'GRP'],)),
'RAS_O17_COMPONENTS': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RAS_O17_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RAS_O17_TIMING_ENCODED', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RAS_O17_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RAS_O17_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RAS_O17_TIMING', None, (0, -1), 'GRP'],
['RAS_O17_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RAS_O17_ENCODING', None, (0, 1), 'GRP'],
['RAS_O17_ADMINISTRATION', None, (1, -1), 'GRP'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'RAS_O17_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['RAS_O17_ORDER_DETAIL_SUPPLEMENT', None, (0, 1), 'GRP'],)),
'RAS_O17_ORDER_DETAIL_SUPPLEMENT': ('sequence',
(['NTE', SEGMENTS['NTE'], (1, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RAS_O17_COMPONENTS', None, (0, -1), 'GRP'],)),
'RAS_O17_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['RAS_O17_PATIENT_VISIT', None, (0, 1), 'GRP'],)),
'RAS_O17_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RAS_O17_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RAS_O17_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RCI_I05_OBSERVATION': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RCI_I05_RESULTS', None, (0, -1), 'GRP'],)),
'RCI_I05_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RCI_I05_RESULTS': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RCL_I06_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RDE_O11_COMPONENT': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RDE_O11_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'RDE_O11_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RDE_O11_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RDE_O11_TIMING', None, (0, -1), 'GRP'],
['RDE_O11_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RDE_O11_TIMING_ENCODED', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],
['RDE_O11_OBSERVATION', None, (0, -1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],
['BLG', SEGMENTS['BLG'], (0, 1), 'SEG'],
['CTI', SEGMENTS['CTI'], (0, -1), 'SEG'],)),
'RDE_O11_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RDE_O11_COMPONENT', None, (0, -1), 'GRP'],)),
'RDE_O11_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RDE_O11_PATIENT_VISIT', None, (0, 1), 'GRP'],
['RDE_O11_INSURANCE', None, (0, -1), 'GRP'],
['GT1', SEGMENTS['GT1'], (0, 1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'RDE_O11_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RDE_O11_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RDE_O11_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RDR_RDR_DEFINITION': ('sequence',
(['QRD', SEGMENTS['QRD'], (1, 1), 'SEG'],
['QRF', SEGMENTS['QRF'], (0, 1), 'SEG'],
['RDR_RDR_PATIENT', None, (0, 1), 'GRP'],
['RDR_RDR_ORDER', None, (1, -1), 'GRP'],)),
'RDR_RDR_DISPENSE': ('sequence',
(['RXD', SEGMENTS['RXD'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RDR_RDR_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RDR_RDR_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RDR_RDR_ENCODING', None, (0, 1), 'GRP'],
['RDR_RDR_DISPENSE', None, (1, -1), 'GRP'],)),
'RDR_RDR_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RDS_O13_COMPONENT': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RDS_O13_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RDS_O13_TIMING_ENCODED', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RDS_O13_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RDS_O13_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RDS_O13_TIMING', None, (0, -1), 'GRP'],
['RDS_O13_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RDS_O13_ENCODING', None, (0, 1), 'GRP'],
['RXD', SEGMENTS['RXD'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],
['RDS_O13_OBSERVATION', None, (0, -1), 'GRP'],
['FT1', SEGMENTS['FT1'], (0, -1), 'SEG'],)),
'RDS_O13_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['RDS_O13_ORDER_DETAIL_SUPPLEMENT', None, (0, 1), 'GRP'],)),
'RDS_O13_ORDER_DETAIL_SUPPLEMENT': ('sequence',
(['NTE', SEGMENTS['NTE'], (1, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RDS_O13_COMPONENT', None, (0, -1), 'GRP'],)),
'RDS_O13_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['RDS_O13_PATIENT_VISIT', None, (0, 1), 'GRP'],)),
'RDS_O13_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RDS_O13_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RDS_O13_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'REF_I12_AUTCTD_SUPPGRP2': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'REF_I12_AUTHORIZATION_CONTACT': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'REF_I12_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'REF_I12_OBSERVATION': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['REF_I12_RESULTS_NOTES', None, (0, -1), 'GRP'],)),
'REF_I12_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'REF_I12_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['REF_I12_AUTCTD_SUPPGRP2', None, (0, 1), 'GRP'],)),
'REF_I12_PROVIDER_CONTACT': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'REF_I12_RESULTS_NOTES': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RER_RER_DEFINITION': ('sequence',
(['QRD', SEGMENTS['QRD'], (1, 1), 'SEG'],
['QRF', SEGMENTS['QRF'], (0, 1), 'SEG'],
['RER_RER_PATIENT', None, (0, 1), 'GRP'],
['RER_RER_ORDER', None, (1, -1), 'GRP'],)),
'RER_RER_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RER_RER_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RGR_RGR_DEFINITION': ('sequence',
(['QRD', SEGMENTS['QRD'], (1, 1), 'SEG'],
['QRF', SEGMENTS['QRF'], (0, 1), 'SEG'],
['RGR_RGR_PATIENT', None, (0, 1), 'GRP'],
['RGR_RGR_ORDER', None, (1, -1), 'GRP'],)),
'RGR_RGR_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RGR_RGR_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RGR_RGR_ENCODING', None, (0, 1), 'GRP'],
['RXG', SEGMENTS['RXG'], (1, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RGR_RGR_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RGV_O15_COMPONENTS': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RGV_O15_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RGV_O15_TIMING_ENCODED', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RGV_O15_GIVE': ('sequence',
(['RXG', SEGMENTS['RXG'], (1, 1), 'SEG'],
['RGV_O15_TIMING_GIVE', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],
['RGV_O15_OBSERVATION', None, (1, -1), 'GRP'],)),
'RGV_O15_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RGV_O15_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RGV_O15_TIMING', None, (0, -1), 'GRP'],
['RGV_O15_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RGV_O15_ENCODING', None, (0, 1), 'GRP'],
['RGV_O15_GIVE', None, (1, -1), 'GRP'],)),
'RGV_O15_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['RGV_O15_ORDER_DETAIL_SUPPLEMENT', None, (0, 1), 'GRP'],)),
'RGV_O15_ORDER_DETAIL_SUPPLEMENT': ('sequence',
(['NTE', SEGMENTS['NTE'], (1, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RGV_O15_COMPONENTS', None, (0, -1), 'GRP'],)),
'RGV_O15_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['RGV_O15_PATIENT_VISIT', None, (0, 1), 'GRP'],)),
'RGV_O15_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RGV_O15_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RGV_O15_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RGV_O15_TIMING_GIVE': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'ROR_ROR_DEFINITION': ('sequence',
(['QRD', SEGMENTS['QRD'], (1, 1), 'SEG'],
['QRF', SEGMENTS['QRF'], (0, 1), 'SEG'],
['ROR_ROR_PATIENT', None, (0, 1), 'GRP'],
['ROR_ROR_ORDER', None, (1, -1), 'GRP'],)),
'ROR_ROR_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'ROR_ROR_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
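# RPA_I08_AUTHORIZATION_1 and _2 below are structurally identical AUT/CTD
# pairs; the numeric suffixes presumably disambiguate the two positions in
# which the group is referenced (message level vs. inside PROCEDURE).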
'RPA_I08_AUTHORIZATION_1': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'RPA_I08_AUTHORIZATION_2': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'RPA_I08_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'RPA_I08_OBSERVATION': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RPA_I08_RESULTS', None, (0, -1), 'GRP'],)),
'RPA_I08_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['RPA_I08_AUTHORIZATION_2', None, (0, 1), 'GRP'],)),
'RPA_I08_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RPA_I08_RESULTS': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RPA_I08_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RPI_I01_GUARANTOR_INSURANCE': ('sequence',
(['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['RPI_I01_INSURANCE', None, (1, -1), 'GRP'],)),
'RPI_I01_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'RPI_I01_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RPI_I04_GUARANTOR_INSURANCE': ('sequence',
(['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['RPI_I04_INSURANCE', None, (1, -1), 'GRP'],)),
'RPI_I04_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'RPI_I04_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RPL_I02_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RPR_I03_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RQA_I08_AUTCTD_SUPPGRP2': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'RQA_I08_AUTHORIZATION': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'RQA_I08_GUARANTOR_INSURANCE': ('sequence',
(['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['RQA_I08_INSURANCE', None, (1, -1), 'GRP'],)),
'RQA_I08_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'RQA_I08_OBSERVATION': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RQA_I08_RESULTS', None, (0, -1), 'GRP'],)),
'RQA_I08_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['RQA_I08_AUTCTD_SUPPGRP2', None, (0, 1), 'GRP'],)),
'RQA_I08_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RQA_I08_RESULTS': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RQA_I08_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RQC_I05_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RQI_I01_GUARANTOR_INSURANCE': ('sequence',
(['GT1', SEGMENTS['GT1'], (0, -1), 'SEG'],
['RQI_I01_INSURANCE', None, (1, -1), 'GRP'],)),
'RQI_I01_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'RQI_I01_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RQP_I04_PROVIDER': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RRA_O18_ADMINISTRATION': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, 1), 'SEG'],)),
'RRA_O18_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RRA_O18_TIMING', None, (0, -1), 'GRP'],
['RRA_O18_ADMINISTRATION', None, (0, 1), 'GRP'],)),
'RRA_O18_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RRA_O18_RESPONSE': ('sequence',
(['RRA_O18_PATIENT', None, (0, 1), 'GRP'],
['RRA_O18_ORDER', None, (1, -1), 'GRP'],)),
'RRA_O18_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RRD_O14_DISPENSE': ('sequence',
(['RXD', SEGMENTS['RXD'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RRD_O14_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RRD_O14_TIMING', None, (0, -1), 'GRP'],
['RRD_O14_DISPENSE', None, (0, 1), 'GRP'],)),
'RRD_O14_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RRD_O14_RESPONSE': ('sequence',
(['RRD_O14_PATIENT', None, (0, 1), 'GRP'],
['RRD_O14_ORDER', None, (1, -1), 'GRP'],)),
'RRD_O14_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RRE_O12_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RRE_O12_TIMING_ENCODED', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RRE_O12_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RRE_O12_TIMING', None, (0, -1), 'GRP'],
['RRE_O12_ENCODING', None, (0, 1), 'GRP'],)),
'RRE_O12_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RRE_O12_RESPONSE': ('sequence',
(['RRE_O12_PATIENT', None, (0, 1), 'GRP'],
['RRE_O12_ORDER', None, (1, -1), 'GRP'],)),
'RRE_O12_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RRE_O12_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RRG_O16_GIVE': ('sequence',
(['RXG', SEGMENTS['RXG'], (1, 1), 'SEG'],
['RRG_O16_TIMING_GIVE', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RRG_O16_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RRG_O16_TIMING', None, (0, -1), 'GRP'],
['RRG_O16_GIVE', None, (0, 1), 'GRP'],)),
'RRG_O16_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RRG_O16_RESPONSE': ('sequence',
(['RRG_O16_PATIENT', None, (0, 1), 'GRP'],
['RRG_O16_ORDER', None, (1, -1), 'GRP'],)),
'RRG_O16_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RRG_O16_TIMING_GIVE': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RRI_I12_AUTCTD_SUPPGRP2': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'RRI_I12_AUTHORIZATION_CONTACT': ('sequence',
(['AUT', SEGMENTS['AUT'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],)),
'RRI_I12_OBSERVATION': ('sequence',
(['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RRI_I12_RESULTS_NOTES', None, (0, -1), 'GRP'],)),
'RRI_I12_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RRI_I12_PROCEDURE': ('sequence',
(['PR1', SEGMENTS['PR1'], (1, 1), 'SEG'],
['RRI_I12_AUTCTD_SUPPGRP2', None, (0, 1), 'GRP'],)),
'RRI_I12_PROVIDER_CONTACT': ('sequence',
(['PRD', SEGMENTS['PRD'], (1, 1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, -1), 'SEG'],)),
'RRI_I12_RESULTS_NOTES': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_K11_ROW_DEFINITION': ('sequence',
(['RDF', SEGMENTS['RDF'], (1, 1), 'SEG'],
['RDT', SEGMENTS['RDT'], (0, -1), 'SEG'],)),
'RSP_K21_QUERY_RESPONSE': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['QRI', SEGMENTS['QRI'], (1, 1), 'SEG'],)),
'RSP_K23_QUERY_RESPONSE': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],)),
'RSP_K25_STAFF': ('sequence',
(['STF', SEGMENTS['STF'], (1, 1), 'SEG'],
['PRA', SEGMENTS['PRA'], (0, -1), 'SEG'],
['ORG', SEGMENTS['ORG'], (0, -1), 'SEG'],
['AFF', SEGMENTS['AFF'], (0, -1), 'SEG'],
['LAN', SEGMENTS['LAN'], (0, -1), 'SEG'],
['EDU', SEGMENTS['EDU'], (0, -1), 'SEG'],
['CER', SEGMENTS['CER'], (0, -1), 'SEG'],)),
'RSP_K31_COMPONENTS': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_K31_ENCODING': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RSP_K31_TIMING_ENCODED', None, (1, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_K31_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_K31_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RSP_K31_TIMING', None, (0, -1), 'GRP'],
['RSP_K31_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RSP_K31_ENCODING', None, (0, 1), 'GRP'],
['RXD', SEGMENTS['RXD'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],
['RSP_K31_OBSERVATION', None, (1, -1), 'GRP'],)),
'RSP_K31_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RSP_K31_COMPONENTS', None, (0, -1), 'GRP'],)),
'RSP_K31_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],
['RSP_K31_PATIENT_VISIT', None, (0, 1), 'GRP'],)),
'RSP_K31_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RSP_K31_RESPONSE': ('sequence',
(['RSP_K31_PATIENT', None, (0, 1), 'GRP'],
['RSP_K31_ORDER', None, (1, -1), 'GRP'],)),
'RSP_K31_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_K31_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Q11_MF_LOC_DEPT': ('sequence',
(['LDP', SEGMENTS['LDP'], (1, 1), 'SEG'],
['LCH', SEGMENTS['LCH'], (0, -1), 'SEG'],
['LCC', SEGMENTS['LCC'], (0, -1), 'SEG'],)),
'RSP_Q11_QUERY_RESULT_CLUSTER': ('sequence',
(['MFE', SEGMENTS['MFE'], (1, 1), 'SEG'],
['LOC', SEGMENTS['LOC'], (1, 1), 'SEG'],
['LCH', SEGMENTS['LCH'], (0, -1), 'SEG'],
['LRL', SEGMENTS['LRL'], (0, -1), 'SEG'],
['RSP_Q11_MF_LOC_DEPT', None, (1, -1), 'GRP'],)),
'RSP_Z82_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RSP_Z82_TIMING', None, (0, -1), 'GRP'],
['RSP_Z82_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RSP_Z82_ENCODED_ORDER', None, (0, 1), 'GRP'],
['RXD', SEGMENTS['RXD'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],
['RSP_Z82_OBSERVATION', None, (1, -1), 'GRP'],)),
'RSP_Z82_ENCODED_ORDER': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RSP_Z82_TIMING_ENCODED', None, (0, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_Z82_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_Z82_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RSP_Z82_TREATMENT', None, (0, 1), 'GRP'],)),
'RSP_Z82_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RSP_Z82_VISIT', None, (0, 1), 'GRP'],)),
'RSP_Z82_QUERY_RESPONSE': ('sequence',
(['RSP_Z82_PATIENT', None, (0, 1), 'GRP'],
['RSP_Z82_COMMON_ORDER', None, (1, -1), 'GRP'],)),
'RSP_Z82_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Z82_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Z82_TREATMENT': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_Z82_VISIT': ('sequence',
(['AL1', SEGMENTS['AL1'], (1, -1), 'SEG'],
['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RSP_Z86_ADMINISTRATION': ('sequence',
(['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_Z86_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RSP_Z86_TIMING', None, (0, -1), 'GRP'],
['RSP_Z86_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RSP_Z86_ENCODED_ORDER', None, (0, 1), 'GRP'],
['RSP_Z86_DISPENSE', None, (0, 1), 'GRP'],
['RSP_Z86_GIVE', None, (0, 1), 'GRP'],
['RSP_Z86_ADMINISTRATION', None, (0, 1), 'GRP'],
['RSP_Z86_OBSERVATION', None, (1, -1), 'GRP'],)),
'RSP_Z86_DISPENSE': ('sequence',
(['RXD', SEGMENTS['RXD'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_Z86_ENCODED_ORDER': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RSP_Z86_TIMING_ENCODED', None, (0, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_Z86_GIVE': ('sequence',
(['RXG', SEGMENTS['RXG'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_Z86_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_Z86_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_Z86_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['AL1', SEGMENTS['AL1'], (0, -1), 'SEG'],)),
'RSP_Z86_QUERY_RESPONSE': ('sequence',
(['RSP_Z86_PATIENT', None, (0, 1), 'GRP'],
['RSP_Z86_COMMON_ORDER', None, (1, -1), 'GRP'],)),
'RSP_Z86_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Z86_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Z88_ALLERGY': ('sequence',
(['AL1', SEGMENTS['AL1'], (1, -1), 'SEG'],
['RSP_Z88_VISIT', None, (0, 1), 'GRP'],)),
'RSP_Z88_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RSP_Z88_TIMING', None, (0, -1), 'GRP'],
['RSP_Z88_ORDER_DETAIL', None, (0, 1), 'GRP'],
['RSP_Z88_ORDER_ENCODED', None, (0, 1), 'GRP'],
['RXD', SEGMENTS['RXD'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],
['RSP_Z88_OBSERVATION', None, (1, -1), 'GRP'],)),
'RSP_Z88_COMPONENT': ('sequence',
(['RXC', SEGMENTS['RXC'], (1, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_Z88_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_Z88_ORDER_DETAIL': ('sequence',
(['RXO', SEGMENTS['RXO'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RSP_Z88_COMPONENT', None, (0, 1), 'GRP'],)),
'RSP_Z88_ORDER_ENCODED': ('sequence',
(['RXE', SEGMENTS['RXE'], (1, 1), 'SEG'],
['RSP_Z88_TIMING_ENCODED', None, (0, -1), 'GRP'],
['RXR', SEGMENTS['RXR'], (1, -1), 'SEG'],
['RXC', SEGMENTS['RXC'], (0, -1), 'SEG'],)),
'RSP_Z88_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RSP_Z88_ALLERGY', None, (0, 1), 'GRP'],)),
'RSP_Z88_QUERY_RESPONSE': ('sequence',
(['RSP_Z88_PATIENT', None, (0, 1), 'GRP'],
['RSP_Z88_COMMON_ORDER', None, (1, -1), 'GRP'],)),
'RSP_Z88_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Z88_TIMING_ENCODED': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Z88_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RSP_Z90_COMMON_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['RSP_Z90_TIMING', None, (0, -1), 'GRP'],
['OBR', SEGMENTS['OBR'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['CTD', SEGMENTS['CTD'], (0, 1), 'SEG'],
['RSP_Z90_OBSERVATION', None, (1, -1), 'GRP'],)),
'RSP_Z90_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'RSP_Z90_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['RSP_Z90_VISIT', None, (0, 1), 'GRP'],)),
'RSP_Z90_QUERY_RESPONSE': ('sequence',
(['RSP_Z90_PATIENT', None, (0, 1), 'GRP'],
['RSP_Z90_COMMON_ORDER', None, (1, -1), 'GRP'],
['RSP_Z90_SPECIMEN', None, (0, -1), 'GRP'],)),
'RSP_Z90_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'RSP_Z90_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'RSP_Z90_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'RTB_K13_ROW_DEFINITION': ('sequence',
(['RDF', SEGMENTS['RDF'], (1, 1), 'SEG'],
['RDT', SEGMENTS['RDT'], (0, -1), 'SEG'],)),
'RTB_KNN': ('sequence',
(['MSH', SEGMENTS['MSH'], (1, 1), 'SEG'],
['SFT', SEGMENTS['SFT'], (0, -1), 'SEG'],
['MSA', SEGMENTS['MSA'], (1, 1), 'SEG'],
['ERR', SEGMENTS['ERR'], (0, 1), 'SEG'],
['QAK', SEGMENTS['QAK'], (1, 1), 'SEG'],
['QPD', SEGMENTS['QPD'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],
['ANYHL7SEGMENT', SEGMENTS['ANYHL7SEGMENT'], (1, 1), 'SEG'],
['DSC', SEGMENTS['DSC'], (0, 1), 'SEG'],)),
'RTB_Z74_ROW_DEFINITION': ('sequence',
(['RDF', SEGMENTS['RDF'], (1, 1), 'SEG'],
['RDT', SEGMENTS['RDT'], (0, -1), 'SEG'],)),
'SIU_S12_GENERAL_RESOURCE': ('sequence',
(['AIG', SEGMENTS['AIG'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SIU_S12_LOCATION_RESOURCE': ('sequence',
(['AIL', SEGMENTS['AIL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SIU_S12_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PD1', SEGMENTS['PD1'], (0, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],)),
'SIU_S12_PERSONNEL_RESOURCE': ('sequence',
(['AIP', SEGMENTS['AIP'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SIU_S12_RESOURCES': ('sequence',
(['RGS', SEGMENTS['RGS'], (1, 1), 'SEG'],
['SIU_S12_SERVICE', None, (0, -1), 'GRP'],
['SIU_S12_GENERAL_RESOURCE', None, (0, -1), 'GRP'],
['SIU_S12_LOCATION_RESOURCE', None, (0, -1), 'GRP'],
['SIU_S12_PERSONNEL_RESOURCE', None, (0, -1), 'GRP'],)),
'SIU_S12_SERVICE': ('sequence',
(['AIS', SEGMENTS['AIS'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SQM_S25_GENERAL_RESOURCE': ('sequence',
(['AIG', SEGMENTS['AIG'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],)),
'SQM_S25_LOCATION_RESOURCE': ('sequence',
(['AIL', SEGMENTS['AIL'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],)),
'SQM_S25_PERSONNEL_RESOURCE': ('sequence',
(['AIP', SEGMENTS['AIP'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],)),
'SQM_S25_REQUEST': ('sequence',
(['ARQ', SEGMENTS['ARQ'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],
['PID', SEGMENTS['PID'], (0, 1), 'SEG'],
['SQM_S25_RESOURCES', None, (1, -1), 'GRP'],)),
'SQM_S25_RESOURCES': ('sequence',
(['RGS', SEGMENTS['RGS'], (1, 1), 'SEG'],
['SQM_S25_SERVICE', None, (0, -1), 'GRP'],
['SQM_S25_GENERAL_RESOURCE', None, (0, -1), 'GRP'],
['SQM_S25_PERSONNEL_RESOURCE', None, (0, -1), 'GRP'],
['SQM_S25_LOCATION_RESOURCE', None, (0, -1), 'GRP'],)),
'SQM_S25_SERVICE': ('sequence',
(['AIS', SEGMENTS['AIS'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],)),
'SQR_S25_GENERAL_RESOURCE': ('sequence',
(['AIG', SEGMENTS['AIG'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SQR_S25_LOCATION_RESOURCE': ('sequence',
(['AIL', SEGMENTS['AIL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SQR_S25_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, 1), 'SEG'],)),
'SQR_S25_PERSONNEL_RESOURCE': ('sequence',
(['AIP', SEGMENTS['AIP'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SQR_S25_RESOURCES': ('sequence',
(['RGS', SEGMENTS['RGS'], (1, 1), 'SEG'],
['SQR_S25_SERVICE', None, (0, -1), 'GRP'],
['SQR_S25_GENERAL_RESOURCE', None, (0, -1), 'GRP'],
['SQR_S25_PERSONNEL_RESOURCE', None, (0, -1), 'GRP'],
['SQR_S25_LOCATION_RESOURCE', None, (0, -1), 'GRP'],)),
'SQR_S25_SCHEDULE': ('sequence',
(['SCH', SEGMENTS['SCH'], (1, 1), 'SEG'],
['TQ1', SEGMENTS['TQ1'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['SQR_S25_PATIENT', None, (0, 1), 'GRP'],
['SQR_S25_RESOURCES', None, (1, -1), 'GRP'],)),
'SQR_S25_SERVICE': ('sequence',
(['AIS', SEGMENTS['AIS'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRM_S01_GENERAL_RESOURCE': ('sequence',
(['AIG', SEGMENTS['AIG'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRM_S01_LOCATION_RESOURCE': ('sequence',
(['AIL', SEGMENTS['AIL'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRM_S01_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],)),
'SRM_S01_PERSONNEL_RESOURCE': ('sequence',
(['AIP', SEGMENTS['AIP'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRM_S01_RESOURCES': ('sequence',
(['RGS', SEGMENTS['RGS'], (1, 1), 'SEG'],
['SRM_S01_SERVICE', None, (0, -1), 'GRP'],
['SRM_S01_GENERAL_RESOURCE', None, (0, -1), 'GRP'],
['SRM_S01_LOCATION_RESOURCE', None, (0, -1), 'GRP'],
['SRM_S01_PERSONNEL_RESOURCE', None, (0, -1), 'GRP'],)),
'SRM_S01_SERVICE': ('sequence',
(['AIS', SEGMENTS['AIS'], (1, 1), 'SEG'],
['APR', SEGMENTS['APR'], (0, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRR_S01_GENERAL_RESOURCE': ('sequence',
(['AIG', SEGMENTS['AIG'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRR_S01_LOCATION_RESOURCE': ('sequence',
(['AIL', SEGMENTS['AIL'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRR_S01_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['PV1', SEGMENTS['PV1'], (0, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],
['DG1', SEGMENTS['DG1'], (0, -1), 'SEG'],)),
'SRR_S01_PERSONNEL_RESOURCE': ('sequence',
(['AIP', SEGMENTS['AIP'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SRR_S01_RESOURCES': ('sequence',
(['RGS', SEGMENTS['RGS'], (1, 1), 'SEG'],
['SRR_S01_SERVICE', None, (0, -1), 'GRP'],
['SRR_S01_GENERAL_RESOURCE', None, (0, -1), 'GRP'],
['SRR_S01_LOCATION_RESOURCE', None, (0, -1), 'GRP'],
['SRR_S01_PERSONNEL_RESOURCE', None, (0, -1), 'GRP'],)),
'SRR_S01_SCHEDULE': ('sequence',
(['SCH', SEGMENTS['SCH'], (1, 1), 'SEG'],
['TQ1', SEGMENTS['TQ1'], (0, -1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],
['SRR_S01_PATIENT', None, (0, -1), 'GRP'],
['SRR_S01_RESOURCES', None, (1, -1), 'GRP'],)),
'SRR_S01_SERVICE': ('sequence',
(['AIS', SEGMENTS['AIS'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'SSR_U04_SPECIMEN_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['SPM', SEGMENTS['SPM'], (0, -1), 'SEG'],)),
'SSU_U03_SPECIMEN': ('sequence',
(['SPM', SEGMENTS['SPM'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],)),
'SSU_U03_SPECIMEN_CONTAINER': ('sequence',
(['SAC', SEGMENTS['SAC'], (1, 1), 'SEG'],
['OBX', SEGMENTS['OBX'], (0, -1), 'SEG'],
['SSU_U03_SPECIMEN', None, (0, -1), 'GRP'],)),
'SUR_P09_FACILITY': ('sequence',
(['FAC', SEGMENTS['FAC'], (1, 1), 'SEG'],
['SUR_P09_PRODUCT', None, (1, -1), 'GRP'],
['PSH', SEGMENTS['PSH'], (1, 1), 'SEG'],
['SUR_P09_FACILITY_DETAIL', None, (1, -1), 'GRP'],
)),
'SUR_P09_FACILITY_DETAIL': ('sequence',
(['FAC', SEGMENTS['FAC'], (1, 1), 'SEG'],
['PDC', SEGMENTS['PDC'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (1, 1), 'SEG'],)),
'SUR_P09_PRODUCT': ('sequence',
(['PSH', SEGMENTS['PSH'], (1, 1), 'SEG'],
['PDC', SEGMENTS['PDC'], (1, 1), 'SEG'],)),
'TCU_U10_TEST_CONFIGURATION': ('sequence',
(['SPM', SEGMENTS['SPM'], (0, 1), 'SEG'],
['TCC', SEGMENTS['TCC'], (1, -1), 'SEG'],)),
'VXR_V03_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'VXR_V03_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'VXR_V03_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['VXR_V03_TIMING', None, (0, -1), 'GRP'],
['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, 1), 'SEG'],
['VXR_V03_OBSERVATION', None, (0, -1), 'GRP'],)),
'VXR_V03_PATIENT_VISIT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'VXR_V03_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'VXU_V04_INSURANCE': ('sequence',
(['IN1', SEGMENTS['IN1'], (1, 1), 'SEG'],
['IN2', SEGMENTS['IN2'], (0, 1), 'SEG'],
['IN3', SEGMENTS['IN3'], (0, 1), 'SEG'],)),
'VXU_V04_OBSERVATION': ('sequence',
(['OBX', SEGMENTS['OBX'], (1, 1), 'SEG'],
['NTE', SEGMENTS['NTE'], (0, -1), 'SEG'],)),
'VXU_V04_ORDER': ('sequence',
(['ORC', SEGMENTS['ORC'], (1, 1), 'SEG'],
['VXU_V04_TIMING', None, (0, -1), 'GRP'],
['RXA', SEGMENTS['RXA'], (1, 1), 'SEG'],
['RXR', SEGMENTS['RXR'], (0, 1), 'SEG'],
['VXU_V04_OBSERVATION', None, (0, -1), 'GRP'],)),
'VXU_V04_PATIENT': ('sequence',
(['PV1', SEGMENTS['PV1'], (1, 1), 'SEG'],
['PV2', SEGMENTS['PV2'], (0, 1), 'SEG'],)),
'VXU_V04_TIMING': ('sequence',
(['TQ1', SEGMENTS['TQ1'], (1, 1), 'SEG'],
['TQ2', SEGMENTS['TQ2'], (0, -1), 'SEG'],)),
'VXX_V02_PATIENT': ('sequence',
(['PID', SEGMENTS['PID'], (1, 1), 'SEG'],
['NK1', SEGMENTS['NK1'], (0, -1), 'SEG'],)),
}
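# Resolve group references in place: every child entry tagged 'GRP' gets its
# definition slot (item[1]) replaced by the named entry in GROUPS, so nested
# groups can be traversed without further lookups.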
for k, v in iteritems(GROUPS):
for item in v[1]:
if item[3] == 'GRP':
item[1] = GROUPS[item[0]]
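

# Hedged usage sketch -- the helper name `describe_group` is illustrative and
# not part of the generated module; it only assumes the resolved GROUPS table
# above. It prints each child of a group with its type and cardinality.
def describe_group(name):
    _kind, children = GROUPS[name]
    for child_name, _definition, (lo, hi), child_type in children:
        bound = 'unbounded' if hi == -1 else hi
        print('%s -> %s [%s] min=%s max=%s' % (name, child_name, child_type, lo, bound))
# Example: describe_group('RSP_K31_ORDER') lists ORC, the TIMING and
# ORDER_DETAIL sub-groups, RXD, RXR, RXC and the OBSERVATION sub-group in order.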
|
state_of_sparsity/sparse_transformer/decoder.py | deepneuralmachine/google-research | 23,901 | 11066038 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode the test dataset with a trained model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import app
from absl import flags
from absl import logging
from tensor2tensor.bin import t2t_decoder
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import trainer_lib
from state_of_sparsity.sparse_transformer import common_flags
from state_of_sparsity.sparse_transformer.models import sparse_transformer # pylint: disable=unused-import
FLAGS = flags.FLAGS
def create_hparams(argv):
t2t_trainer.set_hparams_from_args(argv[1:])
return trainer_lib.create_hparams(
FLAGS.hparams_set,
FLAGS.hparams,
data_dir=os.path.expanduser(FLAGS.data_dir),
problem_name=FLAGS.problem)
def main(argv):
# HACK: redirect the create_hparams function to setup the hparams
# using the passed in command-line args
argv = common_flags.update_argv(argv)
t2t_decoder.create_hparams = functools.partial(create_hparams, argv)
t2t_decoder.main(None)
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
app.run(main)
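# Hedged invocation sketch -- these flag names follow tensor2tensor's decoder
# conventions, and the concrete values are assumptions, not taken from this file:
#   python -m state_of_sparsity.sparse_transformer.decoder \
#       --problem=translate_ende_wmt32k --hparams_set=sparse_transformer \
#       --data_dir=/path/to/data --output_dir=/path/to/checkpoints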
|
inside/DB/Table_Class/__init__.py | kangzai228/learning-power | 318 | 11066048 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author : lisztomania
# @Date : 2021/1/15
# @Software : Pycharm
# @Version : Python 3.8.5
# @File : __init__.py
# @Function : Data objects
|
powerfulseal/cli/__main__.py | fahedouch/powerfulseal | 1,362 | 11066065 | <gh_stars>1000+
# Copyright 2017 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import yaml
from configargparse import ArgumentParser, YAMLConfigFileParser
import logging
import coloredlogs
import textwrap
import sys
import os
from powerfulseal import makeLogger
import powerfulseal.version
from powerfulseal.k8s.metrics_server_client import MetricsServerClient
from prometheus_client import start_http_server
from powerfulseal.metriccollectors import StdoutCollector, PrometheusCollector, DatadogCollector
from powerfulseal.policy.label_runner import LabelRunner
from powerfulseal.web.server import start_server, ServerStateLogHandler
from ..node import NodeInventory
from ..node.inventory import read_inventory_file_to_dict
from ..clouddrivers import OpenStackDriver, AWSDriver, NoCloudDriver, AzureDriver, GCPDriver
from ..execute import SSHExecutor, KubernetesExecutor
from ..k8s import K8sClient, K8sInventory
from .pscmd import PSCmd
from ..policy import PolicyRunner
KUBECONFIG_DEFAULT_PATH = "~/.kube/config"
def parse_kubeconfig(args):
"""
if explicitly set, use the --kubeconfig value
otherwise, check if KUBECONFIG is set
if not, check if there is `~/.kube/config` available
else try to build in-cluster config
"""
logger = makeLogger(__name__)
kube_config = None
expanded_home_kube_config_path = os.path.expanduser(KUBECONFIG_DEFAULT_PATH)
if args.kubeconfig:
kube_config = args.kubeconfig
logger.info("Creating kubernetes client with config %s from --kubeconfig flag", kube_config)
elif os.environ.get("KUBECONFIG"):
kube_config = os.path.expanduser(os.environ.get("KUBECONFIG"))
logger.info("Creating kubernetes client with config %s from KUBECONFIG env var", kube_config)
elif os.path.exists(expanded_home_kube_config_path):
kube_config = expanded_home_kube_config_path
logger.info("Creating kubernetes client with config %s (path found for backwards compatibility)", kube_config)
else:
logger.info("Creating kubernetes client with in-cluster config")
return kube_config
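# Hedged usage sketch (the `args` namespace below is hypothetical): resolution
# order is --kubeconfig flag > KUBECONFIG env var > ~/.kube/config > None,
# where None makes the Kubernetes client fall back to in-cluster config.
#   args = argparse.Namespace(kubeconfig=None)
#   kube_config = parse_kubeconfig(args)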
def add_kubernetes_options(parser):
# Kubernetes
args_kubernetes = parser.add_argument_group('Kubernetes settings')
args_kubernetes.add_argument(
'--kubeconfig',
help='Location of kube-config file',
type=os.path.expanduser
)
args_kubernetes.add_argument(
'--use-pod-delete-instead-of-ssh-kill',
        help='DEPRECATED! If set, pods will be deleted instead of their containers being killed over SSH; now equivalent to --execution-mode=kubernetes',
default=False,
action='store_true',
)
args_kubernetes.add_argument(
'--execution-mode',
help=(
'PowerfulSeal supports two ways of injecting failure: '
'1) through SSH and 2) by scheduling containers in Kubernetes. '
'Use of SSH leverages Docker directly and removes Kubernetes from the equation. It\'s typically faster too. '
'But it requires SSH access to all nodes in the cluster. '
'Alternatively, we can rely on Kubernetes to schedule our chaos pods. Slower, less reliable, but requires no special setup. '
'The default is now to use Kubernetes'
),
default="kubernetes",
choices=["kubernetes", "ssh"]
)
def add_ssh_options(parser):
# SSH
args_ssh = parser.add_argument_group('SSH settings')
args_ssh.add_argument(
'--remote-user',
default=os.environ.get("PS_REMOTE_USER", "cloud-user"),
help="the of the user for the ssh connections",
)
args_ssh.add_argument(
'--ssh-allow-missing-host-keys',
default=False,
action='store_true',
help='Allow connection to hosts not present in known_hosts',
)
args_ssh.add_argument(
'--override-ssh-host',
help=(
'If you\'d like to execute all commands on a different host '
'(for example for minikube) you can override it here'
)
)
args_ssh.add_argument(
'--use-private-ip',
default=False,
action='store_true',
help='Use the private IP of each node (vs public IP)',
)
ssh_options = args_ssh.add_mutually_exclusive_group()
ssh_options.add_argument(
'--ssh-path-to-private-key',
default=os.environ.get("PS_PRIVATE_KEY"),
help='Path to ssh private key',
)
ssh_options.add_argument(
'--ssh-password',
default=os.environ.get("PS_SSH_PASSWORD"),
help='ssh password',
)
args_ssh.add_argument(
'--ssh-kill-command',
default=os.environ.get("PS_SSH_KILL_COMMAND", "sudo docker kill -s {signal} {container_id}"),
help='The command to execute on remote host to kill the {container_id}',
)
def add_inventory_options(parser):
# Inventory
args = parser.add_argument_group('Inventory settings')
inventory_options = args.add_mutually_exclusive_group(required=False)
inventory_options.add_argument('-i', '--inventory-file',
default=os.environ.get("INVENTORY_FILE"),
help=('the inventory file (in ini format) of groups '
'of hosts to work with')
)
inventory_options.add_argument('--inventory-kubernetes',
help='reads all kubernetes cluster nodes as inventory',
action='store_true',
)
def add_cloud_options(parser):
# Cloud Driver
args = parser.add_argument_group('Cloud settings')
cloud_options = args.add_mutually_exclusive_group()
cloud_options.add_argument('--openstack',
default=os.environ.get("OPENSTACK_CLOUD"),
action='store_true',
help="use OpenStack cloud provider",
)
cloud_options.add_argument('--aws',
default=os.environ.get("AWS_CLOUD"),
action='store_true',
help="use AWS cloud provider",
)
cloud_options.add_argument('--azure',
default=os.environ.get("AZURE_CLOUD"),
action='store_true',
help="use Azure cloud provider",
)
cloud_options.add_argument('--gcp',
default=os.environ.get("GCP_CLOUD"),
action='store_true',
help="use GCP cloud provider",
)
cloud_options.add_argument('--no-cloud',
default=os.environ.get("NO_CLOUD"),
action='store_true',
help="don't use cloud provider",
)
# other options
args.add_argument('--openstack-cloud-name',
default=os.environ.get("OPENSTACK_CLOUD_NAME"),
help="optional name of the open stack cloud from your config file to use",
)
args.add_argument('--azure-resource-group-name',
default=os.environ.get("AZURE_RESORUCE_GROUP_NAME"),
help="optional name of the Azure vm cluster resource group. Used to determine azure-node-resource-group-name.",
)
args.add_argument('--azure-node-resource-group-name',
default=os.environ.get("AZURE_NODE_RESORUCE_GROUP_NAME"),
help="name of the Azure vm cluster node resource group",
)
args.add_argument('--gcp-config-file',
default=os.environ.get("GCP_CONFIG_FILE"),
help="name of the gcloud config file (in json) to use instead of the default one",
)
def add_namespace_options(parser):
args = parser.add_argument_group('Kubernetes options')
args.add_argument('--kubernetes-namespace',
default='default',
help='Namespace to use for label mode '
             'or a comma-separated list of namespaces '
'(set to blank for all namespaces)'
)
def add_policy_options(parser):
# Policy
args = parser.add_argument_group('Policy settings')
args.add_argument('--policy-file',
default=os.environ.get("POLICY_FILE"),
help='the policy file to run',
required=False
)
def add_run_options(parser):
# policy settings
run_args = parser.add_argument_group(
title='Policy settings'
)
run_args.add_argument('--min-seconds-between-runs',
help='Minimum number of seconds between runs',
default=0,
type=int
)
run_args.add_argument('--max-seconds-between-runs',
help='Maximum number of seconds between runs',
default=300,
type=int
)
def add_metrics_options(parser):
# metrics settings
autonomous_args = parser.add_argument_group(
title='Metrics settings'
)
metric_options = autonomous_args.add_mutually_exclusive_group(required=False)
metric_options.add_argument('--stdout-collector',
default=os.environ.get("STDOUT_COLLECTOR"),
action='store_true',
help="print metrics collected to stdout"
)
metric_options.add_argument('--prometheus-collector',
default=os.environ.get("PROMETHEUS_COLLECTOR"),
action='store_true',
help="store metrics in Prometheus and expose metrics over a HTTP server"
)
metric_options.add_argument('--datadog-collector',
default=os.environ.get("DATADOG_COLLECTOR"),
action='store_true',
help="send collected metrics to Datadog using DogStatsD"
)
args_prometheus = parser.add_argument_group('Prometheus settings')
args_prometheus.add_argument(
'--prometheus-host',
default='0.0.0.0',
help=(
'Host to expose Prometheus metrics via the HTTP server when using '
'the --prometheus-collector flag'
),
)
args_prometheus.add_argument(
'--prometheus-port',
default=9000,
help=(
'Port to expose Prometheus metrics via the HTTP server '
'when using the --prometheus-collector flag'
),
type=check_valid_port
)
def add_common_options(parser):
add_kubernetes_options(parser)
add_cloud_options(parser)
add_inventory_options(parser)
add_ssh_options(parser)
def check_valid_port(value):
parsed = int(value)
min_port = 0
max_port = 65535
if parsed < min_port or parsed > max_port:
raise argparse.ArgumentTypeError("%s is an invalid port number" % value)
return parsed
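# Hedged examples of the validator above: check_valid_port("8000") returns 8000,
# while check_valid_port("70000") raises argparse.ArgumentTypeError.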
def parse_args(args):
parser = ArgumentParser(
config_file_parser_class=YAMLConfigFileParser,
formatter_class=argparse.RawDescriptionHelpFormatter,
default_config_files=['~/.config/seal', '~/.seal'],
description=textwrap.dedent("""\
PowerfulSeal
The Chaos Engineering tool for Kubernetes
"""),
)
# General settings
parser.add_argument(
'-c', '--config',
is_config_file=True,
env_var="CONFIG",
help='Config file path',
)
parser.add_argument('-v', '--verbose',
action='count',
help='Verbose logging.'
)
parser.add_argument('-s', '--silent',
action='count',
help='Silent logging.'
)
parser.add_argument(
'-V', '--version',
action='version',
version='%(prog)s {version}'.format(version=powerfulseal.version.__version__),
help='Version.',
)
# subparsers
subparsers = parser.add_subparsers(
title='MODES OF OPERATION',
description=(
'Pick one of the following options to start the Seal in the '
'specified mode. Learn more at '
'https://github.com/powerfulseal/powerfulseal#introduction'
),
dest='mode'
)
##########################################################################
# INTERACTIVE MODE
##########################################################################
parser_interactive = subparsers.add_parser('interactive',
help=(
'Starts an interactive CLI, which allows to manually issue '
'commands on pods and nodes and provides a sweet autocomplete. '
'If you\'re reading this for the first time, you should probably '
'start here. '
),
)
add_common_options(parser_interactive)
##########################################################################
# AUTONOMOUS MODE
##########################################################################
parser_autonomous = subparsers.add_parser('autonomous',
help=(
'This is the main mode of operation. It reads the policy file and executes it.'
),
)
add_common_options(parser_autonomous)
add_policy_options(parser_autonomous)
add_metrics_options(parser_autonomous)
# web ui settings
web_args = parser_autonomous.add_argument_group(
title='Web UI settings'
)
web_args.add_argument(
'--headless',
help='Doesn\'t start the UI, just runs the policy',
action='store_true'
)
web_args.add_argument(
'--host',
help='Specify host for the PowerfulSeal web server',
default=os.environ.get('HOST', '0.0.0.0')
)
web_args.add_argument(
'--port',
help='Specify port for the PowerfulSeal web server',
default=int(os.environ.get('PORT', '8000')),
type=check_valid_port
)
web_args.add_argument(
'--accept-proxy-headers',
help='Set this flag for the webserver to accept X-Forwarded-* headers',
action='store_true'
)
##########################################################################
# LABEL MODE
##########################################################################
parser_label = subparsers.add_parser('label',
help=(
'Starts in label mode. '
'It reads Kubernetes pods in a specified namespace, and checks '
' their \'seal/*\' labels to decide which ones to kill.'
'There is no policy needed in this mode. '
'To learn about supported labels, read more at '
'https://github.com/powerfulseal/powerfulseal/ '
),
)
add_common_options(parser_label)
add_namespace_options(parser_label)
add_run_options(parser_label)
add_metrics_options(parser_label)
##########################################################################
# VALIDATE POLICY MODE
##########################################################################
parser_validate_policy = subparsers.add_parser('validate',
help=(
            'Validates any file against the policy schema. '
'You can use this to check that your policy is correct, '
'before using it in autonomous mode.'
)
)
add_policy_options(parser_validate_policy)
return parser.parse_args(args=args)
def main(argv):
"""
The main function to invoke the powerfulseal cli
"""
args = parse_args(args=argv)
if args.mode is None:
return parse_args(['--help'])
##########################################################################
# VALIDATE POLICY MODE
##########################################################################
if args.mode == 'validate':
policy = PolicyRunner.load_file(args.policy_file)
if PolicyRunner.is_policy_valid(policy):
return print('OK')
print("Policy not valid. See log output above.")
return sys.exit(1)
##########################################################################
# LOGGING
##########################################################################
    # quiet the werkzeug logger that flask uses for request logging on stdout
logging.getLogger("werkzeug").setLevel(logging.ERROR)
try:
import click
def echo(*args, **kwargs):
pass
click.echo = echo
click.secho = echo
except:
pass
# parse the verbosity flags
if args.silent == 1:
log_level = logging.WARNING
elif args.silent == 2:
log_level = logging.ERROR
elif not args.verbose:
log_level = logging.INFO
else:
log_level = logging.DEBUG
server_log_handler = ServerStateLogHandler()
server_log_handler.setLevel(log_level)
# do a basic config with the server log handler
logging.basicConfig(level=log_level, handlers=[server_log_handler])
# this installs a stdout handler by default to the root
coloredlogs.install(
level=log_level,
fmt='%(asctime)s %(levelname)s %(name)s %(message)s'
)
# the main cli handler
logger = makeLogger(__name__)
logger.setLevel(log_level)
logger.info("verbosity: %s; log level: %s; handler level: %s", args.verbose, logging.getLevelName(logger.getEffectiveLevel()), logging.getLevelName(log_level) )
##########################################################################
# KUBERNETES
##########################################################################
kube_config = parse_kubeconfig(args)
k8s_client = K8sClient(kube_config=kube_config)
operation_mode = args.execution_mode
# backwards compatibility
if args.use_pod_delete_instead_of_ssh_kill:
operation_mode = "kubernetes"
k8s_inventory = K8sInventory(
k8s_client=k8s_client,
)
##########################################################################
# CLOUD DRIVER
##########################################################################
if args.openstack:
logger.info("Building OpenStack driver")
driver = OpenStackDriver(
cloud=args.openstack_cloud_name,
)
elif args.aws:
logger.info("Building AWS driver")
driver = AWSDriver()
elif args.azure:
logger.info("Building Azure driver")
driver = AzureDriver(
cluster_rg_name=args.azure_resource_group_name,
cluster_node_rg_name=args.azure_node_resource_group_name,
)
elif args.gcp:
logger.info("Building GCP driver")
driver = GCPDriver(config=args.gcp_config_file)
else:
logger.info("No cloud driver - some functionality disabled")
driver = NoCloudDriver()
##########################################################################
# INVENTORY
##########################################################################
if args.inventory_file:
logger.info("Reading inventory from %s", args.inventory_file)
groups_to_restrict_to = read_inventory_file_to_dict(
args.inventory_file
)
else:
logger.debug("Attempting to read the inventory from kubernetes")
groups_to_restrict_to = k8s_client.get_nodes_groups()
logger.debug("Restricting inventory to %s" % groups_to_restrict_to)
inventory = NodeInventory(
driver=driver,
restrict_to_groups=groups_to_restrict_to,
)
inventory.sync()
##########################################################################
# SSH EXECUTOR
##########################################################################
if operation_mode == "kubernetes":
executor = KubernetesExecutor(
k8s_client=k8s_client,
)
else:
if args.use_private_ip:
logger.info("Using each node's private IP address")
if args.override_ssh_host:
logger.info("Using each overriten host: %s", args.override_ssh_host)
executor = SSHExecutor(
user=args.remote_user,
ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys,
ssh_path_to_private_key=args.ssh_path_to_private_key,
override_host=args.override_ssh_host,
ssh_password=args.ssh_password,
use_private_ip=args.use_private_ip,
ssh_kill_command=args.ssh_kill_command,
)
##########################################################################
# INTERACTIVE MODE
##########################################################################
if args.mode == 'interactive':
# create a command parser
cmd = PSCmd(
inventory=inventory,
driver=driver,
executor=executor,
k8s_inventory=k8s_inventory,
)
logger.info("STARTING INTERACTIVE MODE")
while True:
try:
cmd.cmdloop()
except GeneratorExit:
print("Exiting")
sys.exit(0)
except KeyboardInterrupt:
print()
print("Ctrl-c again to quit")
try:
input()
except KeyboardInterrupt:
sys.exit(0)
return
##########################################################################
# METRICS
##########################################################################
metric_collector = StdoutCollector()
if args.prometheus_collector:
flask_debug = os.environ.get("FLASK_DEBUG")
flask_env = os.environ.get("FLASK_ENVIROMENT")
if flask_debug is not None or (flask_env is not None and flask_env != "production"):
logger.error("PROMETHEUS METRICS NOT SUPPORTED WHEN USING FLASK RELOAD. NOT STARTING THE SERVER")
else:
logger.info("Starting prometheus metrics server (%s:%s)", args.prometheus_host, args.prometheus_port)
start_http_server(args.prometheus_port, args.prometheus_host)
metric_collector = PrometheusCollector()
elif args.datadog_collector:
logger.info("Starting datadog collector")
metric_collector = DatadogCollector()
else:
logger.info("Using stdout metrics collector")
##########################################################################
# AUTONOMOUS MODE
##########################################################################
if args.mode == 'autonomous':
runner = PolicyRunner(args.policy_file, k8s_client, logger)
# run the metrics server if requested
if not args.headless:
# start the server
logger.info("Starting the UI server (%s:%s)", args.host, args.port)
start_server(
host=args.host,
port=args.port,
read_policy_fn=runner.read_policy,
accept_proxy_headers=args.accept_proxy_headers,
logger=server_log_handler,
)
else:
logger.info("NOT starting the UI server")
logger.info("STARTING AUTONOMOUS MODE")
success = runner.run(
inventory,
k8s_inventory,
driver,
executor,
metric_collector=metric_collector
)
if not success:
logger.error("Policy runner finishes with an error")
return sys.exit(1)
return sys.exit(0)
##########################################################################
# LABEL MODE
##########################################################################
elif args.mode == 'label':
label_runner = LabelRunner(
inventory,
k8s_inventory,
driver,
executor,
min_seconds_between_runs=args.min_seconds_between_runs,
max_seconds_between_runs=args.max_seconds_between_runs,
namespace=args.kubernetes_namespace,
metric_collector=metric_collector,
)
logger.info("STARTING LABEL MODE")
label_runner.run()
def start():
main(sys.argv[1:])
if __name__ == '__main__':
start()
|
jarviscli/tests/test_parser.py | baptoutiego/Jarvis | 2,605 | 11066068 | <gh_stars>1000+
import unittest
from Jarvis import Jarvis
class ParserTest(unittest.TestCase):
def setUp(self):
self.jarvis = Jarvis()
def test_chuck(self):
user_input = "Jarvis, I want to hear a joke about <NAME>, can you help me?"
parsed_input = self.jarvis.parse_input(user_input).split()
self.assertEqual("joke", parsed_input[0])
def test_weather(self):
user_input = "Mmm... I want to go for a walk. What's the weather like today?"
parsed_input = self.jarvis.parse_input(user_input).split()
self.assertEqual("weather", parsed_input[0])
def test_goodbye(self):
user_input = "Thanks for your hard work Jarvis, goodbye!"
parsed_input = self.jarvis.parse_input(user_input).split()
self.assertEqual("goodbye", parsed_input[0])
def test_check_ram(self):
user_input = "It would be cool if you could check my computers ram"
parsed_input = self.jarvis.parse_input(user_input).split()
self.assertEqual(["check"], parsed_input[0:1])
def test_say(self):
user_input = "Can you say I'm a robot"
parsed_input = self.jarvis.parse_input(user_input).split()
self.assertEqual(["say", "i'm", "a", "robot"], parsed_input[0:])
def test_near(self):
user_input = "charities near Valencia"
parsed_input = self.jarvis.parse_input(user_input).split()
self.assertEqual(
["near", "charities", "|", "valencia"], parsed_input[0:])
if __name__ == '__main__':
unittest.main()
|
alephnull/examples/test_algo.py | flatM/AlephNull | 234 | 11066073 | <filename>alephnull/examples/test_algo.py
#!/usr/bin/env python
#
# Copyright 2013 <NAME> Wealth Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
import string
import numpy as np
import pandas as pd
from pandas import DataFrame
from alephnull.algorithm import TradingAlgorithm
from alephnull.sources.futures_data_frame_source import FuturesDataFrameSource
from alephnull.roll_method import roll
source = DataFrame(np.random.uniform(100, 200, [60, 30]))
cols = ['price', 'volume', 'open_interest']
scale = (len(source.columns) / len(cols))
source.columns = [scale * cols]
sym = lambda x: np.random.choice([abc for abc in x],
np.random.choice([2, 3]))
month = lambda x: np.random.choice([abc for abc in x],
np.random.choice([1]))
contracts = np.ravel([[(''.join(month(string.letters[:26])) +
str(np.random.choice([14, 15, 16])))] * len(cols)
for x in xrange(len(source.columns) / len(cols) / 2)])
level_1 = len(source.columns) / len(contracts) * list(contracts)
numsyms = len(source.columns) / (len(set(level_1)) * len(cols))
underlyings = [''.join(sym(string.letters[:26])) for x in xrange(numsyms)]
level_0 = np.ravel([[sym] * len(set(level_1)) * len(cols) for sym in underlyings])
source.columns = pd.MultiIndex.from_tuples(zip(level_0, level_1, source.columns))
source.index = pd.date_range(start=dt.datetime.utcnow() - dt.timedelta(days=len(source.index) - 1),
end=dt.datetime.utcnow(), freq='D')
futdata = FuturesDataFrameSource(source.tz_localize('UTC'))
class FrontTrader(TradingAlgorithm):
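    # A sketch of the roll decorator's contract, inferred from its use here: the
    # selector runs per symbol before handle_data and keeps the contract row(s)
    # with the greatest open interest, i.e. the front-month contract.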
@roll(lambda x: x[x['open_interest'] == x['open_interest'].max()])
def handle_data(self, data):
for sym in data.keys():
self.order((sym, data[sym]['contract']), 2)
return data
bot = FrontTrader()
stats = bot.run(futdata)
|
mmdet3d/models/roi_heads/mask_heads/__init__.py | Guangyun-Xu/mmdetection3d | 2,216 | 11066084 | <filename>mmdet3d/models/roi_heads/mask_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .pointwise_semantic_head import PointwiseSemanticHead
from .primitive_head import PrimitiveHead
__all__ = ['PointwiseSemanticHead', 'PrimitiveHead']
|
jina/proto/__init__.py | Rohitpandit021/jina | 15,179 | 11066096 | """
The :mod:`jina.proto` defines the protobuf used in jina. It is the core message protocol used for communication between :class:`jina.peapods.base.BasePod` objects. It also defines the interface of a gRPC service.
"""
|
data_preparation/metadata_completion/text_cleaner.py | dweekly/libri-light | 246 | 11066099 | <gh_stars>100-1000
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import json
import progressbar
import sys
sys.path.append('..')  # must run before the text_retrieval import below
from text_retrieval.guttenberg import is_guttenberg_url
from .utilities import get_txt_name
def loadData(pathFile):
with open(pathFile, 'r') as file:
data = file.readlines()
indexStartProject = -1
indexProducedBy = -1
indexEndProject = -1
for index, line in enumerate(data):
if indexStartProject < 0:
value = line.replace(' ', '').find("***START")
if value >= 0:
indexStartProject = index
elif line.find("CONTENTS") >= 0:
indexStartProject = index
else:
continue
value = line.replace(' ', '').find("***END")
if value >= 0:
indexEndProject = index
break
if indexProducedBy < 0:
value = line.find("Produced by")
if value >= 0:
indexProducedBy = index
if indexStartProject < 0:
return None
if indexEndProject < 0:
indexEndProject = len(data)
startIndex = indexProducedBy + 1 if indexProducedBy > 0 \
else indexStartProject + 1
while startIndex < len(data) and data[startIndex] == '\n':
startIndex += 1
return ''.join(data[startIndex:indexEndProject])
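

# Hedged sketch of the layout loadData() scans for (illustrative text only):
#   ...Gutenberg header...
#   *** START OF THIS PROJECT GUTENBERG EBOOK ... ***
#   Produced by ...            <- optional; the body starts after it when present
#   <body text returned by loadData>
#   *** END OF THIS PROJECT GUTENBERG EBOOK ... ***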
def find404Error(pathFile):
with open(pathFile, 'r') as file:
data = file.readlines()
return len(data) == 1 and \
data[0] == "<h1>404 Not Found</h1><p>File not found.</p>"
def clean_all_text_data(metadataList, pathInDir, pathOutDir):
pathInDir = os.path.abspath(pathInDir)
pathOutDir = os.path.abspath(pathOutDir)
if pathInDir == pathOutDir:
raise ValueError("Can't save the data in the same directory \
as the originals")
bar = progressbar.ProgressBar(maxval=len(metadataList))
bar.start()
nCleaned = 0
nMissing = 0
nNotWorking = 0
emptyTxt = []
out = []
for index, metadataName in enumerate(metadataList):
bar.update(index)
textFileName = get_txt_name(metadataName)
pathInFile = os.path.join(pathInDir, textFileName)
outPathFile = os.path.join(pathOutDir, textFileName)
if not os.path.isfile(pathInFile):
status = "missing"
nMissing += 1
else:
assert(pathInFile != outPathFile)
with open(os.path.join(pathInDir, metadataName), 'rb') as file:
urlSource = json.load(file)["url_text_source"]
if not is_guttenberg_url(urlSource):
os.popen(f'cp {pathInFile} {outPathFile}')
status = "clear"
else:
outData = loadData(pathInFile)
if outData is None:
nNotWorking += 1
if find404Error(pathInFile):
emptyTxt.append(pathInFile)
status = "missing"
else:
status = "noisy"
else:
with open(outPathFile, 'w') as file:
file.write(outData)
status = "clear"
out.append((metadataName, status))
nCleaned += 1
bar.finish()
print(f"Out of {len(metadataList)} items")
print(f"{nCleaned} files were cleaned and saved to {pathOutDir}")
print(f"{nNotWorking} files didn't match the good format among which {len(emptyTxt)} were empty")
print(f"{nMissing} files were missing")
return out
if __name__ == "__main__":
pathDirData = "/checkpoint/mriviere/LibriVox/"
pathOutData = "/checkpoint/mriviere/LibriVox_cleanTxt/"
if not os.path.isdir(pathOutData):
os.mkdir(pathOutData)
clean_all_text_data(pathDirData, pathOutData)
# pathTestFile = "/checkpoint/mriviere/LibriVox/sadhana_realisation_librivox_64kb_mp3_text.txt"
# print(find404Error(pathTestFile))
|
src/capb_parser.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 174 | 11066105 | import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
class CAPB():
def __init__(self, data_dir, set_type, train_val):
        # set_type is DeepFashion's category_type (1=upper-body, 2=lower-body, 3=full-body); train_val should be in ['train', 'val', 'test']
self.data_dir = data_dir
set_df = pd.read_table(data_dir + 'Eval/list_eval_partition.txt', delim_whitespace=True, skiprows=1)
set_df = set_df[set_df['evaluation_status']==train_val]
cate_cloth_df = pd.read_table(data_dir + 'Anno/list_category_cloth.txt', delim_whitespace=True, skiprows=1)
cate_idcs = (cate_cloth_df.index[cate_cloth_df['category_type'] == set_type]+1).tolist()
cate_img_df = pd.read_table(data_dir + 'Anno/list_category_img.txt', delim_whitespace=True, skiprows=1)
cate_img_df = cate_img_df[cate_img_df['category_label'].isin(cate_idcs)]
bbox_df = pd.read_table(data_dir + 'Anno/list_bbox.txt', delim_whitespace=True, skiprows=1)
bbox_df = bbox_df[bbox_df['image_name'].isin(set_df['image_name'])]
self.anno_df = bbox_df[bbox_df['image_name'].isin(cate_img_df['image_name'])]
def size(self):
return len(self.anno_df)
def get_image_path(self, image_index):
row = self.anno_df.iloc[image_index]
image_path = self.data_dir + row['image_name']
return image_path
def get_bbox(self, image_index):
row = self.anno_df.iloc[image_index]
bbox = np.array([[row['x_1'], row['y_1'], row['x_2'], row['y_2']]], dtype=np.float32)
return bbox
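# Hedged usage sketch (the dataset root path is an assumption):
#   capb = CAPB('/data/CAPB/', set_type=1, train_val='train')
#   for i in range(capb.size()):
#       image_path, bbox = capb.get_image_path(i), capb.get_bbox(i)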
if __name__ == '__main__':
db_path = '/home/storage/lsy/fashion/Category_and_Attribution_Prediction_Benchmark/'
for k in [1,2,3]:
for t in ['train', 'val', 'test']:
kpda = CAPB(db_path, k, t)
print('%d '%k + t + ' : %d' % kpda.size())
|
airflow/providers/google/cloud/hooks/cloud_memorystore.py | npodewitz/airflow | 8,092 | 11066125 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Hooks for Cloud Memorystore service.
.. spelling::
DataProtectionMode
FieldMask
pb
memcache
"""
from typing import Dict, Optional, Sequence, Tuple, Union
from google.api_core import path_template
from google.api_core.exceptions import NotFound
from google.api_core.gapic_v1.method import DEFAULT, _MethodDefault
from google.api_core.retry import Retry
from google.cloud.memcache_v1beta2 import CloudMemcacheClient
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.cloud.redis_v1 import (
CloudRedisClient,
FailoverInstanceRequest,
InputConfig,
Instance,
OutputConfig,
)
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
class CloudMemorystoreHook(GoogleBaseHook):
"""
Hook for Google Cloud Memorystore APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client: Optional[CloudRedisClient] = None
def get_conn(self) -> CloudRedisClient:
"""Retrieves client library object that allow access to Cloud Memorystore service."""
if not self._client:
self._client = CloudRedisClient(credentials=self._get_credentials())
return self._client
@staticmethod
def _append_label(instance: Instance, key: str, val: str) -> Instance:
"""
Append labels to provided Instance type
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param instance: The proto to append resource_label airflow
version to
:param key: The key label
        :param val: The label value
        :return: The instance proto updated with the new label
"""
val = val.replace(".", "-").replace("+", "-")
instance.labels.update({key: val})
return instance
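
    # Hedged example: a version label is normalized to satisfy the label regex
    # documented above, e.g. val "2.3.0+composer" is stored as "2-3-0-composer".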
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
location: str,
instance_id: str,
instance: Union[Dict, Instance],
project_id: str = PROVIDE_PROJECT_ID,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Creates a Redis instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Redis instance in the customer project with the
following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Redis [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(instance, dict):
instance = Instance(**instance)
elif not isinstance(instance, Instance):
raise AirflowException("instance is not instance of Instance type or python dict")
parent = f"projects/{project_id}/locations/{location}"
instance_name = f"projects/{project_id}/locations/{location}/instances/{instance_id}"
try:
self.log.info("Fetching instance: %s", instance_name)
instance = client.get_instance(
request={'name': instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
self.log.info("Instance exists. Skipping creation.")
return instance
except NotFound:
self.log.info("Instance not exists.")
self._append_label(instance, "airflow-version", "v" + version.version)
result = client.create_instance(
request={'parent': parent, 'instance_id': instance_id, 'instance': instance},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance created.")
return client.get_instance(
request={'name': instance_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
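
    # Hedged usage sketch -- the connection id, names and Instance fields below
    # are illustrative assumptions, not defaults of this hook:
    #   hook = CloudMemorystoreHook(gcp_conn_id="google_cloud_default")
    #   hook.create_instance(location="europe-west1", instance_id="my-redis",
    #                        instance={"tier": 1, "memory_size_gb": 1})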
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
location: str,
instance: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Deletes a specific Redis instance. Instance stops serving and data is deleted.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Fetching Instance: %s", name)
instance = client.get_instance(
request={'name': name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
if not instance:
return
self.log.info("Deleting Instance: %s", name)
result = client.delete_instance(
request={'name': name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance deleted: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def export_instance(
self,
location: str,
instance: str,
output_config: Union[Dict, OutputConfig],
project_id: str = PROVIDE_PROJECT_ID,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Export Redis instance data into a Redis RDB format file in Cloud Storage.
Redis will continue serving during this operation.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param output_config: Required. Specify data to be exported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.OutputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Exporting Instance: %s", name)
result = client.export_instance(
request={'name': name, 'output_config': output_config},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance exported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def failover_instance(
self,
location: str,
instance: str,
data_protection_mode: FailoverInstanceRequest.DataProtectionMode,
project_id: str = PROVIDE_PROJECT_ID,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Initiates a failover of the primary node to current replica node for a specific STANDARD tier Cloud
Memorystore for Redis instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
        :param data_protection_mode: Optional. Available data protection modes that the user can choose. If
            it's unspecified, data protection mode will be LIMITED_DATA_LOSS by default. See
            :class:`~google.cloud.redis_v1.types.FailoverInstanceRequest.DataProtectionMode`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Failovering Instance: %s", name)
result = client.failover_instance(
request={'name': name, 'data_protection_mode': data_protection_mode},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance failovered: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
location: str,
instance: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Gets the details of a specific Redis instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
result = client.get_instance(
request={'name': name},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Fetched Instance: %s", name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def import_instance(
self,
location: str,
instance: str,
input_config: Union[Dict, InputConfig],
project_id: str = PROVIDE_PROJECT_ID,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Import a Redis RDB snapshot file from Cloud Storage into a Redis instance.
        Redis may stop serving during this operation. Instance state will be IMPORTING for the entire operation.
When complete, the instance will contain only data from the imported file.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Redis instance in the customer project.
:param input_config: Required. Specify data to be imported.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.InputConfig`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
name = f"projects/{project_id}/locations/{location}/instances/{instance}"
self.log.info("Importing Instance: %s", name)
result = client.import_instance(
request={'name': name, 'input_config': input_config},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance imported: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def list_instances(
self,
location: str,
page_size: int,
project_id: str = PROVIDE_PROJECT_ID,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Lists all Redis instances owned by a project in either the specified location (region) or all
locations.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
        :param page_size: The maximum number of resources contained in the underlying API response. If page
            streaming is performed per-resource, this parameter does not affect the return value. If page
            streaming is performed per-page, this determines the maximum number of resources in a page.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
parent = f"projects/{project_id}/locations/{location}"
result = client.list_instances(
request={'parent': parent, 'page_size': page_size},
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Fetched instances")
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
update_mask: Union[Dict, FieldMask],
instance: Union[Dict, Instance],
project_id: str = PROVIDE_PROJECT_ID,
location: Optional[str] = None,
instance_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Updates the metadata and configuration of a specific Redis instance.
:param update_mask: Required. Mask of fields to update. At least one path must be supplied in this
field. The elements of the repeated paths field may only include these fields from ``Instance``:
- ``displayName``
- ``labels``
- ``memorySizeGb``
- ``redisConfig``
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param instance: Required. Update description. Only fields specified in ``update_mask`` are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.redis_v1.types.Instance`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
if isinstance(instance, dict):
instance = Instance(**instance)
elif not isinstance(instance, Instance):
            raise AirflowException("instance is not an instance of Instance type or python dict")
if location and instance_id:
name = f"projects/{project_id}/locations/{location}/instances/{instance_id}"
instance.name = name
self.log.info("Updating instances: %s", instance.name)
result = client.update_instance(
request={'update_mask': update_mask, 'instance': instance},
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance updated: %s", instance.name)
class CloudMemorystoreMemcachedHook(GoogleBaseHook):
"""
Hook for Google Cloud Memorystore for Memcached service APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client: Optional[CloudMemcacheClient] = None
def get_conn(
self,
):
"""Retrieves client library object that allow access to Cloud Memorystore Memcached service."""
if not self._client:
self._client = CloudMemcacheClient(credentials=self._get_credentials())
return self._client
@staticmethod
def _append_label(instance: cloud_memcache.Instance, key: str, val: str) -> cloud_memcache.Instance:
"""
Append labels to provided Instance type
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param instance: The proto to append resource_label airflow
version to
:param key: The key label
:param val:
:return: The cluster proto updated with new label
"""
val = val.replace(".", "-").replace("+", "-")
instance.labels.update({key: val})
return instance
@GoogleBaseHook.fallback_to_default_project_id
def apply_parameters(
self,
node_ids: Sequence[str],
apply_all: bool,
project_id: str,
location: str,
instance_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
        Update the current set of parameters on the specified nodes of the Memcached instance.
:param node_ids: Nodes to which we should apply the instance-level parameter group.
:param apply_all: Whether to apply instance-level parameter group to all nodes. If set to true,
will explicitly restrict users from specifying any nodes, and apply parameter group updates
to all nodes within the instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
self.log.info("Applying update to instance: %s", instance_id)
result = client.apply_parameters(
name=name,
node_ids=node_ids,
apply_all=apply_all,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance updated: %s", instance_id)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
location: str,
instance_id: str,
instance: Union[Dict, cloud_memcache.Instance],
project_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Creates a Memcached instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Memcached instance in the customer project
with the following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Memcached [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
parent = path_template.expand(
"projects/{project}/locations/{location}", project=project_id, location=location
)
instance_name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
try:
instance = client.get_instance(
name=instance_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info("Instance exists. Skipping creation.")
return instance
except NotFound:
self.log.info("Instance not exists.")
if isinstance(instance, dict):
instance = cloud_memcache.Instance(instance)
elif not isinstance(instance, cloud_memcache.Instance):
            raise AirflowException("instance is not an instance of Instance type or python dict")
self._append_label(instance, "airflow-version", "v" + version.version)
result = client.create_instance(
parent=parent,
instance_id=instance_id,
resource=instance,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance created.")
return client.get_instance(
name=instance_name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Deletes a specific Memcached instance. Instance stops serving and data is deleted.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance)
self.log.info("Fetching Instance: %s", name)
instance = client.get_instance(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
if not instance:
return
self.log.info("Deleting Instance: %s", name)
result = client.delete_instance(
name=name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Instance deleted: %s", name)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
location: str,
instance: str,
project_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Gets the details of a specific Memcached instance.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
name = CloudMemcacheClient.instance_path(project_id, location, instance)
result = client.get_instance(name=name, retry=retry, timeout=timeout, metadata=metadata or ())
self.log.info("Fetched Instance: %s", name)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_instances(
self,
location: str,
project_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Lists all Memcached instances owned by a project in either the specified location (region) or all
locations.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
parent = path_template.expand(
"projects/{project}/locations/{location}", project=project_id, location=location
)
result = client.list_instances(
parent=parent,
retry=retry,
timeout=timeout,
metadata=metadata,
)
self.log.info("Fetched instances")
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
update_mask: Union[Dict, FieldMask],
instance: Union[Dict, cloud_memcache.Instance],
project_id: str,
location: Optional[str] = None,
instance_id: Optional[str] = None,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
Updates the metadata and configuration of a specific Memcached instance.
        :param update_mask: Required. Mask of fields to update. At least one path must be supplied in this
            field. The elements of the repeated paths field may only include these fields from ``Instance``:
            - ``displayName``
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.protobuf.field_mask_pb2.FieldMask`
:param instance: Required. Update description. Only fields specified in ``update_mask`` are updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
if isinstance(instance, dict):
instance = cloud_memcache.Instance(instance)
elif not isinstance(instance, cloud_memcache.Instance):
raise AirflowException("instance is not instance of Instance type or python dict")
if location and instance_id:
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
instance.name = name
self.log.info("Updating instances: %s", instance.name)
result = client.update_instance(
update_mask=update_mask, resource=instance, retry=retry, timeout=timeout, metadata=metadata or ()
)
result.result()
self.log.info("Instance updated: %s", instance.name)
@GoogleBaseHook.fallback_to_default_project_id
def update_parameters(
self,
update_mask: Union[Dict, FieldMask],
parameters: Union[Dict, cloud_memcache.MemcacheParameters],
project_id: str,
location: str,
instance_id: str,
retry: Union[Retry, _MethodDefault] = DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
"""
        Updates the defined Memcached Parameters for an existing Instance. This method only stages the
        parameters; it must be followed by ``apply_parameters`` to apply the parameters to the nodes of
        the Memcached instance.
        :param update_mask: Required. Mask of fields to update.
            If a dict is provided, it must be of the same form as the protobuf message
            :class:`~google.protobuf.field_mask_pb2.FieldMask`
:param parameters: The parameters to apply to the instance.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.MemcacheParameters`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Memcached instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
client = self.get_conn()
metadata = metadata or ()
if isinstance(parameters, dict):
parameters = cloud_memcache.MemcacheParameters(parameters)
elif not isinstance(parameters, cloud_memcache.MemcacheParameters):
            raise AirflowException("parameters is not an instance of MemcacheParameters type or python dict")
name = CloudMemcacheClient.instance_path(project_id, location, instance_id)
self.log.info("Staging update to instance: %s", instance_id)
result = client.update_parameters(
name=name,
update_mask=update_mask,
parameters=parameters,
retry=retry,
timeout=timeout,
metadata=metadata,
)
result.result()
self.log.info("Update staged for instance: %s", instance_id)
|
tests/import/pkg6/x/y.py | sebastien-riou/micropython | 13,648 | 11066130 | print("y")
|
tests/h/tweens_test.py | pombredanne/h | 2,103 | 11066145 | from unittest import mock
from unittest.mock import MagicMock
import pytest
from h_matchers import Any
from h import tweens
from h.util.redirects import Redirect
class TestRedirectTween:
def test_it_loads_redirects(self, patch):
parse_redirects = patch("h.tweens.parse_redirects")
tweens.redirect_tween_factory(handler=None, registry=None)
parse_redirects.assert_called_once_with(
# Check parse_redirects is called with a file like object
Any.object.with_attrs({"readlines": Any.callable()})
)
def test_it_loads_successfully(self):
# Don't mock parse_redirects out to check the file actually parses
tweens.redirect_tween_factory(handler=None, registry=None)
def test_it_does_not_redirect_for_non_redirected_routes(self, pyramid_request):
redirects = [
Redirect(src="/foo", dst="http://bar", internal=False, prefix=False)
]
pyramid_request.path = "/quux"
tween = tweens.redirect_tween_factory(
lambda req: req.response, pyramid_request.registry, redirects
)
response = tween(pyramid_request)
assert response.status_code == 200
def test_it_redirects_for_redirected_routes(self, pyramid_request):
redirects = [
Redirect(src="/foo", dst="http://bar", internal=False, prefix=False)
]
pyramid_request.path = "/foo"
tween = tweens.redirect_tween_factory(
lambda req: req.response, pyramid_request.registry, redirects
)
response = tween(pyramid_request)
assert response.status_code == 301
assert response.location == "http://bar"
class TestSecurityHeaderTween:
def test_it_adds_security_headers_to_the_response(self, pyramid_request):
tween = tweens.security_header_tween_factory(
lambda req: req.response, pyramid_request.registry
)
response = tween(pyramid_request)
assert (
response.headers["Referrer-Policy"]
== "origin-when-cross-origin, strict-origin-when-cross-origin"
)
assert response.headers["X-XSS-Protection"] == "1; mode=block"
class TestCacheHeaderTween:
@pytest.mark.parametrize(
"content_type, expected_cc_header",
[
# It doesn't add any headers for HTML pages.
("text/html", None),
# It adds Cache-Control: no-cache for JSON responses.
("application/json", "no-cache"),
# It doesn't add any headers for responses with no content (eg. 204
# response to a `DELETE` request).
(None, None),
],
)
def test_it_adds_caching_headers_to_the_response(
self, pyramid_request, content_type, expected_cc_header
):
tween = tweens.cache_header_tween_factory(
lambda req: req.response, pyramid_request.registry
)
if content_type is not None:
pyramid_request.response.headers["Content-Type"] = content_type
response = tween(pyramid_request)
assert response.headers.get("Cache-Control") == expected_cc_header
class TestDBRollbackSessionOnExceptionTween:
def test_it_does_nothing_usually(self, handler, pyramid_request):
tween = tweens.rollback_db_session_on_exception_factory(
handler, pyramid_request.registry
)
tween(pyramid_request)
handler.assert_called_once_with(pyramid_request)
pyramid_request.db.rollback.assert_not_called()
def test_it_calls_db_rollback_on_exception(self, handler, pyramid_request):
handler.side_effect = IOError
tween = tweens.rollback_db_session_on_exception_factory(
handler, pyramid_request.registry
)
with pytest.raises(IOError):
tween(pyramid_request)
handler.assert_called_once_with(pyramid_request)
pyramid_request.db.rollback.assert_called_once_with()
@pytest.fixture
def handler(self):
return mock.create_autospec(lambda request: None) # pragma: nocover
@pytest.fixture
def pyramid_request(self, pyramid_request):
pyramid_request.db = MagicMock(spec_set=["rollback"])
return pyramid_request
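# For context: a Pyramid tween factory takes (handler, registry) and returns a
# callable mapping a request to a response. A minimal sketch of the shape these
# tests exercise (an illustration, not h's actual implementation):
#
#   def security_header_tween_factory(handler, registry):
#       def tween(request):
#           response = handler(request)
#           response.headers["X-XSS-Protection"] = "1; mode=block"
#           return response
#       return tween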
|
runtime/server/x86_gpu/client/offline_client.py | pengchengguo/wenet | 1,166 | 11066161 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tritonclient.utils import np_to_triton_dtype
import numpy as np
import soundfile as sf
class SpeechClient(object):
def __init__(self, triton_client, model_name, protocol_client):
self.triton_client = triton_client
self.protocol_client = protocol_client
self.model_name = model_name
def recognize(self, wav_file, idx=0):
waveform, sample_rate = sf.read(wav_file)
samples = np.array([waveform], dtype=np.float32)
lengths = np.array([[len(waveform)]], dtype=np.int32)
sequence_id = 10086 + idx
result = ''
inputs = [
self.protocol_client.InferInput("WAV", samples.shape,
np_to_triton_dtype(samples.dtype)),
self.protocol_client.InferInput("WAV_LENS", lengths.shape,
np_to_triton_dtype(lengths.dtype))
]
inputs[0].set_data_from_numpy(samples)
inputs[1].set_data_from_numpy(lengths)
outputs = [self.protocol_client.InferRequestedOutput("TRANSCRIPTS")]
response = self.triton_client.infer(self.model_name,
inputs,
request_id=str(sequence_id),
outputs=outputs)
result = response.as_numpy("TRANSCRIPTS")[0].decode("utf-8")
return [result]
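# A minimal usage sketch (assumptions: server URL, model name and wav path are
# illustrative; tritonclient.grpc is one of the protocol clients Triton ships):
#
#   import tritonclient.grpc as grpcclient
#
#   triton_client = grpcclient.InferenceServerClient(url="localhost:8001")
#   client = SpeechClient(triton_client, "attention_rescoring", grpcclient)
#   print(client.recognize("test.wav", idx=0))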
|
library/connecter/ansible/yaml/data2db.py | GNHJM/lykops | 141 | 11066172 | <gh_stars>100-1000
import os
from library.connecter.ansible.yaml import Yaml_Base
class Data_DB(Yaml_Base):
def router(self, content, name, yaml_tpye='main', file_type='tasks', preserve=True, together=False, describe=''):
        '''
        Check whether the YAML data is syntactically valid; if it contains include and/or roles, store them in the backend database.
        :parameters
            content: the content
            name: the name under which the YAML content is stored
            yaml_tpye: the YAML data type (full_roles, main, include or roles)
            file_type: the file type
            preserve: whether to write the data to the database
            together: whether to also return the content of all files under this main
            describe: the description stored with the YAML content
            zhname: a short display name (in Chinese) stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success, (True, file content (as a dict));
            on failure, (False, the reason for the failure)
        '''
if yaml_tpye in ('full_roles' , 'main') :
result = self.main(content, name, preserve=preserve, together=together, describe=describe)
elif yaml_tpye == 'include' :
result = self.include(content, name, file_type=file_type, preserve=preserve, describe=describe)
elif yaml_tpye == 'roles' :
result = self.roles(content, name, preserve=preserve, together=together, describe=describe)
else :
            self.logger.error('Action: check YAML data syntax and store it in the backend database; result: failed, reason: parameter yaml_tpye=' + yaml_tpye + ' is not an accepted value, only full_roles, main, include and roles are accepted')
            return (False, 'Parameter yaml_tpye=' + yaml_tpye + ' is not an accepted value, only full_roles, main, include and roles are accepted')
return result
def main(self, content, name, preserve=True, together=False, describe=''):
        '''
        Check whether a main file is syntactically valid; any include and/or roles it references are assumed to be stored in the backend database.
        :parameters
            content: the content
            name: the name under which the YAML content is stored
            preserve: whether to write the data to the database
            together: whether to also return the content of all files under this main
            describe: the description stored with the YAML content
            zhname: a short display name (in Chinese) stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success, (True, file content (as a dict));
            on failure, (False, the reason for the failure)
        '''
result = self.yaml_loader(content, data_type='data')
if result[0] :
(content, yaml_data) = result[2:]
else :
            self.logger.error('Syntax check of YAML data named ' + name + ' (type full_roles or main) failed: conversion to YAML data failed, reason: ' + result[1])
            return (False, 'Conversion to YAML data failed, ' + result[1])
result = self.check_main(yaml_data)
        self.logger.debug(result)
if result[0] :
(roles_list, includefile_dict) = result[1:]
else :
            self.logger.error('Syntax check of YAML data named ' + name + ' (type full_roles or main) failed: did not pass the YAML syntax check, reason: ' + result[1])
            return (False, 'Did not pass the YAML syntax check, ' + result[1])
include_content_dict = {}
roles_content_dict = {}
for file in includefile_dict :
result = self.read2db(file, word_field='name')
if not result[0] :
                self.logger.error('Syntax check of YAML data named ' + name + ' (type full_roles or main) failed: lookup of the include named ' + file + ' failed, reason: ' + result[1])
                return (False, 'Lookup of the include named ' + file + ' failed, ' + result[1])
else :
try :
include_content = result[1]['content']
include_content_dict.update({file:include_content})
except :
                    self.logger.error('Syntax check of YAML data named ' + name + ' (type full_roles or main) failed: lookup of the include named ' + file + ' failed, reason: the query result has no content field')
                    return (False, 'Lookup of the include named ' + file + ' failed, the query result has no content field')
for roles in roles_list :
result = self.read2db(roles, word_field='name')
if result[0] :
try :
content_dict = result[1]['content']
if 'include' in content_dict :
                        include_content_dict.update(content_dict['include'])
roles_content_dict.update({roles:content_dict['roles']})
except :
                    self.logger.error('Syntax check of YAML data named ' + name + ' (type full_roles or main) failed: lookup of the roles named ' + roles + ' failed, the query result has no content field')
                    return (False, 'Lookup of the roles named ' + roles + ' failed, the query result has no content field')
else :
                return (False, 'Lookup of the roles named ' + roles + ' failed, ' + result[1])
data = {
'main' : content,
'include': include_content_dict,
'roles': roles_content_dict,
}
if preserve :
result = self.write2db(name, data, 'main', describe=describe)
if not result[0] :
                self.logger.error('Syntax check of YAML data named ' + name + ' (type full_roles or main) failed: passed the YAML syntax check but could not be written to the database, reason: ' + result[1])
                return (False, 'Passed the YAML syntax check but could not be written to the database, ' + result[1])
        self.logger.info('Syntax check of YAML data named ' + name + ' (type full_roles or main) succeeded')
if together :
return (True, data)
else :
return (True, content)
def include(self, content, name, file_type='main', preserve=True, describe=''):
        '''
        Check whether an include file is syntactically valid; any include and/or roles it references are assumed to be stored in the backend database.
        :parameters
            content: the content
            name: the name under which the YAML content is stored
            preserve: whether to write the data to the database
            file_type: the file type
            together: whether to also return the content of all files under this main
            describe: the description stored with the YAML content
            zhname: a short display name (in Chinese) stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success, (True, file content (as a dict));
            on failure, (False, the reason for the failure)
        '''
result = self.yaml_loader(content, data_type='data')
if result[0] :
(content, yaml_data) = result[2:]
else :
            self.logger.error('Syntax check of YAML data named ' + name + ' (type include) failed: conversion to YAML data failed, reason: ' + result[1])
            return (False, 'Conversion to YAML data failed, ' + result[1])
result = self.check_include(yaml_data, file_type=file_type)
if not result[0] :
            self.logger.error('Syntax check of YAML data named ' + name + ' (type include) failed: did not pass the YAML syntax check, reason: ' + result[1])
            return (False, 'Did not pass the YAML syntax check, ' + result[1])
if preserve :
result = self.write2db(name, content, 'include', describe=describe)
if not result[0] :
                self.logger.error('Syntax check of YAML data named ' + name + ' (type include) failed: passed the YAML syntax check but could not be written to the database, reason: ' + result[1])
                return (False, 'Passed the YAML syntax check but could not be written to the database, ' + result[1])
        self.logger.info('Syntax check of YAML data named ' + name + ' (type include) succeeded')
return (True, content)
def roles(self, content, name, preserve=True, together=False, describe=''):
        '''
        Check whether a roles definition is syntactically valid; any include and/or roles it references are assumed to be stored in the backend database.
        :parameters
            content: the content
            name: the name under which the YAML content is stored
            preserve: whether to write the data to the database
            together: whether to also return the content of all files under this main
            describe: the description stored with the YAML content
            zhname: a short display name (in Chinese) stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success, (True, file content (as a dict));
            on failure, (False, the reason for the failure)
        '''
content_dict = {}
result = self._isrolesname(name)
if not result :
            self.logger.error('Syntax check of YAML data named ' + name + ' (type roles) failed: did not pass the syntax check, reason: the roles name does not meet the naming requirements of this system (note: native Ansible does allow such names)')
            return (False, 'Did not pass the YAML syntax check, the roles name does not meet the naming requirements of this system (note: native Ansible does allow such names)')
if not isinstance(content, dict) :
            self.logger.error('Syntax check of YAML data named ' + name + ' (type roles) failed: did not pass the syntax check, reason: the parameter content must be a dict')
            return (False, 'Did not pass the YAML syntax check, the parameter content must be a dict')
result = self.check_roles(content)
include_content_dict = {}
if result[0] :
includefile_dict = result[1]
for file in includefile_dict:
result = self.read2db(file, word_field='name')
if not result[0] :
                    self.logger.error('Syntax check of YAML data named ' + name + ' (type roles) failed: did not pass the syntax check, reason: lookup of the include named ' + file + ' failed, ' + result[1])
                    return (False, 'Did not pass the YAML syntax check, lookup of the include named ' + file + ' failed, ' + result[1])
else :
try :
include_content = result[1]['content']
include_content_dict.update({file:include_content})
except :
                        self.logger.error('Syntax check of YAML data named ' + name + ' (type roles) failed: did not pass the syntax check, reason: lookup of the include named ' + file + ' failed, the query result has no content field')
                        return (False, 'Did not pass the YAML syntax check, lookup of the include named ' + file + ' failed, the query result has no content field')
else :
            self.logger.error('Syntax check of YAML data named ' + name + ' (type roles) failed: did not pass the YAML syntax check, syntax error, reason: ' + result[1])
            return (False, 'Did not pass the YAML syntax check, syntax error, ' + result[1])
if 'templates' in content :
temp_content = content['templates']
if not isinstance(temp_content, dict) :
                self.logger.error('Syntax check of YAML data named ' + name + ' (type roles) failed: did not pass the YAML syntax check, templates lookup error, the query result is not a dict')
                return (False, 'Did not pass the YAML syntax check, templates lookup error, the query result is not a dict')
content_dict['templates'] = {}
for temp_file , tempfile_content in temp_content.items() :
temp_file = os.path.basename(temp_file)
content_dict['templates'][temp_file] = tempfile_content
data = {
'main' : {},
'include': include_content_dict,
'roles': content_dict,
}
if preserve :
result = self.write2db(name, data, 'roles', describe=describe)
if not result[0] :
                self.logger.error('Syntax check of YAML data named ' + name + ' (type roles) failed: passed the YAML syntax check but could not be written to the database, ' + result[1])
                return (False, 'Passed the YAML syntax check but could not be written to the database, ' + result[1])
        self.logger.info('Syntax check of YAML data named ' + name + ' (type roles) succeeded')
if together :
            return (True, content_dict, include_content_dict)
else :
return (True, {}, {})
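# A minimal usage sketch (an assumption for illustration: the constructor
# arguments of Data_DB/Yaml_Base are not shown in this excerpt):
#
#   checker = Data_DB(...)
#   ok, detail = checker.router(yaml_text, 'site', yaml_tpye='main',
#                               preserve=True, describe='site playbook')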
|
scripts/examples/OpenMV/25-Machine-Learning/tf_mobilenet_search_just_center.py | jiskra/openmv | 1,761 | 11066188 | # TensorFlow Lite Mobilenet V1 Example
#
# Google's Mobilenet V1 detects 1000 classes of objects
#
# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
# in the real world. It's just designed to score well on the ImageNet dataset.
# This example just shows off running mobilenet on the OpenMV Cam. However, the
# default model is not really usable for anything. You have to use transfer
# learning to apply the model to a target problem by re-training the model.
#
# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better!
# To get the models please see the CNN Network library in OpenMV IDE under
# Tools -> Machine Vision. The labels are there too.
# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
# file and your chosen model into the root folder for this script to work.
#
# In this example we slide the detector window over the image and get a list
# of activations. Note that use a CNN with a sliding window is extremely compute
# expensive so for an exhaustive search do not expect the CNN to be real-time.
import sensor, image, time, os, tf
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240)) # Set 240x240 window.
sensor.skip_frames(time=2000) # Let the camera adjust.
mobilenet_version = "1" # 1
mobilenet_width = "0.5" # 1.0, 0.75, 0.50, 0.25
mobilenet_resolution = "128" # 224, 192, 160, 128
mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
# net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
# specified). A classification score output vector will be generated for each location. At each scale the
# detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
# If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
# the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
# sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
# down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
# Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
# Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
# y_overlap is not -1 the method will search in all vertical positions.
# Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
    # x_overlap is not -1 the method will search in all horizontal positions.
# default settings just do one detection... change them to search the image...
for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
img.draw_rectangle(obj.rect())
# This combines the labels and confidence values into a list of tuples
# and then sorts that list by the confidence values.
sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
for i in range(5):
print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
print(clock.fps(), "fps")
|