Context manager which expects one or more warnings. With no arguments, squelches all SAWarnings emitted via sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise pass string expressions that will match selected warnings via regex; all non-matching warnings are sent through. The expect version **asserts** that the warnings were in fact seen. Note that the test suite sets SAWarning warnings to raise exceptions.
def expect_warnings(*messages, **kw): """Context manager which expects one or more warnings. With no arguments, squelches all SAWarnings emitted via sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise pass string expressions that will match selected warnings via regex; all non-matching warnings are sent through. The expect version **asserts** that the warnings were in fact seen. Note that the test suite sets SAWarning warnings to raise exceptions. """ return _expect_warnings(Warning, messages, **kw)
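A minimal usage sketch (the helper name and warning text below are hypothetical; assumes the wrapped code emits an SAWarning via sqlalchemy.util.warn)::

    # squelch and assert a specific warning, matched by regex
    with expect_warnings("Skipped unsupported reflection"):
        reflect_tables()  # hypothetical call expected to emit the matching warning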
Decorator form of expect_warnings(). Note that emits_warning does **not** assert that the warnings were in fact seen.
def emits_python_deprecation_warning(*messages): """Decorator form of expect_warnings(). Note that emits_warning does **not** assert that the warnings were in fact seen. """ @decorator def decorate(fn, *args, **kw): with _expect_warnings(DeprecationWarning, assert_=False, *messages): return fn(*args, **kw) return decorate
use a postgresql url with no host so that connections are guaranteed to fail
def _no_sql_testing_config(dialect="postgresql", directives=""): """use a postgresql url with no host so that connections guaranteed to fail""" dir_ = os.path.join(_get_staging_directory(), "scripts") return _write_config_file( """ [alembic] script_location = %s sqlalchemy.url = %s:// %s [loggers] keys = root [handlers] keys = console [logger_root] level = WARN handlers = console qualname = [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatters] keys = generic [formatter_generic] format = %%(levelname)-5.5s [%%(name)s] %%(message)s datefmt = %%H:%%M:%%S """ % (dir_, dialect, directives) )
Create a multiple head fixture from the three-revs fixture
def multi_heads_fixture(cfg, a, b, c): """Create a multiple head fixture from the three-revs fixture""" # a->b->c # -> d -> e # -> f d = util.rev_id() e = util.rev_id() f = util.rev_id() script = ScriptDirectory.from_config(cfg) script.generate_revision( d, "revision d from b", head=b, splice=True, refresh=True ) write_script( script, d, """\ "Rev D" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE STEP 4") def downgrade(): op.execute("DROP STEP 4") """ % (d, b), ) script.generate_revision( e, "revision e from d", head=d, splice=True, refresh=True ) write_script( script, e, """\ "Rev E" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE STEP 5") def downgrade(): op.execute("DROP STEP 5") """ % (e, d), ) script.generate_revision( f, "revision f from b", head=b, splice=True, refresh=True ) write_script( script, f, """\ "Rev F" revision = '%s' down_revision = '%s' from alembic import op def upgrade(): op.execute("CREATE STEP 6") def downgrade(): op.execute("DROP STEP 6") """ % (f, b), ) return d, e, f
alembic.ini fixture to work exactly with the 'multidb' template
def _multidb_testing_config(engines): """alembic.ini fixture to work exactly with the 'multidb' template""" dir_ = os.path.join(_get_staging_directory(), "scripts") sqlalchemy_future = "future" in config.db.__class__.__module__ databases = ", ".join(engines.keys()) engines = "\n\n".join( "[%s]\n" "sqlalchemy.url = %s" % (key, value.url) for key, value in engines.items() ) return _write_config_file( """ [alembic] script_location = %s sourceless = false sqlalchemy.future = %s databases = %s %s [loggers] keys = root [handlers] keys = console [logger_root] level = WARN handlers = console qualname = [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatters] keys = generic [formatter_generic] format = %%(levelname)-5.5s [%%(name)s] %%(message)s datefmt = %%H:%%M:%%S """ % (dir_, "true" if sqlalchemy_future else "false", databases, engines) )
A facade around @testing.combinations() oriented towards boolean keyword-based arguments. Basically generates a nice looking identifier based on the keywords and also sets up the argument names. E.g.:: @testing.flag_combinations( dict(lazy=False, passive=False), dict(lazy=True, passive=False), dict(lazy=False, passive=True), dict(lazy=False, passive=True, raiseload=True), ) would result in:: @testing.combinations( ('', False, False, False), ('lazy', True, False, False), ('lazy_passive', True, True, False), ('lazy_passive', True, True, True), id_='iaaa', argnames='lazy,passive,raiseload' )
def flag_combinations(*combinations): """A facade around @testing.combinations() oriented towards boolean keyword-based arguments. Basically generates a nice looking identifier based on the keywords and also sets up the argument names. E.g.:: @testing.flag_combinations( dict(lazy=False, passive=False), dict(lazy=True, passive=False), dict(lazy=False, passive=True), dict(lazy=False, passive=True, raiseload=True), ) would result in:: @testing.combinations( ('', False, False, False), ('lazy', True, False, False), ('lazy_passive', True, True, False), ('lazy_passive', True, True, True), id_='iaaa', argnames='lazy,passive,raiseload' ) """ from sqlalchemy.testing import config keys = set() for d in combinations: keys.update(d) keys = sorted(keys) return config.combinations( *[ ("_".join(k for k in keys if d.get(k, False)),) + tuple(d.get(k, False) for k in keys) for d in combinations ], id_="i" + ("a" * len(keys)), argnames=",".join(keys), )
Given a no-arg lambda and a namespace, return a new lambda that has all the values filled in. This is used so that we can have module-level fixtures that refer to instance-level variables using lambdas.
def resolve_lambda(__fn, **kw): """Given a no-arg lambda and a namespace, return a new lambda that has all the values filled in. This is used so that we can have module-level fixtures that refer to instance-level variables using lambdas. """ pos_args = inspect_getfullargspec(__fn)[0] pass_pos_args = {arg: kw.pop(arg) for arg in pos_args} glb = dict(__fn.__globals__) glb.update(kw) new_fn = types.FunctionType(__fn.__code__, glb) return new_fn(**pass_pos_args)
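A small usage sketch (names are hypothetical): positional arguments of the lambda are filled from the keyword namespace, and any remaining keywords are injected as globals before the lambda is evaluated::

    fixture = lambda connection: connection.execute(stmt)
    # 'connection' fills the positional arg; 'stmt' is resolved as a global inside the lambda
    result = resolve_lambda(fixture, connection=some_connection, stmt=some_select)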
Provide MetaData for a pytest fixture.
def metadata_fixture(ddl="function"): """Provide MetaData for a pytest fixture.""" from sqlalchemy.testing import config from . import fixture_functions def decorate(fn): def run_ddl(self): from sqlalchemy import schema metadata = self.metadata = schema.MetaData() try: result = fn(self, metadata) metadata.create_all(config.db) # TODO: # somehow get a per-function dml erase fixture here yield result finally: metadata.drop_all(config.db) return fixture_functions.fixture(scope=ddl)(run_ddl) return decorate
Set global warning behavior for the test suite.
def setup_filters(): """Set global warning behavior for the test suite.""" warnings.resetwarnings() warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning) warnings.filterwarnings("error", category=sa_exc.SAWarning) # some selected deprecations... warnings.filterwarnings("error", category=DeprecationWarning) if not sqla_14: # 1.3 uses pkg_resources in PluginLoader warnings.filterwarnings( "ignore", "pkg_resources is deprecated as an API", DeprecationWarning, ) try: import pytest except ImportError: pass else: warnings.filterwarnings( "once", category=pytest.PytestDeprecationWarning )
vendored from python 3.7
def formatannotation_fwdref( annotation: Any, base_module: Optional[Any] = None ) -> str: """vendored from python 3.7""" # copied over _formatannotation from sqlalchemy 2.0 if isinstance(annotation, str): return annotation if getattr(annotation, "__module__", None) == "typing": return repr(annotation).replace("typing.", "").replace("~", "") if isinstance(annotation, type): if annotation.__module__ in ("builtins", base_module): return repr(annotation.__qualname__) return annotation.__module__ + "." + annotation.__qualname__ elif isinstance(annotation, typing.TypeVar): return repr(annotation).replace("~", "") return repr(annotation).replace("~", "")
Opens the given file in a text editor. If the environment variable ``EDITOR`` is set, this is taken as preference. Otherwise, a list of commonly installed editors is tried. If no editor matches, an :py:exc:`OSError` is raised. :param filename: The filename to open. Will be passed verbatim to the editor command. :param environ: An optional drop-in replacement for ``os.environ``. Used mainly for testing.
def open_in_editor( filename: str, environ: Optional[Dict[str, str]] = None ) -> None: """ Opens the given file in a text editor. If the environment variable ``EDITOR`` is set, this is taken as preference. Otherwise, a list of commonly installed editors is tried. If no editor matches, an :py:exc:`OSError` is raised. :param filename: The filename to open. Will be passed verbatim to the editor command. :param environ: An optional drop-in replacement for ``os.environ``. Used mainly for testing. """ env = os.environ if environ is None else environ try: editor = _find_editor(env) check_call([editor, filename]) except Exception as exc: raise CommandError("Error executing editor (%s)" % (exc,)) from exc
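A usage sketch (the path is hypothetical); with no ``EDITOR`` set, the list of commonly installed editors is tried::

    open_in_editor("alembic/versions/1975ea83b712_add_account.py")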
Interpret a filename as either a filesystem location or as a package resource. Names that are non absolute paths and contain a colon are interpreted as resources and coerced to a file location.
def coerce_resource_to_filename(fname: str) -> str: """Interpret a filename as either a filesystem location or as a package resource. Names that are non absolute paths and contain a colon are interpreted as resources and coerced to a file location. """ if not os.path.isabs(fname) and ":" in fname: tokens = fname.split(":") # from https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename # noqa E501 file_manager = ExitStack() atexit.register(file_manager.close) ref = compat.importlib_resources.files(tokens[0]) for tok in tokens[1:]: ref = ref / tok fname = file_manager.enter_context( # type: ignore[assignment] compat.importlib_resources.as_file(ref) ) return fname
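Illustrative calls (package and resource names are hypothetical)::

    # non-absolute name containing colons -> resolved as a package resource
    coerce_resource_to_filename("myapp:migrations:alembic.ini")
    # absolute or plain filesystem path -> returned unchanged
    coerce_resource_to_filename("/opt/app/alembic.ini")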
Given a python source path, locate the .pyc.
def pyc_file_from_path(path: str) -> Optional[str]: """Given a python source path, locate the .pyc.""" candidate = importlib.util.cache_from_source(path) if os.path.exists(candidate): return candidate # even for pep3147, fall back to the old way of finding .pyc files, # to support sourceless operation filepath, ext = os.path.splitext(path) for ext in importlib.machinery.BYTECODE_SUFFIXES: if os.path.exists(filepath + ext): return filepath + ext else: return None
Load a file from the given path as a Python module.
def load_python_file(dir_: str, filename: str) -> ModuleType: """Load a file from the given path as a Python module.""" module_id = re.sub(r"\W", "_", filename) path = os.path.join(dir_, filename) _, ext = os.path.splitext(filename) if ext == ".py": if os.path.exists(path): module = load_module_py(module_id, path) else: pyc_path = pyc_file_from_path(path) if pyc_path is None: raise ImportError("Can't find Python file %s" % path) else: module = load_module_py(module_id, pyc_path) elif ext in (".pyc", ".pyo"): module = load_module_py(module_id, path) else: assert False return module
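A usage sketch (directory and filename are hypothetical), e.g. loading a migration script as a module::

    module = load_python_file("migrations/versions", "1975ea83b712_add_account.py")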
locate Column objects within the given expression.
def _find_columns(clause): """locate Column objects within the given expression.""" cols: Set[ColumnElement[Any]] = set() traverse(clause, {}, {"column": cols.add}) return cols
remove a column from a ColumnCollection.
def _remove_column_from_collection( collection: ColumnCollection, column: Union[Column[Any], ColumnClause[Any]] ) -> None: """remove a column from a ColumnCollection.""" # workaround for older SQLAlchemy, remove the # same object that's present assert column.key is not None to_remove = collection[column.key] # SQLAlchemy 2.0 will use more ReadOnlyColumnCollection # (renamed from ImmutableColumnCollection) if hasattr(collection, "_immutable") or hasattr(collection, "_readonly"): collection._parent.remove(to_remove) else: collection.remove(to_remove)
a workaround for the Index construct's severe lack of flexibility
def _textual_index_column( table: Table, text_: Union[str, TextClause, ColumnElement[Any]] ) -> Union[ColumnElement[Any], Column[Any]]: """a workaround for the Index construct's severe lack of flexibility""" if isinstance(text_, str): c = Column(text_, sqltypes.NULLTYPE) table.append_column(c) return c elif isinstance(text_, TextClause): return _textual_index_element(table, text_) elif isinstance(text_, _textual_index_element): return _textual_index_column(table, text_.text) elif isinstance(text_, sql.ColumnElement): return _copy_expression(text_, table) else: raise ValueError("String or text() construct expected")
Given an input filename, attempts to find the file, first relative to cwd, then as an absolute path, then relative to the animated_drawings root directory. If not found, prints an error message indicating which file_type it is.
def resolve_ad_filepath(file_name: str, file_type: str) -> Path:
    """
    Given an input filename, attempts to find the file, first relative to cwd, then as an absolute path,
    then relative to the animated_drawings root directory.
    If not found, prints an error message indicating which file_type it is.
    """
    if Path(file_name).exists():
        return Path(file_name)
    elif Path.joinpath(Path.cwd(), file_name).exists():
        return Path.joinpath(Path.cwd(), file_name)
    elif Path(resource_filename(__name__, file_name)).exists():
        return Path(resource_filename(__name__, file_name))
    elif Path(resource_filename(__name__, str(Path('..', file_name)))).exists():  # also check relative to the package root
        return Path(resource_filename(__name__, str(Path('..', file_name))))

    msg = f'Could not find the {file_type} specified: {file_name}'
    logging.critical(msg)
    assert False, msg
Given path to input image file, opens it, flips it based on EXIF tags, if present, and returns image with proper orientation.
def read_background_image(file_name: str) -> npt.NDArray[np.uint8]: """ Given path to input image file, opens it, flips it based on EXIF tags, if present, and returns image with proper orientation. """ # Check the file path file_path = resolve_ad_filepath(file_name, 'background_image') # Open the image and rotate as needed depending upon exif tag image = Image.open(str(file_path)) image = ImageOps.exif_transpose(image) # Convert to numpy array and flip rightside up image_np = np.asarray(image) image_np = cv2.flip(image_np, 0) # Ensure we have RGBA if len(image_np.shape) == 3 and image_np.shape[-1] == 3: # if RGB image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2RGBA) if len(image_np.shape) == 2: # if grayscale image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGBA) return image_np.astype(np.uint8)
Helper function to visualize mesh deformation outputs
def plot_mesh(vertices, triangles, pins_xy): """ Helper function to visualize mesh deformation outputs """ import matplotlib.pyplot as plt for tri in triangles: x_points = [] y_points = [] v0, v1, v2 = tri.tolist() x_points.append(vertices[v0][0]) y_points.append(vertices[v0][1]) x_points.append(vertices[v1][0]) y_points.append(vertices[v1][1]) x_points.append(vertices[v2][0]) y_points.append(vertices[v2][1]) x_points.append(vertices[v0][0]) y_points.append(vertices[v0][1]) plt.plot(x_points, y_points) plt.ylim((-15, 15)) plt.xlim((-15, 15)) for pin in pins_xy: plt.plot(pin[0], pin[1], color='red', marker='o') plt.show()
Given a path to a directory with character annotations, a motion configuration file, and a retarget configuration file, creates an animation and saves it to {char_anno_dir}/video.gif
def annotations_to_animation(char_anno_dir: str, motion_cfg_fn: str, retarget_cfg_fn: str): """ Given a path to a directory with character annotations, a motion configuration file, and a retarget configuration file, creates an animation and saves it to {annotation_dir}/video.png """ # package character_cfg_fn, motion_cfg_fn, and retarget_cfg_fn animated_drawing_dict = { 'character_cfg': str(Path(char_anno_dir, 'char_cfg.yaml').resolve()), 'motion_cfg': str(Path(motion_cfg_fn).resolve()), 'retarget_cfg': str(Path(retarget_cfg_fn).resolve()) } # create mvc config mvc_cfg = { 'scene': {'ANIMATED_CHARACTERS': [animated_drawing_dict]}, # add the character to the scene 'controller': { 'MODE': 'video_render', # 'video_render' or 'interactive' 'OUTPUT_VIDEO_PATH': str(Path(char_anno_dir, 'video.gif').resolve())} # set the output location } # write the new mvc config file out output_mvc_cfn_fn = str(Path(char_anno_dir, 'mvc_cfg.yaml')) with open(output_mvc_cfn_fn, 'w') as f: yaml.dump(dict(mvc_cfg), f) # render the video animated_drawings.render.start(output_mvc_cfn_fn)
Given the image located at img_fn, create the annotation files needed for animation. Then create an animation from those annotations, the motion cfg, and the retarget cfg.
def image_to_animation(img_fn: str, char_anno_dir: str, motion_cfg_fn: str, retarget_cfg_fn: str): """ Given the image located at img_fn, create annotation files needed for animation. Then create animation from those animations and motion cfg and retarget cfg. """ # create the annotations image_to_annotations(img_fn, char_anno_dir) # create the animation annotations_to_animation(char_anno_dir, motion_cfg_fn, retarget_cfg_fn)
Given the RGB image located at img_fn, runs detection, segmentation, and pose estimation for drawn character within it. Crops the image and saves texture, mask, and character config files necessary for animation. Writes to out_dir. Params: img_fn: path to RGB image out_dir: directory where outputs will be saved
def image_to_annotations(img_fn: str, out_dir: str) -> None:
    """
    Given the RGB image located at img_fn, runs detection, segmentation, and pose estimation
    for drawn character within it. Crops the image and saves texture, mask, and character config
    files necessary for animation. Writes to out_dir.

    Params:
        img_fn: path to RGB image
        out_dir: directory where outputs will be saved
    """

    # create output directory
    outdir = Path(out_dir)
    outdir.mkdir(exist_ok=True)

    # read image
    img = cv2.imread(img_fn)

    # copy the original image into the output_dir
    cv2.imwrite(str(outdir/'image.png'), img)

    # ensure it's rgb
    if len(img.shape) != 3:
        msg = f'image must have 3 channels (rgb). Found {len(img.shape)}'
        logging.critical(msg)
        assert False, msg

    # resize if needed
    if np.max(img.shape) > 1000:
        scale = 1000 / np.max(img.shape)
        img = cv2.resize(img, (round(scale * img.shape[1]), round(scale * img.shape[0])))

    # convert to bytes and send to torchserve
    img_b = cv2.imencode('.png', img)[1].tobytes()
    request_data = {'data': img_b}
    resp = requests.post("http://localhost:8080/predictions/drawn_humanoid_detector", files=request_data, verify=False)
    if resp is None or resp.status_code >= 300:
        raise Exception(f"Failed to get bounding box, please check if the 'docker_torchserve' is running and healthy, resp: {resp}")

    detection_results = json.loads(resp.content)

    # error check detection_results
    if isinstance(detection_results, dict) and 'code' in detection_results.keys() and detection_results['code'] == 404:
        assert False, f'Error performing detection. Check that drawn_humanoid_detector.mar was properly downloaded. Response: {detection_results}'

    # order results by score, descending
    detection_results.sort(key=lambda x: x['score'], reverse=True)

    # if no drawn humanoids detected, abort
    if len(detection_results) == 0:
        msg = 'Could not detect any drawn humanoids in the image. Aborting'
        logging.critical(msg)
        assert False, msg

    # otherwise, report # detected and score of highest.
    msg = f'Detected {len(detection_results)} humanoids in image. Using detection with highest score {detection_results[0]["score"]}.'
    logging.info(msg)

    # calculate the coordinates of the character bounding box
    bbox = np.array(detection_results[0]['bbox'])
    l, t, r, b = [round(x) for x in bbox]

    # dump the bounding box results to file
    with open(str(outdir/'bounding_box.yaml'), 'w') as f:
        yaml.dump({
            'left': l,
            'top': t,
            'right': r,
            'bottom': b
        }, f)

    # crop the image
    cropped = img[t:b, l:r]

    # get segmentation mask
    mask = segment(cropped)

    # send cropped image to pose estimator
    data_file = {'data': cv2.imencode('.png', cropped)[1].tobytes()}
    resp = requests.post("http://localhost:8080/predictions/drawn_humanoid_pose_estimator", files=data_file, verify=False)
    if resp is None or resp.status_code >= 300:
        raise Exception(f"Failed to get skeletons, please check if the 'docker_torchserve' is running and healthy, resp: {resp}")

    pose_results = json.loads(resp.content)

    # error check pose_results
    if isinstance(pose_results, dict) and 'code' in pose_results.keys() and pose_results['code'] == 404:
        assert False, f'Error performing pose estimation. Check that drawn_humanoid_pose_estimator.mar was properly downloaded. Response: {pose_results}'

    # if no skeleton detected, abort
    if len(pose_results) == 0:
        msg = 'Could not detect any skeletons within the character bounding box. Expected exactly 1. Aborting.'
        logging.critical(msg)
        assert False, msg

    # if more than one skeleton detected,
    if 1 < len(pose_results):
        msg = f'Detected {len(pose_results)} skeletons with the character bounding box. Expected exactly 1. Aborting.'
        logging.critical(msg)
        assert False, msg

    # get x y coordinates of detection joint keypoints
    kpts = np.array(pose_results[0]['keypoints'])[:, :2]

    # use them to build character skeleton rig
    skeleton = []
    skeleton.append({'loc': [round(x) for x in (kpts[11]+kpts[12])/2], 'name': 'root', 'parent': None})
    skeleton.append({'loc': [round(x) for x in (kpts[11]+kpts[12])/2], 'name': 'hip', 'parent': 'root'})
    skeleton.append({'loc': [round(x) for x in (kpts[5]+kpts[6])/2], 'name': 'torso', 'parent': 'hip'})
    skeleton.append({'loc': [round(x) for x in kpts[0]], 'name': 'neck', 'parent': 'torso'})
    skeleton.append({'loc': [round(x) for x in kpts[6]], 'name': 'right_shoulder', 'parent': 'torso'})
    skeleton.append({'loc': [round(x) for x in kpts[8]], 'name': 'right_elbow', 'parent': 'right_shoulder'})
    skeleton.append({'loc': [round(x) for x in kpts[10]], 'name': 'right_hand', 'parent': 'right_elbow'})
    skeleton.append({'loc': [round(x) for x in kpts[5]], 'name': 'left_shoulder', 'parent': 'torso'})
    skeleton.append({'loc': [round(x) for x in kpts[7]], 'name': 'left_elbow', 'parent': 'left_shoulder'})
    skeleton.append({'loc': [round(x) for x in kpts[9]], 'name': 'left_hand', 'parent': 'left_elbow'})
    skeleton.append({'loc': [round(x) for x in kpts[12]], 'name': 'right_hip', 'parent': 'root'})
    skeleton.append({'loc': [round(x) for x in kpts[14]], 'name': 'right_knee', 'parent': 'right_hip'})
    skeleton.append({'loc': [round(x) for x in kpts[16]], 'name': 'right_foot', 'parent': 'right_knee'})
    skeleton.append({'loc': [round(x) for x in kpts[11]], 'name': 'left_hip', 'parent': 'root'})
    skeleton.append({'loc': [round(x) for x in kpts[13]], 'name': 'left_knee', 'parent': 'left_hip'})
    skeleton.append({'loc': [round(x) for x in kpts[15]], 'name': 'left_foot', 'parent': 'left_knee'})

    # create the character config dictionary
    char_cfg = {'skeleton': skeleton, 'height': cropped.shape[0], 'width': cropped.shape[1]}

    # convert texture to RGBA and save
    cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2BGRA)
    cv2.imwrite(str(outdir/'texture.png'), cropped)

    # save mask
    cv2.imwrite(str(outdir/'mask.png'), mask)

    # dump character config to yaml
    with open(str(outdir/'char_cfg.yaml'), 'w') as f:
        yaml.dump(char_cfg, f)

    # create joint viz overlay for inspection purposes
    joint_overlay = cropped.copy()
    for joint in skeleton:
        x, y = joint['loc']
        name = joint['name']
        cv2.circle(joint_overlay, (int(x), int(y)), 5, (0, 0, 0), 5)
        cv2.putText(joint_overlay, name, (int(x), int(y+15)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, 2)
    cv2.imwrite(str(outdir/'joint_overlay.png'), joint_overlay)
Segment the character from the background: adaptive threshold, morphological ops, flood fill from the image borders, then retain the largest contour as the mask.
def segment(img: np.ndarray): """ threshold """ img = np.min(img, axis=2) img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 8) img = cv2.bitwise_not(img) """ morphops """ kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=2) img = cv2.morphologyEx(img, cv2.MORPH_DILATE, kernel, iterations=2) """ floodfill """ mask = np.zeros([img.shape[0]+2, img.shape[1]+2], np.uint8) mask[1:-1, 1:-1] = img.copy() # im_floodfill is results of floodfill. Starts off all white im_floodfill = np.full(img.shape, 255, np.uint8) # choose 10 points along each image side. use as seed for floodfill. h, w = img.shape[:2] for x in range(0, w-1, 10): cv2.floodFill(im_floodfill, mask, (x, 0), 0) cv2.floodFill(im_floodfill, mask, (x, h-1), 0) for y in range(0, h-1, 10): cv2.floodFill(im_floodfill, mask, (0, y), 0) cv2.floodFill(im_floodfill, mask, (w-1, y), 0) # make sure edges aren't character. necessary for contour finding im_floodfill[0, :] = 0 im_floodfill[-1, :] = 0 im_floodfill[:, 0] = 0 im_floodfill[:, -1] = 0 """ retain largest contour """ mask2 = cv2.bitwise_not(im_floodfill) mask = None biggest = 0 contours = measure.find_contours(mask2, 0.0) for c in contours: x = np.zeros(mask2.T.shape, np.uint8) cv2.fillPoly(x, [np.int32(c)], 1) size = len(np.where(x == 1)[0]) if size > biggest: mask = x biggest = size if mask is None: msg = 'Found no contours within image' logging.critical(msg) assert False, msg mask = ndimage.binary_fill_holes(mask).astype(int) mask = 255 * mask.astype(np.uint8) return mask.T
Main program entry point.
def main(): """Main program entry point.""" source_directory = sys.argv[1] if '/ansible_collections/' in os.getcwd(): output_path = "tests/output" else: output_path = "test/results" destination_directory = os.path.join(output_path, 'coverage') if not os.path.exists(destination_directory): os.makedirs(destination_directory) jobs = {} count = 0 for name in os.listdir(source_directory): match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name) label = match.group('label') attempt = int(match.group('attempt')) jobs[label] = max(attempt, jobs.get(label, 0)) for label, attempt in jobs.items(): name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt) source = os.path.join(source_directory, name) source_files = os.listdir(source) for source_file in source_files: source_path = os.path.join(source, source_file) destination_path = os.path.join(destination_directory, source_file + '.' + label) print('"%s" -> "%s"' % (source_path, destination_path)) shutil.copyfile(source_path, destination_path) count += 1 print('Coverage file count: %d' % count) print('##vso[task.setVariable variable=coverageFileCount]%d' % count) print('##vso[task.setVariable variable=outputPath]%s' % output_path)
Main program entry point.
def main(): """Main program entry point.""" start = time.time() sys.stdin.reconfigure(errors='surrogateescape') sys.stdout.reconfigure(errors='surrogateescape') for line in sys.stdin: seconds = time.time() - start sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line)) sys.stdout.flush()
parse command line :return : (options, args)
def parse(): """parse command line :return : (options, args)""" parser = optparse.OptionParser() parser.usage = "%prog -[options] (-h for help)" parser.add_option('-m', '--module-path', dest='module_path', help="REQUIRED: full path of module source to execute") parser.add_option('-a', '--args', dest='module_args', default="", help="module argument string") parser.add_option('-D', '--debugger', dest='debugger', help="path to python debugger (e.g. /usr/bin/pdb)") parser.add_option('-I', '--interpreter', dest='interpreter', help="path to interpreter to use for this module" " (e.g. ansible_python_interpreter=/usr/bin/python)", metavar='INTERPRETER_TYPE=INTERPRETER_PATH', default="ansible_python_interpreter=%s" % (sys.executable if sys.executable else '/usr/bin/python')) parser.add_option('-c', '--check', dest='check', action='store_true', help="run the module in check mode") parser.add_option('-n', '--noexecute', dest='execute', action='store_false', default=True, help="do not run the resulting module") parser.add_option('-o', '--output', dest='filename', help="Filename for resulting module", default="~/.ansible_module_generated") options, args = parser.parse_args() if not options.module_path: parser.print_help() sys.exit(1) else: return options, args
Write args to a file for old-style module's use.
def write_argsfile(argstring, json=False): """ Write args to a file for old-style module's use. """ argspath = os.path.expanduser("~/.ansible_test_module_arguments") argsfile = open(argspath, 'w') if json: args = parse_kv(argstring) argstring = jsonify(args) argsfile.write(argstring) argsfile.close() return argspath
simulate what ansible does with new style modules
def boilerplate_module(modfile, args, interpreters, check, destfile): """ simulate what ansible does with new style modules """ # module_fh = open(modfile) # module_data = module_fh.read() # module_fh.close() # replacer = module_common.ModuleReplacer() loader = DataLoader() # included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1 complex_args = {} # default selinux fs list is pass in as _ansible_selinux_special_fs arg complex_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS complex_args['_ansible_tmpdir'] = C.DEFAULT_LOCAL_TMP complex_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES complex_args['_ansible_version'] = __version__ if args.startswith("@"): # Argument is a YAML file (JSON is a subset of YAML) complex_args = utils_vars.combine_vars(complex_args, loader.load_from_file(args[1:])) args = '' elif args.startswith("{"): # Argument is a YAML document (not a file) complex_args = utils_vars.combine_vars(complex_args, loader.load(args)) args = '' if args: parsed_args = parse_kv(args) complex_args = utils_vars.combine_vars(complex_args, parsed_args) task_vars = interpreters if check: complex_args['_ansible_check_mode'] = True modname = os.path.basename(modfile) modname = os.path.splitext(modname)[0] (module_data, module_style, shebang) = module_common.modify_module( modname, modfile, complex_args, Templar(loader=loader), task_vars=task_vars ) if module_style == 'new' and '_ANSIBALLZ_WRAPPER = True' in to_native(module_data): module_style = 'ansiballz' modfile2_path = os.path.expanduser(destfile) print("* including generated source, if any, saving to: %s" % modfile2_path) if module_style not in ('ansiballz', 'old'): print("* this may offset any line numbers in tracebacks/debuggers!") modfile2 = open(modfile2_path, 'wb') modfile2.write(module_data) modfile2.close() modfile = modfile2_path return (modfile2_path, modname, module_style)
Test run a module, piping its output for reporting.
def runtest(modfile, argspath, modname, module_style, interpreters): """Test run a module, piping it's output for reporting.""" invoke = "" if module_style == 'ansiballz': modfile, argspath = ansiballz_setup(modfile, modname, interpreters) if 'ansible_python_interpreter' in interpreters: invoke = "%s " % interpreters['ansible_python_interpreter'] os.system("chmod +x %s" % modfile) invoke = "%s%s" % (invoke, modfile) if argspath is not None: invoke = "%s %s" % (invoke, argspath) cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = cmd.communicate() out, err = to_text(out), to_text(err) try: print("*" * 35) print("RAW OUTPUT") print(out) print(err) results = json.loads(out) except Exception: print("*" * 35) print("INVALID OUTPUT FORMAT") print(out) traceback.print_exc() sys.exit(1) print("*" * 35) print("PARSED OUTPUT") print(jsonify(results, format=True))
Run interactively with console debugger.
def rundebug(debugger, modfile, argspath, modname, module_style, interpreters): """Run interactively with console debugger.""" if module_style == 'ansiballz': modfile, argspath = ansiballz_setup(modfile, modname, interpreters) if argspath is not None: subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True) else: subprocess.call("%s %s" % (debugger, modfile), shell=True)
Parse the given requirements and return any applicable pre-build instructions.
def pre_build_instructions(requirements: str) -> str: """Parse the given requirements and return any applicable pre-build instructions.""" parsed_requirements = requirements.splitlines() package_versions = { match.group('package').lower(): match.group('version') for match in (re.search('^(?P<package>.*)==(?P<version>.*)$', requirement) for requirement in parsed_requirements) if match } instructions: list[str] = [] build_constraints = ( ('pyyaml', '>= 5.4, <= 6.0', ('Cython < 3.0',)), ) for package, specifier, constraints in build_constraints: version_string = package_versions.get(package) if version_string: version = packaging.version.Version(version_string) specifier_set = packaging.specifiers.SpecifierSet(specifier) if specifier_set.contains(version): instructions.append(f'# pre-build requirement: {package} == {version}\n') for constraint in constraints: instructions.append(f'# pre-build constraint: {constraint}\n') return ''.join(instructions)
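For example, given a pinned requirements string (hypothetical contents), the pyyaml pin falls inside the '>= 5.4, <= 6.0' specifier, so the Cython constraint is emitted::

    text = pre_build_instructions("pyyaml==5.4.1\nrequests==2.31.0\n")
    # text == "# pre-build requirement: pyyaml == 5.4.1\n# pre-build constraint: Cython < 3.0\n"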
Main program body.
def main(): """Main program body.""" args = parse_args() download_run(args)
Parse and return args.
def parse_args(): """Parse and return args.""" parser = argparse.ArgumentParser(description='Download results from a CI run.') parser.add_argument('run', metavar='RUN', type=run_id_arg, help='AZP run id or URI') parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='show what is being downloaded') parser.add_argument('-t', '--test', dest='test', action='store_true', help='show what would be downloaded without downloading') parser.add_argument('-p', '--pipeline-id', type=int, default=20, help='pipeline to download the job from') parser.add_argument('--artifacts', action='store_true', help='download artifacts') parser.add_argument('--console-logs', action='store_true', help='download console logs') parser.add_argument('--run-metadata', action='store_true', help='download run metadata') parser.add_argument('--all', action='store_true', help='download everything') parser.add_argument('--match-artifact-name', default=re.compile('.*'), type=re.compile, help='only download artifacts which names match this regex') parser.add_argument('--match-job-name', default=re.compile('.*'), type=re.compile, help='only download artifacts from jobs which names match this regex') if argcomplete: argcomplete.autocomplete(parser) args = parser.parse_args() if args.all: args.artifacts = True args.run_metadata = True args.console_logs = True selections = ( args.artifacts, args.run_metadata, args.console_logs ) if not any(selections): parser.error('At least one download option is required.') return args
Download a run.
def download_run(args): """Download a run.""" output_dir = '%s' % args.run if not args.test and not os.path.exists(output_dir): os.makedirs(output_dir) if args.run_metadata: run_url = 'https://dev.azure.com/ansible/ansible/_apis/pipelines/%s/runs/%s?api-version=6.0-preview.1' % (args.pipeline_id, args.run) run_info_response = requests.get(run_url) run_info_response.raise_for_status() run = run_info_response.json() path = os.path.join(output_dir, 'run.json') contents = json.dumps(run, sort_keys=True, indent=4) if args.verbose: print(path) if not args.test: with open(path, 'w') as metadata_fd: metadata_fd.write(contents) timeline_response = requests.get('https://dev.azure.com/ansible/ansible/_apis/build/builds/%s/timeline?api-version=6.0' % args.run) timeline_response.raise_for_status() timeline = timeline_response.json() roots = set() by_id = {} children_of = {} parent_of = {} for r in timeline['records']: thisId = r['id'] parentId = r['parentId'] by_id[thisId] = r if parentId is None: roots.add(thisId) else: parent_of[thisId] = parentId children_of[parentId] = children_of.get(parentId, []) + [thisId] allowed = set() def allow_recursive(ei): allowed.add(ei) for ci in children_of.get(ei, []): allow_recursive(ci) for ri in roots: r = by_id[ri] allowed.add(ri) for ci in children_of.get(r['id'], []): c = by_id[ci] if not args.match_job_name.match("%s %s" % (r['name'], c['name'])): continue allow_recursive(c['id']) if args.artifacts: artifact_list_url = 'https://dev.azure.com/ansible/ansible/_apis/build/builds/%s/artifacts?api-version=6.0' % args.run artifact_list_response = requests.get(artifact_list_url) artifact_list_response.raise_for_status() for artifact in artifact_list_response.json()['value']: if artifact['source'] not in allowed or not args.match_artifact_name.match(artifact['name']): continue if args.verbose: print('%s/%s' % (output_dir, artifact['name'])) if not args.test: response = requests.get(artifact['resource']['downloadUrl']) response.raise_for_status() archive = zipfile.ZipFile(io.BytesIO(response.content)) archive.extractall(path=output_dir) if args.console_logs: for r in timeline['records']: if not r['log'] or r['id'] not in allowed or not args.match_artifact_name.match(r['name']): continue names = [] parent_id = r['id'] while parent_id is not None: p = by_id[parent_id] name = p['name'] if name not in names: names = [name] + names parent_id = parent_of.get(p['id'], None) path = " ".join(names) # Some job names have the separator in them. path = path.replace(os.sep, '_') log_path = os.path.join(output_dir, '%s.log' % path) if args.verbose: print(log_path) if not args.test: log = requests.get(r['log']['url']) log.raise_for_status() open(log_path, 'wb').write(log.content)
Main program body.
def main(): """Main program body.""" args = parse_args() try: incidental_report(args) except ApplicationError as ex: sys.exit(ex)
Parse and return args.
def parse_args(): """Parse and return args.""" source = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) parser = argparse.ArgumentParser(description='Report on incidental test coverage downloaded from Azure Pipelines.') parser.add_argument('result', type=directory, help='path to directory containing test results downloaded from Azure Pipelines') parser.add_argument('--output', type=optional_directory, default=os.path.join(source, 'test', 'results', '.tmp', 'incidental'), help='path to directory where reports should be written') parser.add_argument('--source', type=optional_directory, default=source, help='path to git repository containing Ansible source') parser.add_argument('--skip-checks', action='store_true', help='skip integrity checks, use only for debugging') parser.add_argument('--ignore-cache', dest='use_cache', action='store_false', help='ignore cached files') parser.add_argument('-v', '--verbose', action='store_true', help='increase verbosity') parser.add_argument('--result-sha', default=None, help='Override the result sha') targets = parser.add_mutually_exclusive_group() targets.add_argument('--targets', type=regex, default='^incidental_', help='regex for targets to analyze, default: %(default)s') targets.add_argument('--plugin-path', help='path to plugin to report incidental coverage on') if argcomplete: argcomplete.autocomplete(parser) args = parser.parse_args() return args
Generate incidental coverage report.
def incidental_report(args):
    """Generate incidental coverage report."""
    ct = CoverageTool()
    git = Git(os.path.abspath(args.source))
    coverage_data = CoverageData(os.path.abspath(args.result))

    result_sha = args.result_sha or coverage_data.result_sha

    try:
        git.show([result_sha, '--'])
    except subprocess.CalledProcessError:
        raise ApplicationError('%s: commit not found: %s\n'
                               'make sure your source repository is up-to-date' % (git.path, result_sha))

    if coverage_data.result != "succeeded":
        check_failed(args, 'results indicate tests did not pass (result: %s)\n'
                           're-run until passing, then download the latest results and re-run the report using those results' % coverage_data.result)

    if not coverage_data.paths:
        raise ApplicationError('no coverage data found\n'
                               'make sure the downloaded results are from a code coverage run on Azure Pipelines')

    # generate a unique subdirectory in the output directory based on the input files being used
    path_hash = hashlib.sha256(b'\n'.join(p.encode() for p in coverage_data.paths)).hexdigest()
    output_path = os.path.abspath(os.path.join(args.output, path_hash))

    data_path = os.path.join(output_path, 'data')
    reports_path = os.path.join(output_path, 'reports')

    for path in [data_path, reports_path]:
        if not os.path.exists(path):
            os.makedirs(path)

    # combine coverage results into a single file
    combined_path = os.path.join(output_path, 'combined.json')
    cached(combined_path, args.use_cache, args.verbose,
           lambda: ct.combine(coverage_data.paths, combined_path))

    with open(combined_path) as combined_file:
        combined = json.load(combined_file)

    if args.plugin_path:
        # reporting on coverage missing from the test target for the specified plugin
        # the report will be on a single target
        cache_path_format = '%s' + '-for-%s' % os.path.splitext(os.path.basename(args.plugin_path))[0]
        target_pattern = '^%s$' % get_target_name_from_plugin_path(args.plugin_path)
        include_path = args.plugin_path
        missing = True
        target_name = get_target_name_from_plugin_path(args.plugin_path)
    else:
        # reporting on coverage exclusive to the matched targets
        # the report can contain multiple targets
        cache_path_format = '%s'
        target_pattern = args.targets
        include_path = None
        missing = False
        target_name = None

    # identify integration test targets to analyze
    target_names = sorted(combined['targets'])
    incidental_target_names = [target for target in target_names if re.search(target_pattern, target)]

    if not incidental_target_names:
        if target_name:
            # if the plugin has no tests we still want to know what coverage is missing
            incidental_target_names = [target_name]
        else:
            raise ApplicationError('no targets to analyze')

    # exclude test support plugins from analysis
    # also exclude six, which for an unknown reason reports bogus coverage lines (indicating coverage of comments)
    exclude_path = '^(test/support/|lib/ansible/module_utils/six/)'

    # process coverage for each target and then generate a report
    # save sources for generating a summary report at the end
    summary = {}
    report_paths = {}

    for target_name in incidental_target_names:
        cache_name = cache_path_format % target_name

        only_target_path = os.path.join(data_path, 'only-%s.json' % cache_name)
        cached(only_target_path, args.use_cache, args.verbose,
               lambda: ct.filter(combined_path, only_target_path, include_targets=[target_name], include_path=include_path, exclude_path=exclude_path))

        without_target_path = os.path.join(data_path, 'without-%s.json' % cache_name)
        cached(without_target_path, args.use_cache, args.verbose,
               lambda: ct.filter(combined_path, without_target_path, exclude_targets=[target_name], include_path=include_path, exclude_path=exclude_path))

        if missing:
            source_target_path = missing_target_path = os.path.join(data_path, 'missing-%s.json' % cache_name)
            cached(missing_target_path, args.use_cache, args.verbose,
                   lambda: ct.missing(without_target_path, only_target_path, missing_target_path, only_gaps=True))
        else:
            source_target_path = exclusive_target_path = os.path.join(data_path, 'exclusive-%s.json' % cache_name)
            cached(exclusive_target_path, args.use_cache, args.verbose,
                   lambda: ct.missing(only_target_path, without_target_path, exclusive_target_path, only_gaps=True))

        source_expanded_target_path = os.path.join(os.path.dirname(source_target_path), 'expanded-%s' % os.path.basename(source_target_path))
        cached(source_expanded_target_path, args.use_cache, args.verbose,
               lambda: ct.expand(source_target_path, source_expanded_target_path))

        summary[target_name] = sources = collect_sources(source_expanded_target_path, git, coverage_data, result_sha)

        txt_report_path = os.path.join(reports_path, '%s.txt' % cache_name)
        cached(txt_report_path, args.use_cache, args.verbose,
               lambda: generate_report(sources, txt_report_path, coverage_data, target_name, missing=missing))

        report_paths[target_name] = txt_report_path

    # provide a summary report of results
    for target_name in incidental_target_names:
        sources = summary[target_name]
        report_path = os.path.relpath(report_paths[target_name])

        print('%s: %d arcs, %d lines, %d files - %s' % (
            target_name,
            sum(len(s.covered_arcs) for s in sources),
            sum(len(s.covered_lines) for s in sources),
            len(sources),
            report_path,
        ))

    if not missing:
        sys.stderr.write('NOTE: This report shows only coverage exclusive to the reported targets. '
                         'As targets are removed, exclusive coverage on the remaining targets will increase.\n')
Return the integration test target name for the given plugin path.
def get_target_name_from_plugin_path(path): # type: (str) -> str """Return the integration test target name for the given plugin path.""" parts = os.path.splitext(path)[0].split(os.path.sep) plugin_name = parts[-1] if path.startswith('lib/ansible/modules/'): plugin_type = None elif path.startswith('lib/ansible/plugins/'): plugin_type = parts[3] elif path.startswith('lib/ansible/module_utils/'): plugin_type = parts[2] elif path.startswith('plugins/'): plugin_type = parts[1] else: raise ApplicationError('Cannot determine plugin type from plugin path: %s' % path) if plugin_type is None: target_name = plugin_name else: target_name = '%s_%s' % (plugin_type, plugin_name) return target_name
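Expected mappings based on the path prefixes handled above (the example paths are illustrative)::

    get_target_name_from_plugin_path('lib/ansible/modules/ping.py')            # -> 'ping'
    get_target_name_from_plugin_path('lib/ansible/plugins/connection/ssh.py')  # -> 'connection_ssh'
    get_target_name_from_plugin_path('lib/ansible/module_utils/urls.py')       # -> 'module_utils_urls'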
Main program body.
def main(): """Main program body.""" args = parse_args() key = os.environ.get('AZP_TOKEN', None) if not key: sys.stderr.write("please set you AZP token in AZP_TOKEN") sys.exit(1) start_run(args, key)
Parse and return args.
def parse_args(): """Parse and return args.""" parser = argparse.ArgumentParser(description='Start a new CI run.') parser.add_argument('-p', '--pipeline-id', type=int, default=20, help='pipeline to download the job from') parser.add_argument('--ref', help='git ref name to run on') parser.add_argument('--env', nargs=2, metavar=('KEY', 'VALUE'), action='append', help='environment variable to pass') if argcomplete: argcomplete.autocomplete(parser) args = parser.parse_args() return args
Start a new CI run.
def start_run(args, key): """Start a new CI run.""" url = "https://dev.azure.com/ansible/ansible/_apis/pipelines/%s/runs?api-version=6.0-preview.1" % args.pipeline_id payload = {"resources": {"repositories": {"self": {"refName": args.ref}}}} resp = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', key), data=payload) resp.raise_for_status() print(json.dumps(resp.json(), indent=4, sort_keys=True))
Given a PullRequest, or a string containing a PR number, PR URL, or internal PR URL (e.g. ansible-collections/community.general#1234), return either a full github URL to the PR (if only_number is False), or an int containing the PR number (if only_number is True). Throws if it can't parse the input.
def normalize_pr_url(pr, allow_non_ansible_ansible=False, only_number=False): ''' Given a PullRequest, or a string containing a PR number, PR URL, or internal PR URL (e.g. ansible-collections/community.general#1234), return either a full github URL to the PR (if only_number is False), or an int containing the PR number (if only_number is True). Throws if it can't parse the input. ''' if isinstance(pr, PullRequest): return pr.html_url if pr.isnumeric(): if only_number: return int(pr) return 'https://github.com/ansible/ansible/pull/{0}'.format(pr) # Allow for forcing ansible/ansible if not allow_non_ansible_ansible and 'ansible/ansible' not in pr: raise Exception('Non ansible/ansible repo given where not expected') re_match = PULL_HTTP_URL_RE.match(pr) if re_match: if only_number: return int(re_match.group('ticket')) return pr re_match = PULL_URL_RE.match(pr) if re_match: if only_number: return int(re_match.group('ticket')) return 'https://github.com/{0}/{1}/pull/{2}'.format( re_match.group('user'), re_match.group('repo'), re_match.group('ticket')) raise Exception('Did not understand given PR')
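Illustrative calls (the PR number is hypothetical)::

    normalize_pr_url('12345')                    # -> 'https://github.com/ansible/ansible/pull/12345'
    normalize_pr_url('12345', only_number=True)  # -> 12345
    normalize_pr_url('https://github.com/ansible/ansible/pull/12345', only_number=True)  # -> 12345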
Given a full Github PR URL, extract the user/org and repo name. Return them in the form: "user/repo"
def url_to_org_repo(url): ''' Given a full Github PR URL, extract the user/org and repo name. Return them in the form: "user/repo" ''' match = PULL_HTTP_URL_RE.match(url) if not match: return '' return '{0}/{1}'.format(match.group('user'), match.group('repo'))
Given the new PR (the backport) and the originating (source) PR, construct the new body for the backport PR. If the backport follows the usual ansible/ansible template, we look for the '##### SUMMARY'-type line and add our "Backport of" line right below that. If we can't find the SUMMARY line, we add our line at the very bottom. This function does not side-effect, it simply returns the new body as a string.
def generate_new_body(pr, source_pr): ''' Given the new PR (the backport) and the originating (source) PR, construct the new body for the backport PR. If the backport follows the usual ansible/ansible template, we look for the '##### SUMMARY'-type line and add our "Backport of" line right below that. If we can't find the SUMMARY line, we add our line at the very bottom. This function does not side-effect, it simply returns the new body as a string. ''' backport_text = '\nBackport of {0}\n'.format(source_pr) body_lines = pr.body.split('\n') new_body_lines = [] added = False for line in body_lines: if 'Backport of http' in line: raise Exception('Already has a backport line, aborting.') new_body_lines.append(line) if line.startswith('#') and line.strip().endswith('SUMMARY'): # This would be a fine place to add it new_body_lines.append(backport_text) added = True if not added: # Otherwise, no '#### SUMMARY' line, so just add it at the bottom new_body_lines.append(backport_text) return '\n'.join(new_body_lines)
Given a commit hash, attempt to find the hash in any repo in the ansible orgs, and then use it to determine what, if any, PR it appeared in.
def get_prs_for_commit(g, commit): ''' Given a commit hash, attempt to find the hash in any repo in the ansible orgs, and then use it to determine what, if any, PR it appeared in. ''' commits = g.search_commits( 'hash:{0} org:ansible org:ansible-collections is:public'.format(commit) ).get_page(0) if not commits or len(commits) == 0: return [] pulls = commits[0].get_pulls().get_page(0) if not pulls or len(pulls) == 0: return [] return pulls
Do magic. This is basically the "brain" of 'auto'. It will search the PR (the newest PR - the backport) and try to find where it originated. First it will search in the title. Some titles include things like "foo bar change (#12345)" or "foo bar change (backport of #54321)" so we search for those and pull them out. Next it will scan the body of the PR and look for: - cherry-pick reference lines (e.g. "cherry-picked from commit XXXXX") - other PRs (#nnnnnn) and (foo/bar#nnnnnnn) - full URLs to other PRs It will take all of the above, and return a list of "possibilities", which is a list of PullRequest objects.
def search_backport(pr, g, ansible_ansible): ''' Do magic. This is basically the "brain" of 'auto'. It will search the PR (the newest PR - the backport) and try to find where it originated. First it will search in the title. Some titles include things like "foo bar change (#12345)" or "foo bar change (backport of #54321)" so we search for those and pull them out. Next it will scan the body of the PR and look for: - cherry-pick reference lines (e.g. "cherry-picked from commit XXXXX") - other PRs (#nnnnnn) and (foo/bar#nnnnnnn) - full URLs to other PRs It will take all of the above, and return a list of "possibilities", which is a list of PullRequest objects. ''' possibilities = [] # 1. Try searching for it in the title. title_search = PULL_BACKPORT_IN_TITLE.match(pr.title) if title_search: ticket = title_search.group('ticket1') if not ticket: ticket = title_search.group('ticket2') try: possibilities.append(ansible_ansible.get_pull(int(ticket))) except Exception: pass # 2. Search for clues in the body of the PR body_lines = pr.body.split('\n') for line in body_lines: # a. Try searching for a `git cherry-pick` line cherrypick = PULL_CHERRY_PICKED_FROM.match(line) if cherrypick: prs = get_prs_for_commit(g, cherrypick.group('hash')) possibilities.extend(prs) continue # b. Try searching for other referenced PRs (by #nnnnn or full URL) tickets = [('ansible', 'ansible', ticket) for ticket in TICKET_NUMBER.findall(line)] tickets.extend(PULL_HTTP_URL_RE.findall(line)) tickets.extend(PULL_URL_RE.findall(line)) if tickets: for ticket in tickets: # Is it a PR (even if not in ansible/ansible)? # TODO: As a small optimization/to avoid extra calls to GitHub, # we could limit this check to non-URL matches. If it's a URL, # we know it's definitely a pull request. try: repo_path = '{0}/{1}'.format(ticket[0], ticket[1]) repo = ansible_ansible if repo_path != 'ansible/ansible': repo = g.get_repo(repo_path) ticket_pr = repo.get_pull(int(ticket)) possibilities.append(ticket_pr) except Exception: pass continue # Future-proofing return possibilities
Prompt the user and return whether or not they agree.
def prompt_add(): ''' Prompt the user and return whether or not they agree. ''' res = input('Shall I add the reference? [Y/n]: ') return res.lower() in ('', 'y', 'yes')
Given the new PR (the backport), and the "possibility" that we have decided on, prompt the user and then add the reference to the body of the new PR. This method does the actual "destructive" work of editing the PR body.
def commit_edit(new_pr, pr): ''' Given the new PR (the backport), and the "possibility" that we have decided on, prompt the user and then add the reference to the body of the new PR. This method does the actual "destructive" work of editing the PR body. ''' print('I think this PR might have come from:') print(pr.title) print('-' * 50) print(pr.html_url) if prompt_add(): new_body = generate_new_body(new_pr, pr.html_url) new_pr.edit(body=new_body) print('I probably added the reference successfully.')
Return the given value converted to a string suitable for use as a command line argument.
def path_to_str(value: t.Any) -> str: """Return the given value converted to a string suitable for use as a command line argument.""" return f"{value}/" if isinstance(value, pathlib.Path) and value.is_dir() else str(value)
Run the specified command.
def run( *args: t.Any, env: dict[str, t.Any] | None, cwd: pathlib.Path | str, capture_output: bool = False, ) -> CompletedProcess | None: """Run the specified command.""" args = [arg.relative_to(cwd) if isinstance(arg, pathlib.Path) else arg for arg in args] str_args = tuple(path_to_str(arg) for arg in args) str_env = {key: path_to_str(value) for key, value in env.items()} if env is not None else None display.show(f"--> {shlex.join(str_args)}", color=Display.CYAN) try: p = subprocess.run(str_args, check=True, text=True, env=str_env, cwd=cwd, capture_output=capture_output) except subprocess.CalledProcessError as ex: # improve type hinting and include stdout/stderr (if any) in the message raise CalledProcessError( message=str(ex), cmd=str_args, status=ex.returncode, stdout=ex.stdout, stderr=ex.stderr, ) from None if not capture_output: return None # improve type hinting return CompletedProcess( stdout=p.stdout, stderr=p.stderr, )
Conditionally convert an ApplicationError in the provided context to a warning.
def suppress_when(error_as_warning: bool) -> t.Generator[None, None, None]: """Conditionally convert an ApplicationError in the provided context to a warning.""" if error_as_warning: try: yield except ApplicationError as ex: display.warning(ex) else: yield
Run the specified git command.
def git(*args: t.Any, capture_output: t.Literal[True] | t.Literal[False] = False) -> CompletedProcess | None: """Run the specified git command.""" return run("git", *args, env=None, cwd=CHECKOUT_DIR, capture_output=capture_output)
Return the commit associated with the given rev, or HEAD if no rev is given.
def get_commit(rev: str | None = None) -> str: """Return the commit associated with the given rev, or HEAD if no rev is given.""" try: return git("rev-parse", "--quiet", "--verify", "--end-of-options", f"{rev or 'HEAD'}^{{commit}}", capture_output=True).stdout.strip() except CalledProcessError as ex: if ex.status == 1 and not ex.stdout and not ex.stderr: raise ApplicationError(f"Could not find commit: {rev}") from None raise
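Usage sketch (the rev name is illustrative)::

    get_commit()               # full commit SHA of HEAD
    get_commit('stable-2.16')  # full commit SHA of the given rev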
Return pull request parameters using the provided details.
def prepare_pull_request(version: Version, branch: str, title: str, add: t.Iterable[pathlib.Path | str], allow_stale: bool) -> PullRequest: """Return pull request parameters using the provided details.""" git_state = get_git_state(version, allow_stale) if not git("status", "--porcelain", "--untracked-files=no", capture_output=True).stdout.strip(): raise ApplicationError("There are no changes to commit. Did you skip a step?") upstream_branch = get_upstream_branch(version) body = create_pull_request_body(title) git("checkout", "-b", branch) git("add", *add) git("commit", "-m", title) git("push", "--set-upstream", git_state.remotes.fork.name, branch) git("checkout", git_state.branch or git_state.commit) git("branch", "-d", branch) pr = PullRequest( upstream_user=git_state.remotes.upstream.user, upstream_repo=git_state.remotes.upstream.repo, upstream_branch=upstream_branch, user=git_state.remotes.fork.user, repo=git_state.remotes.fork.repo, branch=branch, title=title, body=body, ) return pr
Open a browser tab for creating the given GitHub release.
def create_github_release(release: GitHubRelease) -> None: """Open a browser tab for creating the given GitHub release.""" # See: https://docs.github.com/en/repositories/releasing-projects-on-github/automation-for-release-forms-with-query-parameters params = dict( tag=release.tag, target=release.target, title=release.title, body=release.body, prerelease=1 if release.pre_release else 0, ) query_string = urllib.parse.urlencode(params) url = f"https://github.com/{release.user}/{release.repo}/releases/new?{query_string}" display.show("Opening release creation page in new tab using default browser ...") webbrowser.open_new_tab(url)
Open a browser tab for creating the given pull request.
def create_pull_request(pr: PullRequest) -> None: """Open a browser tab for creating the given pull request.""" # See: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/using-query-parameters-to-create-a-pull-request # noqa params = dict( quick_pull=1, title=pr.title, body=pr.body, ) query_string = urllib.parse.urlencode(params) url = f"https://github.com/{pr.upstream_user}/{pr.upstream_repo}/compare/{pr.upstream_branch}...{pr.user}:{pr.repo}:{pr.branch}?{query_string}" display.show("Opening pull request in new tab using default browser ...") webbrowser.open_new_tab(url)
Return a simple pull request body created from the given title.
def create_pull_request_body(title: str) -> str: """Return a simple pull request body created from the given title.""" body = f""" ##### SUMMARY {title} ##### ISSUE TYPE Feature Pull Request """ return body.lstrip()
Return details about the specified remote.
def get_remote(name: str, push: bool) -> Remote: """Return details about the specified remote.""" remote_url = git("remote", "get-url", *(["--push"] if push else []), name, capture_output=True).stdout.strip() remote_match = re.search(r"[@/]github[.]com[:/](?P<user>[^/]+)/(?P<repo>[^.]+)(?:[.]git)?$", remote_url) if not remote_match: raise RuntimeError(f"Unable to identify the user and repo in the '{name}' remote: {remote_url}") remote = Remote( name=name, user=remote_match.group("user"), repo=remote_match.group("repo"), ) return remote
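To make the URL parsing concrete, here is an illustrative check of the same pattern against the two common GitHub remote URL forms; the user and repository names are made up.

# Illustration only; user/repo names are hypothetical.
import re

pattern = re.compile(r"[@/]github[.]com[:/](?P<user>[^/]+)/(?P<repo>[^.]+)(?:[.]git)?$")

for url in ("git@github.com:octocat/ansible.git", "https://github.com/octocat/ansible"):
    match = pattern.search(url)
    print(url, "->", match.group("user"), match.group("repo"))  # both print: octocat ansible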
Return details about the remotes we need to use.
def get_remotes() -> Remotes:
    """Return details about the remotes we need to use."""
    # assume the devel branch has its upstream remote pointing to the user's fork
    fork_remote_name = git("branch", "--list", "devel", "--format=%(upstream:remotename)", capture_output=True).stdout.strip()

    if not fork_remote_name:
        raise ApplicationError("Could not determine the remote for your fork of Ansible.")

    display.show(f"Detected '{fork_remote_name}' as the remote for your fork of Ansible.")

    # assume there is only one ansible org remote, which would allow release testing using another repo in the same org without special configuration
    all_remotes = git("remote", "-v", capture_output=True).stdout.strip().splitlines()
    ansible_remote_names = set(line.split()[0] for line in all_remotes if re.search(r"[@/]github[.]com[:/]ansible/", line))

    if not ansible_remote_names:
        raise ApplicationError(f"Could not determine the remote from which '{fork_remote_name}' was forked.")

    if len(ansible_remote_names) > 1:
        raise ApplicationError(f"Found multiple candidates for the remote from which '{fork_remote_name}' was forked: {', '.join(ansible_remote_names)}")

    upstream_remote_name = ansible_remote_names.pop()

    display.show(f"Detected '{upstream_remote_name}' as the remote from which '{fork_remote_name}' was forked.")

    if fork_remote_name == upstream_remote_name:
        raise ApplicationError("The remote for your fork of Ansible cannot be the same as the remote from which it was forked.")

    remotes = Remotes(
        fork=get_remote(fork_remote_name, push=True),
        upstream=get_remote(upstream_remote_name, push=False),
    )

    return remotes
Return the upstream branch name for the given version.
def get_upstream_branch(version: Version) -> str: """Return the upstream branch name for the given version.""" return f"stable-{version.major}.{version.minor}"
Return information about the current state of the git repository.
def get_git_state(version: Version, allow_stale: bool) -> GitState: """Return information about the current state of the git repository.""" remotes = get_remotes() upstream_branch = get_upstream_branch(version) git("fetch", remotes.upstream.name, upstream_branch) upstream_ref = f"{remotes.upstream.name}/{upstream_branch}" upstream_commit = get_commit(upstream_ref) commit = get_commit() if commit != upstream_commit: with suppress_when(allow_stale): raise ApplicationError(f"The current commit ({commit}) does not match {upstream_ref} ({upstream_commit}).") branch = git("branch", "--show-current", capture_output=True).stdout.strip() or None state = GitState( remotes=remotes, branch=branch, commit=commit, ) return state
Ensure the release venv is ready and return the env vars needed to use it.
def ensure_venv() -> dict[str, t.Any]: """Ensure the release venv is ready and return the env vars needed to use it.""" # TODO: consider freezing the ansible and release requirements along with their dependencies ansible_requirements = ANSIBLE_REQUIREMENTS_FILE.read_text() release_requirements = """ build twine """ requirements_file = CHECKOUT_DIR / "test/sanity/code-smell/package-data.requirements.txt" requirements_content = requirements_file.read_text() requirements_content += ansible_requirements requirements_content += release_requirements requirements_hash = hashlib.sha256(requirements_content.encode()).hexdigest()[:8] python_version = ".".join(map(str, sys.version_info[:2])) venv_dir = VENV_DIR / python_version / requirements_hash venv_bin_dir = venv_dir / "bin" venv_requirements_file = venv_dir / "requirements.txt" venv_marker_file = venv_dir / "marker.txt" env = os.environ.copy() env.pop("PYTHONPATH", None) # avoid interference from ansible being injected into the environment env.update( PATH=os.pathsep.join((str(venv_bin_dir), env["PATH"])), ) if not venv_marker_file.exists(): display.show(f"Creating a Python {python_version} virtual environment ({requirements_hash}) ...") if venv_dir.exists(): shutil.rmtree(venv_dir) venv.create(venv_dir, with_pip=True) venv_requirements_file.write_text(requirements_content) run("pip", "install", "-r", venv_requirements_file, env=env | PIP_ENV, cwd=CHECKOUT_DIR) venv_marker_file.touch() return env
Parse and return the current ansible-core version, the provided version or the version from the provided commit.
def get_ansible_version(version: str | None = None, /, commit: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version: """Parse and return the current ansible-core version, the provided version or the version from the provided commit.""" if version and commit: raise ValueError("Specify only one of: version, commit") if version: source = "" else: if commit: current = git("show", f"{commit}:{ANSIBLE_RELEASE_FILE.relative_to(CHECKOUT_DIR)}", capture_output=True).stdout else: current = ANSIBLE_RELEASE_FILE.read_text() if not (match := ANSIBLE_VERSION_PATTERN.search(current)): raise RuntimeError("Failed to get the ansible-core version.") version = match.group("version") source = f" in '{ANSIBLE_RELEASE_FILE}'" try: parsed_version = Version(version) except InvalidVersion: raise ApplicationError(f"Invalid version{source}: {version}") from None parsed_version = mode.apply(parsed_version) return parsed_version
Return the next version after the specified version.
def get_next_version(version: Version, /, final: bool = False, pre: str | None = None, mode: VersionMode = VersionMode.DEFAULT) -> Version: """Return the next version after the specified version.""" # TODO: consider using development versions instead of post versions after a release is published pre = pre or "" micro = version.micro if version.is_devrelease: # The next version of a development release is the same version without the development component. if final: pre = "" elif not pre and version.pre is not None: pre = f"{version.pre[0]}{version.pre[1]}" elif version.is_postrelease: # The next version of a post release is the next pre-release *or* micro release component. if final: pre = "" elif not pre and version.pre is not None: pre = f"{version.pre[0]}{version.pre[1] + 1}" if version.pre is None: micro = version.micro + 1 else: raise ApplicationError(f"Version {version} is not a development or post release version.") version = f"{version.major}.{version.minor}.{micro}{pre}" return get_ansible_version(version, mode=mode)
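The branching above is easier to follow with a few concrete transitions. This sketch is illustrative only: it assumes the functions above are importable, that VersionMode.DEFAULT accepts plain release and pre-release results, and the version numbers themselves are hypothetical.

# Illustration only; not part of the release tooling.
from packaging.version import Version

print(get_next_version(Version("2.19.0.dev0"), pre="rc1"))       # 2.19.0rc1 (dev release -> requested pre-release)
print(get_next_version(Version("2.19.0rc1.post0")))              # 2.19.0rc2 (post of an rc -> next rc)
print(get_next_version(Version("2.19.0rc1.post0"), final=True))  # 2.19.0    (finalize the rc)
print(get_next_version(Version("2.19.0.post0")))                 # 2.19.1    (post of a final -> next micro release)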
Verify the requested version is valid for the current version.
def check_ansible_version(current_version: Version, requested_version: Version) -> None: """Verify the requested version is valid for the current version.""" if requested_version.release[:2] != current_version.release[:2]: raise ApplicationError(f"Version {requested_version} does not match the major and minor portion of the current version: {current_version}") if requested_version < current_version: raise ApplicationError(f"Version {requested_version} is older than the current version: {current_version}")
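A couple of illustrative checks, assuming check_ansible_version is in scope; the versions are hypothetical.

# Illustration only.
from packaging.version import Version

check_ansible_version(Version("2.19.0.dev0"), Version("2.19.0b1"))  # OK: same major.minor and not older
# check_ansible_version(Version("2.19.0"), Version("2.20.0"))       # would raise: major.minor mismatch
# check_ansible_version(Version("2.19.2"), Version("2.19.1"))       # would raise: older than the current version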
Set the current ansible-core version.
def set_ansible_version(current_version: Version, requested_version: Version) -> None: """Set the current ansible-core version.""" check_ansible_version(current_version, requested_version) if requested_version == current_version: return display.show(f"Updating version {current_version} to {requested_version} ...") current = ANSIBLE_RELEASE_FILE.read_text() updated = ANSIBLE_VERSION_PATTERN.sub(ANSIBLE_VERSION_FORMAT.format(version=requested_version), current) if current == updated: raise RuntimeError("Failed to set the ansible-core version.") ANSIBLE_RELEASE_FILE.write_text(updated)
Read the specified sdist and write out a new copy with uniform file metadata at the specified location.
def create_reproducible_sdist(original_path: pathlib.Path, output_path: pathlib.Path, mtime: int) -> None: """Read the specified sdist and write out a new copy with uniform file metadata at the specified location.""" with tarfile.open(original_path) as original_archive: with tempfile.TemporaryDirectory() as temp_dir: tar_file = pathlib.Path(temp_dir) / "sdist.tar" with tarfile.open(tar_file, mode="w") as tar_archive: for original_info in original_archive.getmembers(): # type: tarfile.TarInfo tar_archive.addfile(create_reproducible_tar_info(original_info, mtime), original_archive.extractfile(original_info)) with tar_file.open("rb") as tar_archive: with gzip.GzipFile(output_path, "wb", mtime=mtime) as output_archive: shutil.copyfileobj(tar_archive, output_archive)
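Pinning the gzip mtime matters because gzip embeds a timestamp in its header; without it, two otherwise identical rebuilds of the sdist would not be byte-for-byte equal. A small standalone illustration of that point (not part of the release tooling):

# Illustration only: same payload, same compression, different gzip header timestamps.
import gzip
import io


def gz(data: bytes, mtime: int) -> bytes:
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode="wb", mtime=mtime) as f:
        f.write(data)
    return buf.getvalue()


print(gz(b"payload", mtime=0) == gz(b"payload", mtime=0))  # True
print(gz(b"payload", mtime=0) == gz(b"payload", mtime=1))  # False (only the header timestamp differs)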
Return a copy of the given TarInfo with uniform file metadata.
def create_reproducible_tar_info(original: tarfile.TarInfo, mtime: int) -> tarfile.TarInfo: """Return a copy of the given TarInfo with uniform file metadata.""" sanitized = tarfile.TarInfo() sanitized.name = original.name sanitized.size = original.size sanitized.mtime = mtime sanitized.mode = (original.mode & ~(stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)) | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR sanitized.type = original.type sanitized.linkname = original.linkname sanitized.uid = 0 sanitized.gid = 0 sanitized.uname = "root" sanitized.gname = "root" if original.mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH): sanitized.mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH return sanitized
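A small sketch of what the permission normalization above does to typical modes, assuming the function is in scope; the member name is made up. Regular files come out as 0o644 and anything with an execute bit comes out as 0o755.

# Illustration only.
import tarfile

for mode in (0o600, 0o640, 0o664, 0o750, 0o755):
    info = tarfile.TarInfo(name="example.txt")  # hypothetical member
    info.mode = mode
    sanitized = create_reproducible_tar_info(info, mtime=0)
    print(oct(mode), "->", oct(sanitized.mode))  # 0o600 -> 0o644, 0o750 -> 0o755, ...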
Test the specified built artifact by installing it in a venv and running some basic commands.
def test_built_artifact(path: pathlib.Path) -> None: """Test the specified built artifact by installing it in a venv and running some basic commands.""" with tempfile.TemporaryDirectory() as temp_dir_name: temp_dir = pathlib.Path(temp_dir_name) venv_dir = temp_dir / "venv" venv_bin_dir = venv_dir / "bin" venv.create(venv_dir, with_pip=True) env = os.environ.copy() env.pop("PYTHONPATH", None) # avoid interference from ansible being injected into the environment env.update( PATH=os.pathsep.join((str(venv_bin_dir), env["PATH"])), ) run("pip", "install", path, env=env | PIP_ENV, cwd=CHECKOUT_DIR) run("ansible", "--version", env=env, cwd=CHECKOUT_DIR) run("ansible-test", "--version", env=env, cwd=CHECKOUT_DIR)
Return the path to the sdist file.
def get_sdist_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path: """Return the path to the sdist file.""" return dist_dir / f"ansible_core-{version}.tar.gz"
Return the path to the wheel file.
def get_wheel_path(version: Version, dist_dir: pathlib.Path = DIST_DIR) -> pathlib.Path: """Return the path to the wheel file.""" return dist_dir / f"ansible_core-{version}-py3-none-any.whl"
Return the digest for the specified file.
def calculate_digest(path: pathlib.Path) -> str: """Return the digest for the specified file.""" # TODO: use hashlib.file_digest once Python 3.11 is the minimum supported version return hashlib.new(DIGEST_ALGORITHM, path.read_bytes()).hexdigest()
Return information about the release artifacts hosted on PyPI.
def get_release_artifact_details(repository: str, version: Version, validate: bool) -> list[ReleaseArtifact]: """Return information about the release artifacts hosted on PyPI.""" endpoint = PYPI_ENDPOINTS[repository] url = f"{endpoint}/ansible-core/{version}/json" opener = urllib.request.build_opener() response: http.client.HTTPResponse try: with opener.open(url) as response: data = json.load(response) except urllib.error.HTTPError as ex: if ex.status == http.HTTPStatus.NOT_FOUND: raise ApplicationError(f"Version {version} not found on PyPI.") from None raise RuntimeError(f"Failed to get {version} from PyPI: {ex}") from ex artifacts = [describe_release_artifact(version, item, validate) for item in data["urls"]] expected_artifact_types = {"bdist_wheel", "sdist"} found_artifact_types = set(artifact.package_type for artifact in artifacts) if found_artifact_types != expected_artifact_types: raise RuntimeError(f"Expected {expected_artifact_types} artifact types, but found {found_artifact_types} instead.") return artifacts
Return release artifact details extracted from the given PyPI data.
def describe_release_artifact(version: Version, item: dict[str, t.Any], validate: bool) -> ReleaseArtifact: """Return release artifact details extracted from the given PyPI data.""" package_type = item["packagetype"] # The artifact URL is documented as stable, so is safe to put in release notes and announcements. # See: https://github.com/pypi/warehouse/blame/c95be4a1055f4b36a8852715eb80318c81fc00ca/docs/api-reference/integration-guide.rst#L86-L90 url = item["url"] pypi_size = item["size"] pypi_digest = item["digests"][DIGEST_ALGORITHM] if package_type == "bdist_wheel": local_artifact_file = get_wheel_path(version) package_label = "Built Distribution" elif package_type == "sdist": local_artifact_file = get_sdist_path(version) package_label = "Source Distribution" else: raise NotImplementedError(f"Package type '{package_type}' is not supported.") if validate: try: local_size = local_artifact_file.stat().st_size local_digest = calculate_digest(local_artifact_file) except FileNotFoundError: raise ApplicationError(f"Missing local artifact: {local_artifact_file.relative_to(CHECKOUT_DIR)}") from None if local_size != pypi_size: raise ApplicationError(f"The {version} local {package_type} size {local_size} does not match the PyPI size {pypi_size}.") if local_digest != pypi_digest: raise ApplicationError(f"The {version} local {package_type} digest '{local_digest}' does not match the PyPI digest '{pypi_digest}'.") return ReleaseArtifact( package_type=package_type, package_label=package_label, url=url, size=pypi_size, digest=pypi_digest, digest_algorithm=DIGEST_ALGORITHM.upper(), )
Return the next release date.
def get_next_release_date(start: datetime.date, step: int, after: datetime.date) -> datetime.date: """Return the next release date.""" if start > after: raise ValueError(f"{start=} is greater than {after=}") current_delta = after - start release_delta = datetime.timedelta(days=(math.floor(current_delta.days / step) + 1) * step) release = start + release_delta return release
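A quick worked example of the cadence arithmetic, assuming get_next_release_date is in scope; the dates are illustrative, not an official schedule.

# Illustration only.
import datetime

start = datetime.date(2021, 8, 9)  # first release in a 28-day cadence
print(get_next_release_date(start, 28, datetime.date(2021, 8, 16)))  # 2021-09-06
print(get_next_release_date(start, 28, datetime.date(2021, 9, 6)))   # 2021-10-04 (an exact hit advances a full cycle)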
Create and return a jinja2 environment.
def create_template_environment() -> jinja2.Environment: """Create and return a jinja2 environment.""" env = jinja2.Environment() env.filters.update( basename=os.path.basename, ) return env
Create and return GitHub release notes.
def create_github_release_notes(upstream: Remote, repository: str, version: Version, validate: bool) -> str: """Create and return GitHub release notes.""" env = create_template_environment() template = env.from_string(GITHUB_RELEASE_NOTES_TEMPLATE) variables = dict( version=version, releases=get_release_artifact_details(repository, version, validate), changelog=f"https://github.com/{upstream.user}/{upstream.repo}/blob/v{version}/changelogs/CHANGELOG-v{version.major}.{version.minor}.rst", ) release_notes = template.render(**variables).strip() return release_notes
Create and return a release announcement message.
def create_release_announcement(upstream: Remote, repository: str, version: Version, validate: bool) -> ReleaseAnnouncement: """Create and return a release announcement message.""" env = create_template_environment() subject_template = env.from_string(RELEASE_ANNOUNCEMENT_SUBJECT_TEMPLATE) body_template = env.from_string(RELEASE_ANNOUNCEMENT_BODY_TEMPLATE) today = datetime.datetime.now(tz=datetime.timezone.utc).date() variables = dict( version=version, info=dict( name="ansible-core", short=f"{version.major}.{version.minor}", releases=get_release_artifact_details(repository, version, validate), ), next_rc=get_next_release_date(datetime.date(2021, 8, 9), 28, today), next_ga=get_next_release_date(datetime.date(2021, 8, 16), 28, today), rc=version.pre and version.pre[0] == "rc", beta=version.pre and version.pre[0] == "b", alpha=version.pre and version.pre[0] == "a", major=version.micro == 0, upstream=upstream, ) if version.pre and version.pre[0] in ("a", "b"): display.warning("The release announcement template does not populate the date for the next release.") subject = subject_template.render(**variables).strip() body = body_template.render(**variables).strip() message = ReleaseAnnouncement( subject=subject, body=body, ) return message
Show instructions for the release process.
def instructions() -> None: """Show instructions for the release process.""" message = """ Releases must be performed using an up-to-date checkout of a fork of the Ansible repository. 1. Make sure your checkout is up-to-date. 2. Run the `prepare` command [1], then: a. Submit the PR opened in the browser. b. Wait for CI to pass. c. Merge the PR. 3. Update your checkout to include the commit from the PR which was just merged. 4. Run the `complete` command [2], then: a. Submit the GitHub release opened in the browser. b. Submit the PR opened in the browser. c. Send the release announcement opened in your browser. d. Wait for CI to pass. e. Merge the PR. [1] Use the `--final`, `--pre` or `--version` option for control over the version. [2] During the `publish` step, `twine` may prompt for credentials. """ display.show(message.strip())
Show the current and next ansible-core version.
def show_version(final: bool = False, pre: str | None = None) -> None: """Show the current and next ansible-core version.""" current_version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST) display.show(f"Current version: {current_version}") try: next_version = get_next_version(current_version, final=final, pre=pre) except ApplicationError as ex: display.show(f" Next version: Unknown - {ex}") else: display.show(f" Next version: {next_version}") check_ansible_version(current_version, next_version)
Verify the git repository is in a usable state for creating a pull request.
def check_state(allow_stale: bool = False) -> None: """Verify the git repository is in a usable state for creating a pull request.""" get_git_state(get_ansible_version(), allow_stale)
Prepare a release.
def prepare(final: bool = False, pre: str | None = None, version: str | None = None) -> None: """Prepare a release.""" command.run( update_version, check_state, generate_summary, generate_changelog, create_release_pr, )
Update the version embedded in the source code.
def update_version(final: bool = False, pre: str | None = None, version: str | None = None) -> None: """Update the version embedded in the source code.""" current_version = get_ansible_version(mode=VersionMode.REQUIRE_DEV_POST) if version: requested_version = get_ansible_version(version) else: requested_version = get_next_version(current_version, final=final, pre=pre) set_ansible_version(current_version, requested_version)
Generate a summary changelog fragment for this release.
def generate_summary() -> None: """Generate a summary changelog fragment for this release.""" version = get_ansible_version() release_date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d") summary_path = CHANGELOGS_FRAGMENTS_DIR / f"{version}_summary.yaml" major_minor = f"{version.major}.{version.minor}" content = f""" release_summary: | | Release Date: {release_date} | `Porting Guide <https://docs.ansible.com/ansible-core/{major_minor}/porting_guides/porting_guide_core_{major_minor}.html>`__ """ summary_path.write_text(content.lstrip())
Generate the changelog and validate the results.
def generate_changelog() -> None: """Generate the changelog and validate the results.""" env = ensure_venv() env.update( PATH=os.pathsep.join((str(ANSIBLE_BIN_DIR), env["PATH"])), PYTHONPATH=ANSIBLE_LIB_DIR, ) # TODO: consider switching back to the original changelog generator instead of using antsibull-changelog run("antsibull-changelog", "release", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR) run("antsibull-changelog", "generate", "-vv", "--use-ansible-doc", env=env, cwd=CHECKOUT_DIR) run("ansible-test", "sanity", CHANGELOGS_DIR, ANSIBLE_RELEASE_FILE, env=env, cwd=CHECKOUT_DIR)
Create a branch and open a browser tab for creating a release pull request.
def create_release_pr(allow_stale: bool = False) -> None: """Create a branch and open a browser tab for creating a release pull request.""" version = get_ansible_version() pr = prepare_pull_request( version=version, branch=f"release-{version}-{secrets.token_hex(4)}", title=f"New release v{version}", add=( CHANGELOGS_DIR, ANSIBLE_RELEASE_FILE, ), allow_stale=allow_stale, ) create_pull_request(pr)
Complete a release after the prepared changes have been merged.
def complete(repository: str, mailto: bool = True, allow_dirty: bool = False) -> None: """Complete a release after the prepared changes have been merged.""" command.run( check_state, build, test, publish, tag_release, post_version, create_post_pr, release_announcement, )
Build the sdist and wheel.
def build(allow_dirty: bool = False) -> None: """Build the sdist and wheel.""" version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST) env = ensure_venv() dirty = git("status", "--porcelain", "--untracked-files=all", capture_output=True).stdout.strip().splitlines() if dirty: with suppress_when(allow_dirty): raise ApplicationError(f"There are {len(dirty)} files which are untracked and/or have changes, which will be omitted from the build.") sdist_file = get_sdist_path(version) wheel_file = get_wheel_path(version) with tempfile.TemporaryDirectory(dir=DIST_DIR, prefix=f"build-{version}-", suffix=".tmp") as temp_dir_name: temp_dir = pathlib.Path(temp_dir_name) dist_dir = temp_dir / "dist" commit_time = int(git("show", "-s", "--format=%ct", capture_output=True).stdout) env.update( SOURCE_DATE_EPOCH=commit_time, ) git("worktree", "add", "-d", temp_dir) try: run("python", "-m", "build", env=env, cwd=temp_dir) create_reproducible_sdist(get_sdist_path(version, dist_dir), sdist_file, commit_time) get_wheel_path(version, dist_dir).rename(wheel_file) finally: git("worktree", "remove", temp_dir)
Test the sdist and wheel.
def test() -> None: """Test the sdist and wheel.""" command.run( test_sdist, test_wheel, )
Test the sdist.
def test_sdist() -> None: """Test the sdist.""" version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST) sdist_file = get_sdist_path(version) with tempfile.TemporaryDirectory() as temp_dir_name: temp_dir = pathlib.Path(temp_dir_name) with contextlib.ExitStack() as stack: try: sdist = stack.enter_context(tarfile.open(sdist_file)) except FileNotFoundError: raise ApplicationError(f"Missing sdist: {sdist_file.relative_to(CHECKOUT_DIR)}") from None # deprecated: description='extractall fallback without filter' python_version='3.11' if hasattr(tarfile, 'data_filter'): sdist.extractall(temp_dir, filter='data') # type: ignore[call-arg] else: sdist.extractall(temp_dir) pyc_glob = "*.pyc*" pyc_files = sorted(path.relative_to(temp_dir) for path in temp_dir.rglob(pyc_glob)) if pyc_files: raise ApplicationError(f"Found {len(pyc_files)} '{pyc_glob}' file(s): {', '.join(map(str, pyc_files))}") test_built_artifact(sdist_file)
Test the wheel.
def test_wheel() -> None: """Test the wheel.""" version = get_ansible_version(mode=VersionMode.ALLOW_DEV_POST) wheel_file = get_wheel_path(version) with tempfile.TemporaryDirectory() as temp_dir_name: temp_dir = pathlib.Path(temp_dir_name) with contextlib.ExitStack() as stack: try: wheel = stack.enter_context(zipfile.ZipFile(wheel_file)) except FileNotFoundError: raise ApplicationError(f"Missing wheel for version {version}: {wheel_file}") from None wheel.extractall(temp_dir) test_built_artifact(wheel_file)
Publish to PyPI.
def publish(repository: str, prompt: bool = True) -> None: """Publish to PyPI.""" version = get_ansible_version() sdist_file = get_sdist_path(version) wheel_file = get_wheel_path(version) env = ensure_venv() if prompt: try: while input(f"Do you want to publish {version} to the '{repository}' repository?\nEnter the repository name to confirm: ") != repository: pass except KeyboardInterrupt: display.show("") raise ApplicationError("Publishing was aborted by the user.") from None run("twine", "upload", "-r", repository, sdist_file, wheel_file, env=env, cwd=CHECKOUT_DIR)
Create a GitHub release using the current or specified commit.
def tag_release(repository: str, commit: str | None = None, validate: bool = True, allow_tag: bool = False) -> None:
    """Create a GitHub release using the current or specified commit."""
    upstream = get_remotes().upstream

    if commit:
        git("fetch", upstream.name)  # fetch upstream to make sure the commit can be found

    commit = get_commit(commit)
    version = get_ansible_version(commit=commit)
    tag = f"v{version}"

    if upstream_tag := git("ls-remote", "--tags", upstream.name, tag, capture_output=True).stdout.strip():
        with suppress_when(allow_tag):
            raise ApplicationError(f"Version {version} has already been tagged: {upstream_tag}")

    upstream_branch = get_upstream_branch(version)
    upstream_refs = git("branch", "-r", "--format=%(refname)", "--contains", commit, capture_output=True).stdout.strip().splitlines()
    upstream_ref = f"refs/remotes/{upstream.name}/{upstream_branch}"

    if upstream_ref not in upstream_refs:
        raise ApplicationError(f"Commit {commit} not found in {upstream_ref}. Found {len(upstream_refs)} upstream ref(s): {', '.join(upstream_refs)}")

    body = create_github_release_notes(upstream, repository, version, validate)

    release = GitHubRelease(
        user=upstream.user,
        repo=upstream.repo,
        target=commit,
        tag=tag,
        title=tag,
        body=body,
        pre_release=version.pre is not None,
    )

    create_github_release(release)
Set the post release version.
def post_version() -> None: """Set the post release version.""" current_version = get_ansible_version() requested_version = get_ansible_version(f"{current_version}.post0", mode=VersionMode.REQUIRE_POST) set_ansible_version(current_version, requested_version)
Create a branch and open a browser tab for creating a post release pull request.
def create_post_pr(allow_stale: bool = False) -> None: """Create a branch and open a browser tab for creating a post release pull request.""" version = get_ansible_version(mode=VersionMode.REQUIRE_POST) pr = prepare_pull_request( version=version, branch=f"release-{version}-{secrets.token_hex(4)}", title=f"Update Ansible release version to v{version}.", add=(ANSIBLE_RELEASE_FILE,), allow_stale=allow_stale, ) create_pull_request(pr)
Generate a release announcement for the current or specified version.
def release_announcement(repository: str, version: str | None = None, mailto: bool = True, validate: bool = True) -> None: """Generate a release announcement for the current or specified version.""" parsed_version = get_ansible_version(version, mode=VersionMode.STRIP_POST) upstream = get_remotes().upstream message = create_release_announcement(upstream, repository, parsed_version, validate) recipient_list = PRE_RELEASE_ANNOUNCEMENT_RECIPIENTS if parsed_version.is_prerelease else FINAL_RELEASE_ANNOUNCEMENT_RECIPIENTS recipients = ", ".join(recipient_list) if mailto: to = urllib.parse.quote(recipients) params = dict( subject=message.subject, body=message.body, ) query_string = urllib.parse.urlencode(params) url = f"mailto:{to}?{query_string}" display.show("Opening email client through default web browser ...") webbrowser.open(url) else: print(f"TO: {recipients}") print(f"SUBJECT: {message.subject}") print() print(message.body)
Main program entry point.
def main() -> None: """Main program entry point.""" parser = argparse.ArgumentParser(description=__doc__) subparsers = parser.add_subparsers(required=True, metavar='command') man_parser = subparsers.add_parser('man', description=build_man.__doc__, help=build_man.__doc__) man_parser.add_argument('--output-dir', required=True, type=pathlib.Path, metavar='DIR', help='output directory') man_parser.add_argument('--template-file', default=SCRIPT_DIR / 'man.j2', type=pathlib.Path, metavar='FILE', help='template file') man_parser.set_defaults(func=build_man) rst_parser = subparsers.add_parser('rst', description=build_rst.__doc__, help=build_rst.__doc__) rst_parser.add_argument('--output-dir', required=True, type=pathlib.Path, metavar='DIR', help='output directory') rst_parser.add_argument('--template-file', default=SCRIPT_DIR / 'rst.j2', type=pathlib.Path, metavar='FILE', help='template file') rst_parser.set_defaults(func=build_rst) json_parser = subparsers.add_parser('json', description=build_json.__doc__, help=build_json.__doc__) json_parser.add_argument('--output-file', required=True, type=pathlib.Path, metavar='FILE', help='output file') json_parser.set_defaults(func=build_json) try: # noinspection PyUnresolvedReferences import argcomplete except ImportError: pass else: argcomplete.autocomplete(parser) args = parser.parse_args() kwargs = {name: getattr(args, name) for name in inspect.signature(args.func).parameters} sys.path.insert(0, str(SOURCE_DIR / 'lib')) args.func(**kwargs)
Build man pages for ansible-core CLI programs.
def build_man(output_dir: pathlib.Path, template_file: pathlib.Path) -> None: """Build man pages for ansible-core CLI programs.""" if not template_file.resolve().is_relative_to(SCRIPT_DIR): warnings.warn("Custom templates are intended for debugging purposes only. The data model may change in future releases without notice.") import docutils.core import docutils.writers.manpage output_dir.mkdir(exist_ok=True, parents=True) for cli_name, source in generate_rst(template_file).items(): with io.StringIO(source) as source_file: docutils.core.publish_file( source=source_file, destination_path=output_dir / f'{cli_name}.1', writer=docutils.writers.manpage.Writer(), )
Build RST documentation for ansible-core CLI programs.
def build_rst(output_dir: pathlib.Path, template_file: pathlib.Path) -> None: """Build RST documentation for ansible-core CLI programs.""" if not template_file.resolve().is_relative_to(SCRIPT_DIR): warnings.warn("Custom templates are intended for debugging purposes only. The data model may change in future releases without notice.") output_dir.mkdir(exist_ok=True, parents=True) for cli_name, source in generate_rst(template_file).items(): (output_dir / f'{cli_name}.rst').write_text(source)