Dataset columns:
  code        string    lengths 26 to 870k
  docstring   string    lengths 1 to 65.6k
  func_name   string    lengths 1 to 194
  language    string    1 distinct value
  repo        string    lengths 8 to 68
  path        string    lengths 5 to 194
  url         string    lengths 46 to 254
  license     string    4 distinct values
def load_config():
    """Load config file."""
    parent_dir = Path(__file__).resolve().parent
    config_file = yaml.safe_load((parent_dir / "config.yaml").read_text())
    config = ObjDict(config_file["smarts"])
    return config
Load config file.
load_config
python
huawei-noah/SMARTS
examples/e11_platoon/train/run.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e11_platoon/train/run.py
MIT
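The same loading pattern as `load_config` above, written out standalone with its imports; `config.yaml` and the top-level `smarts` key come from the function itself, while the surrounding script context is assumed:

from pathlib import Path
import yaml

parent_dir = Path(__file__).resolve().parent
raw = yaml.safe_load((parent_dir / "config.yaml").read_text())
smarts_cfg = raw["smarts"]  # plain dict; the example wraps this in ObjDict for attribute-style access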
def empty_parser(program: Optional[str] = None):
    """This factory method returns an empty `argparse.ArgumentParser` with primitive
    configuration.

    You can extend it with more `parser.add_argument(...)` calls or obtain the
    arguments via `parser.parse_args()`.
    """
    if not program:
        from pathlib import Path

        program = Path(__file__).stem
    parser = argparse.ArgumentParser(program)
    return parser
This factory method returns an empty `argparse.ArgumentParser` with primitive configuration. You can extend it with more `parser.add_argument(...)` calls or obtain the arguments via `parser.parse_args()`.
empty_parser
python
huawei-noah/SMARTS
examples/tools/argument_parser.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/tools/argument_parser.py
MIT
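As the docstring says, the parser returned by `empty_parser` is meant to be extended; a minimal sketch (the `--episodes` flag is purely illustrative, not part of the original file):

parser = empty_parser("my_program")
parser.add_argument("--episodes", type=int, default=10)  # illustrative extra flag
args = parser.parse_args(["--episodes", "3"])
assert args.episodes == 3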
def minimal_argument_parser(program: Optional[str] = None):
    """This factory method returns a minimal `argparse.ArgumentParser` with the
    minimum subset of arguments that should be supported.

    You can extend it with more `parser.add_argument(...)` calls or obtain the
    arguments via `parser.parse_args()`.
    """
    if not program:
        from pathlib import Path

        program = Path(__file__).stem
    parser = argparse.ArgumentParser(program)
    parser.add_argument(
        "scenarios",
        help="A list of scenarios. Each element can be either the scenario to"
        "run or a directory of scenarios to sample from. See `scenarios/`"
        "folder for some samples you can use.",
        type=str,
        nargs="*",
    )
    parser.add_argument(
        "--episodes",
        help="The number of episodes to run the simulation for.",
        type=int,
        default=10,
    )
    parser.add_argument(
        "--headless", help="Run the simulation in headless mode.", action="store_true"
    )
    parser.add_argument(
        "--max_episode_steps",
        help="Maximum number of steps to run each episode for.",
        type=int,
        default=100,
    )
    return parser
This factory method returns a minimal `argparse.ArgumentParser` with the minimum subset of arguments that should be supported. You can extend it with more `parser.add_argument(...)` calls or obtain the arguments via `parser.parse_args()`.
minimal_argument_parser
python
huawei-noah/SMARTS
examples/tools/argument_parser.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/tools/argument_parser.py
MIT
def default_argument_parser(program: Optional[str] = None):
    """This factory method returns a vanilla `argparse.ArgumentParser` with a
    slightly broader subset of arguments that should be supported.

    You can extend it with more `parser.add_argument(...)` calls or obtain the
    arguments via `parser.parse_args()`.
    """
    parser = minimal_argument_parser(program=program)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument(
        "--sim_name",
        help="Simulation name.",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--sumo_port", help="Run SUMO with a specified port.", type=int, default=None
    )
    return parser
This factory method returns a vanilla `argparse.ArgumentParser` with a slightly broader subset of arguments that should be supported. You can extend it with more `parser.add_argument(...)` calls or obtain the arguments via `parser.parse_args()`.
default_argument_parser
python
huawei-noah/SMARTS
examples/tools/argument_parser.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/tools/argument_parser.py
MIT
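A usage sketch for the parsers above, using only the arguments they register; the scenario path is a placeholder:

parser = default_argument_parser("example")
args = parser.parse_args(
    ["scenarios/sumo/loop", "--episodes", "5", "--headless", "--seed", "7"]
)
# args.scenarios == ["scenarios/sumo/loop"], args.episodes == 5, args.headless is True,
# args.seed == 7, and args.max_episode_steps falls back to its default of 100.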
def format(self, action: int, prev_heading: float):
    """Adapts the action input to the wrapped environment.

    Note: Users should not directly call this method.
    """
    wrapped_act = self._wrapper(action, prev_heading)
    return wrapped_act
Adapts the action input to the wrapped environment. Note: Users should not directly call this method.
format
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/format_action.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/format_action.py
MIT
def filter(self, obs: Dict[str, Any]) -> Dict[str, Any]:
    """Adapts the environment's observation."""
    # fmt: off

    # Ego's heading with respect to the map's coordinate system.
    # Note: All angles returned by smarts is with respect to the map's coordinate system.
    # On the map, angle is zero at positive y axis, and increases anti-clockwise.
    ego_heading = (obs["ego_vehicle_state"]["heading"] + np.pi) % (2 * np.pi) - np.pi
    ego_pos = obs["ego_vehicle_state"]["position"]

    # Get rgb image, remove road, and replace other egos (if any) as background vehicles
    rgb = obs["top_down_rgb"]
    h, w, _ = rgb.shape
    rgb_noroad = replace_rgb_image_color(rgb=rgb, old_color=[self._road_color, self._lane_divider_color, self._edge_divider_color], new_color=self._no_color)
    rgb_ego = replace_rgb_image_color(rgb=rgb_noroad, old_color=[self._ego_color], new_color=self._traffic_color, mask=self._rgb_mask)

    # Superimpose waypoints onto rgb image
    wps = obs["waypoint_paths"]["position"][0:11, 3:, 0:3]
    for path in wps[:]:
        wps_valid = points_to_pixels(
            points=path,
            center_position=ego_pos,
            heading=ego_heading,
            width=w,
            height=h,
            resolution=self._res,
        )
        for point in wps_valid:
            img_x, img_y = point[0], point[1]
            if all(rgb_ego[img_y, img_x, :] == self._no_color):
                rgb_ego[img_y, img_x, :] = self._wps_color

    # Superimpose goal position onto rgb image
    if not all((goal := obs["ego_vehicle_state"]["mission"]["goal_position"]) == np.zeros((3,))):
        goal_pixel = points_to_pixels(
            points=np.expand_dims(goal, axis=0),
            center_position=ego_pos,
            heading=ego_heading,
            width=w,
            height=h,
            resolution=self._res,
        )
        if len(goal_pixel) != 0:
            img_x, img_y = goal_pixel[0][0], goal_pixel[0][1]
            if all(rgb_ego[img_y, img_x, :] == self._no_color) or all(rgb_ego[img_y, img_x, :] == self._wps_color):
                rgb_ego[
                    max(img_y - self._blur_radius, 0):min(img_y + self._blur_radius, h),
                    max(img_x - self._blur_radius, 0):min(img_x + self._blur_radius, w),
                    :,
                ] = self._goal_color

    # Crop image
    rgb_ego = rgb_ego[self._crop[2]:h - self._crop[3], self._crop[0]:w - self._crop[1], :]

    # Channel first rgb
    rgb_ego = rgb_ego.transpose(2, 0, 1)
    filtered_obs = np.uint8(rgb_ego)

    return filtered_obs
Adapts the environment's observation.
filter
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/filter_obs.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/filter_obs.py
MIT
def __init__(self, num_stack, top_down_rgb, crop, action_space_type):
    """All policy initialization matters, including loading of model, is
    performed here. To be implemented by the user.
    """
    from contrib_policy import network
    from contrib_policy.filter_obs import FilterObs
    from contrib_policy.format_action import FormatAction
    from contrib_policy.frame_stack import FrameStack
    from contrib_policy.make_dict import MakeDict

    self._model = self._get_model()

    self._filter_obs = FilterObs(top_down_rgb=top_down_rgb, crop=crop)
    self._frame_stack = FrameStack(
        input_space=self._filter_obs.observation_space,
        num_stack=num_stack,
        stack_axis=0,
    )
    self._frame_stack.reset()
    self._make_dict = MakeDict(input_space=self._frame_stack.observation_space)

    self.observation_space = self._make_dict.observation_space

    self._format_action = FormatAction(action_space_type=action_space_type)
    self.action_space = self._format_action.action_space
All policy initialization matters, including loading of model, is performed here. To be implemented by the user.
__init__
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/policy.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/policy.py
MIT
def act(self, obs):
    """Mandatory act function to be implemented by user."""
    processed_obs = self._process(obs)
    action, _ = self._model.predict(observation=processed_obs, deterministic=True)
    formatted_action = self._format_action.format(
        action=int(action), prev_heading=obs["ego_vehicle_state"]["heading"]
    )
    return formatted_action
Mandatory act function to be implemented by user.
act
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/policy.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/policy.py
MIT
def __init__(self, input_space: gym.Space, num_stack: int = 3, stack_axis: int = 0):
    """
    Args:
        num_stack (int, optional): Number of frames to be stacked. Defaults to 3.
        stack_axis (int, optional): An int specifying the dimension over which
            to stack each observation.
    """
    assert num_stack > 1, f"Expected num_stack > 1, but got {num_stack}."
    self._num_stack = num_stack
    self._frames = deque(maxlen=self._num_stack)

    assert stack_axis >= 0 and stack_axis < len(input_space.shape)
    self._stack_axis = stack_axis

    dim_multiplier = np.ones_like(input_space.shape)
    dim_multiplier[stack_axis] = num_stack
    shape = dim_multiplier * input_space.shape
    self.observation_space = gym.spaces.Box(
        low=0,
        high=255,
        shape=shape,
        dtype=np.uint8,
    )
Args: num_stack (int, optional): Number of frames to be stacked. Defaults to 3. stack_axis (int, optional): An int specifying the dimension over which to stack each observation.
__init__
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/frame_stack.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/frame_stack.py
MIT
def _stack(self, obs: np.ndarray) -> np.ndarray:
    """Update and return frames stack with given latest single frame."""
    self._frames.appendleft(obs)
    while len(self._frames) < self._num_stack:
        self._frames.appendleft(obs)
    frames_seq = tuple(self._frames)
    new_frames = copy.deepcopy(frames_seq)
    return np.concatenate(new_frames, axis=self._stack_axis)
Update and return frames stack with given latest single frame.
_stack
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/frame_stack.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/frame_stack.py
MIT
def stack(self, obs: np.ndarray) -> np.ndarray:
    """Stacks the latest obs with num_stack-1 past obs.

    Args:
        obs (np.ndarray): Numpy array input.

    Returns:
        np.ndarray: Stacked observation.
    """
    return self._stack(obs)
Stacks the latest obs with num_stack-1 past obs. Args: obs (np.ndarray): Numpy array input. Returns: np.ndarray: Stacked observation.
stack
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/frame_stack.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/frame_stack.py
MIT
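A small usage sketch for `FrameStack`, assuming a channel-first (3, 112, 112) observation space purely for illustration (the SMARTS example derives the real space from `FilterObs`), and assuming `gym` here refers to Gymnasium as imported in the example code:

import numpy as np
import gymnasium as gym

rgb_space = gym.spaces.Box(low=0, high=255, shape=(3, 112, 112), dtype=np.uint8)
frame_stack = FrameStack(input_space=rgb_space, num_stack=3, stack_axis=0)
frame_stack.reset()

frame = np.zeros((3, 112, 112), dtype=np.uint8)
stacked = frame_stack.stack(frame)            # first frame is repeated to fill the deque
assert stacked.shape == (9, 112, 112)         # num_stack * 3 channels along stack_axis=0
assert frame_stack.observation_space.shape == (9, 112, 112)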
def reset(self):
    """Resets the stacked obs."""
    self._frames.clear()
Resets the stacked obs.
reset
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/frame_stack.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/frame_stack.py
MIT
def make(self, obs: np.ndarray) -> Dict[str, np.ndarray]:
    """converts input into a dict.

    Args:
        obs (np.ndarray): Numpy array input.

    Returns:
        dict[str, np.ndarray]: A dictionary with key (string) corresponding to
            input obs (np.ndarray).
    """
    return {"rgb": obs}
converts input into a dict. Args: obs (np.ndarray): Numpy array input. Returns: dict[str, np.ndarray]: A dictionary with key (string) corresponding to input obs (np.ndarray).
make
python
huawei-noah/SMARTS
examples/e10_drive/inference/contrib_policy/make_dict.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/inference/contrib_policy/make_dict.py
MIT
def __init__(self, env: gym.Env, crop: Tuple[int, int, int, int]):
    """Constructor for the Reward wrapper."""
    super().__init__(env)
    self._total_dist = {}
    self._crop = crop
Constructor for the Reward wrapper.
__init__
python
huawei-noah/SMARTS
examples/e10_drive/train/reward.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/train/reward.py
MIT
def step(self, action):
    """Adapts the wrapped environment's step.

    Note: Users should not directly call this method.
    """
    obs, reward, terminated, truncated, info = self.env.step(action)
    wrapped_reward = self._reward(obs, reward)

    for agent_id, agent_obs in obs.items():
        # Accumulate total distance travelled
        self._total_dist[agent_id] = (
            self._total_dist.get(agent_id, 0) + agent_obs["distance_travelled"]
        )

        # If agent is done
        if terminated[agent_id] == True:
            if agent_obs["events"]["reached_goal"]:
                print(f"{agent_id}: Hooray! Reached goal.")
            elif agent_obs["events"]["reached_max_episode_steps"]:
                print(f"{agent_id}: Reached max episode steps.")
            elif (
                agent_obs["events"]["collisions"]
                | agent_obs["events"]["off_road"]
                | agent_obs["events"]["off_route"]
                | agent_obs["events"]["wrong_way"]
            ):
                pass
            else:
                print("Events: ", agent_obs["events"])
                raise Exception("Episode ended for unknown reason.")

            print(
                f"{agent_id}: Steps = {agent_obs['steps_completed']} "
                f"{agent_id}: Dist = {self._total_dist[agent_id]:.2f}"
            )

    return obs, wrapped_reward, terminated, truncated, info
Adapts the wrapped environment's step. Note: Users should not directly call this method.
step
python
huawei-noah/SMARTS
examples/e10_drive/train/reward.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/train/reward.py
MIT
def step(self, action):
    """Uses the :meth:`step` of the :attr:`env` that can be overwritten to
    change the returned data."""
    formatted_action = self._format_action.format(
        action=action, prev_heading=self._prev_heading
    )
    obs, reward, terminated, truncated, info = self.env.step(formatted_action)
    self._prev_heading = obs["ego_vehicle_state"]["heading"]
    obs = self._process(obs)
    return obs, reward, terminated, truncated, info
Uses the :meth:`step` of the :attr:`env` that can be overwritten to change the returned data.
step
python
huawei-noah/SMARTS
examples/e10_drive/train/preprocess.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/train/preprocess.py
MIT
def reset(self, *, seed=None, options=None):
    """Uses the :meth:`reset` of the :attr:`env` that can be overwritten to
    change the returned data."""
    self._frame_stack.reset()
    obs, info = self.env.reset(seed=seed, options=options)
    self._prev_heading = obs["ego_vehicle_state"]["heading"]
    obs = self._process(obs)
    return obs, info
Uses the :meth:`reset` of the :attr:`env` that can be overwritten to change the returned data.
reset
python
huawei-noah/SMARTS
examples/e10_drive/train/preprocess.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/train/preprocess.py
MIT
def load_config():
    """Load config file."""
    parent_dir = Path(__file__).resolve().parent
    config_file = yaml.safe_load((parent_dir / "config.yaml").read_text())
    config = ObjDict(config_file["smarts"])
    return config
Load config file.
load_config
python
huawei-noah/SMARTS
examples/e10_drive/train/run.py
https://github.com/huawei-noah/SMARTS/blob/master/examples/e10_drive/train/run.py
MIT
def mlp_permutation_spec(num_hidden_layers: int) -> PermutationSpec:
  """We assume that one permutation cannot appear in two axes of the same weight array."""
  assert num_hidden_layers >= 1
  return permutation_spec_from_axes_to_perm({
      "Dense_0/kernel": (None, "P_0"),
      **{f"Dense_{i}/kernel": (f"P_{i-1}", f"P_{i}")
         for i in range(1, num_hidden_layers)},
      **{f"Dense_{i}/bias": (f"P_{i}", )
         for i in range(num_hidden_layers)},
      f"Dense_{num_hidden_layers}/kernel": (f"P_{num_hidden_layers-1}", None),
      f"Dense_{num_hidden_layers}/bias": (None, ),
  })
We assume that one permutation cannot appear in two axes of the same weight array.
mlp_permutation_spec
python
samuela/git-re-basin
src/weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/weight_matching.py
MIT
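To make the spec above concrete, for num_hidden_layers=2 the dict passed to `permutation_spec_from_axes_to_perm` expands to the following (a hand-expansion of the comprehensions, shown for illustration; None marks an axis that is never permuted, such as the raw inputs and the final outputs):

axes_to_perm = {
    "Dense_0/kernel": (None, "P_0"),
    "Dense_1/kernel": ("P_0", "P_1"),
    "Dense_0/bias": ("P_0",),
    "Dense_1/bias": ("P_1",),
    "Dense_2/kernel": ("P_1", None),
    "Dense_2/bias": (None,),
}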
def get_permuted_param(ps: PermutationSpec, perm, k: str, params, except_axis=None):
  """Get parameter `k` from `params`, with the permutations applied."""
  w = params[k]
  for axis, p in enumerate(ps.axes_to_perm[k]):
    # Skip the axis we're trying to permute.
    if axis == except_axis:
      continue

    # None indicates that there is no permutation relevant to that axis.
    if p is not None:
      w = jnp.take(w, perm[p], axis=axis)

  return w
Get parameter `k` from `params`, with the permutations applied.
get_permuted_param
python
samuela/git-re-basin
src/weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/weight_matching.py
MIT
def apply_permutation(ps: PermutationSpec, perm, params):
  """Apply a `perm` to `params`."""
  return {k: get_permuted_param(ps, perm, k, params) for k in params.keys()}
Apply a `perm` to `params`.
apply_permutation
python
samuela/git-re-basin
src/weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/weight_matching.py
MIT
def weight_matching(rng,
                    ps: PermutationSpec,
                    params_a,
                    params_b,
                    max_iter=100,
                    init_perm=None,
                    silent=False):
  """Find a permutation of `params_b` to make them match `params_a`."""
  perm_sizes = {p: params_a[axes[0][0]].shape[axes[0][1]] for p, axes in ps.perm_to_axes.items()}

  perm = {p: jnp.arange(n) for p, n in perm_sizes.items()} if init_perm is None else init_perm
  perm_names = list(perm.keys())

  for iteration in range(max_iter):
    progress = False
    for p_ix in random.permutation(rngmix(rng, iteration), len(perm_names)):
      p = perm_names[p_ix]
      n = perm_sizes[p]
      A = jnp.zeros((n, n))
      for wk, axis in ps.perm_to_axes[p]:
        w_a = params_a[wk]
        w_b = get_permuted_param(ps, perm, wk, params_b, except_axis=axis)
        w_a = jnp.moveaxis(w_a, axis, 0).reshape((n, -1))
        w_b = jnp.moveaxis(w_b, axis, 0).reshape((n, -1))
        A += w_a @ w_b.T

      ri, ci = linear_sum_assignment(A, maximize=True)
      assert (ri == jnp.arange(len(ri))).all()

      oldL = jnp.vdot(A, jnp.eye(n)[perm[p]])
      newL = jnp.vdot(A, jnp.eye(n)[ci, :])
      if not silent:
        print(f"{iteration}/{p}: {newL - oldL}")
      progress = progress or newL > oldL + 1e-12

      perm[p] = jnp.array(ci)

    if not progress:
      break

  return perm
Find a permutation of `params_b` to make them match `params_a`.
weight_matching
python
samuela/git-re-basin
src/weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/weight_matching.py
MIT
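Summarizing the update inside the loop above (my reading of the code, not wording from the repository): for a fixed permutation group p with n units, the accumulated similarity matrix and the assignment solved by `linear_sum_assignment(A, maximize=True)` are

\[
A \;=\; \sum_{(w,\,a)\,\in\,\mathrm{perm\_to\_axes}[p]} W_a^{(w)} \big(W_b^{(w)}\big)^{\top},
\qquad
\pi_p \;=\; \operatorname*{arg\,max}_{\pi \in S_n} \sum_{i=1}^{n} A_{i,\pi(i)},
\]

where \(W_a^{(w)}\) and \(W_b^{(w)}\) are params_a[w] and the currently permuted params_b[w], each with the permuted axis moved to the front and reshaped to (n, -1).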
def test_weight_matching():
  """If we just have a single hidden layer then it should converge after just one step."""
  ps = mlp_permutation_spec(num_hidden_layers=1)
  rng = random.PRNGKey(123)
  num_hidden = 10
  shapes = {
      "Dense_0/kernel": (2, num_hidden),
      "Dense_0/bias": (num_hidden, ),
      "Dense_1/kernel": (num_hidden, 3),
      "Dense_1/bias": (3, )
  }
  params_a = {k: random.normal(rngmix(rng, f"a-{k}"), shape) for k, shape in shapes.items()}
  params_b = {k: random.normal(rngmix(rng, f"b-{k}"), shape) for k, shape in shapes.items()}
  perm = weight_matching(rng, ps, params_a, params_b)
  print(perm)
If we just have a single hidden layer then it should converge after just one step.
test_weight_matching
python
samuela/git-re-basin
src/weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/weight_matching.py
MIT
def load_datasets():
  """Return the training and test datasets, unbatched."""
  # See https://www.tensorflow.org/datasets/overview#as_batched_tftensor_batch_size-1.
  train_ds_images_u8, train_ds_labels = tfds.as_numpy(
      tfds.load("mnist", split="train", batch_size=-1, as_supervised=True))
  test_ds_images_u8, test_ds_labels = tfds.as_numpy(
      tfds.load("mnist", split="test", batch_size=-1, as_supervised=True))
  train_ds = {"images_u8": train_ds_images_u8, "labels": train_ds_labels}
  test_ds = {"images_u8": test_ds_images_u8, "labels": test_ds_labels}
  return train_ds, test_ds
Return the training and test datasets, unbatched.
load_datasets
python
samuela/git-re-basin
src/mnist_mlp_train.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_mlp_train.py
MIT
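For reference, this call path returns each split as one big array pair; the shapes below are the standard TFDS MNIST shapes (stated from general knowledge of the dataset, not from this repository):

train_ds, test_ds = load_datasets()
print(train_ds["images_u8"].shape)  # (60000, 28, 28, 1), uint8
print(train_ds["labels"].shape)     # (60000,)
print(test_ds["images_u8"].shape)   # (10000, 28, 28, 1)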
def match_filters(paramsA, paramsB):
  """Permute the parameters of paramsB to match paramsA as closely as possible.
  Returns the permutation to apply to the weights of paramsB in order to align
  as best as possible with paramsA along with the permuted paramsB."""
  paf = flatten_params(paramsA)
  pbf = flatten_params(paramsB)

  perm = {}
  pbf_new = {**pbf}

  num_layers = max(int(kmatch("Dense_*/**", k).group(1)) for k in paf.keys())
  # range is [0, num_layers), so we're safe here since we don't want to be
  # reordering the output of the last layer.
  for layer in range(num_layers):
    # Maximize since we're dealing with similarities, not distances.
    # Note that it's critically important to use `pbf_new` here, not `pbf`!
    ri, ci = linear_sum_assignment(cosine_similarity(paf[f"Dense_{layer}/kernel"].T,
                                                     pbf_new[f"Dense_{layer}/kernel"].T),
                                   maximize=True)
    assert (ri == jnp.arange(len(ri))).all()

    perm[f"Dense_{layer}"] = ci

    pbf_new[f"Dense_{layer}/kernel"] = pbf_new[f"Dense_{layer}/kernel"][:, ci]
    pbf_new[f"Dense_{layer}/bias"] = pbf_new[f"Dense_{layer}/bias"][ci]
    pbf_new[f"Dense_{layer+1}/kernel"] = pbf_new[f"Dense_{layer+1}/kernel"][ci, :]

  return perm, unflatten_params(pbf_new)
Permute the parameters of paramsB to match paramsA as closely as possible. Returns the permutation to apply to the weights of paramsB in order to align as best as possible with paramsA along with the permuted paramsB.
match_filters
python
samuela/git-re-basin
src/mnist_mlp_cosine_similarity_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_mlp_cosine_similarity_matching.py
MIT
def get_intermediates(params, images_u8):
  """Calculate intermediate activations for all layers in flax's format."""
  images_f32 = vmap(stuff["normalize_transform"])(None, images_u8)
  _, state = model.apply({"params": params},
                         images_f32,
                         capture_intermediates=lambda mdl, _: isinstance(mdl, nn.Dense),
                         mutable=["intermediates"])
  return state["intermediates"]
Calculate intermediate activations for all layers in flax's format.
get_intermediates
python
samuela/git-re-basin
src/mnist_mlp_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_mlp_activation_matching.py
MIT
def normalize_activations(intermediates):
  """Simplify the activation dict format and flatten everything to be (batch_size, channels)."""

  def dense(i: int):
    k = f"Dense_{i}"
    # The activations are (batch_size, num_units) so we don't need to reshape.
    act = intermediates[k]["__call__"][0]
    act = nn.relu(act)
    return act

  return {f"Dense_{i}": dense(i) for i in range(num_mlp_layers)}
Simplify the activation dict format and flatten everything to be (batch_size, channels).
normalize_activations
python
samuela/git-re-basin
src/mnist_mlp_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_mlp_activation_matching.py
MIT
def conv_norm_conv(pf, l1, l2, perm):
  """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}."""
  return {
      **pf,
      f"Conv_{l1}/kernel": pf[f"Conv_{l1}/kernel"][:, :, :, perm],
      f"Conv_{l1}/bias": pf[f"Conv_{l1}/bias"][perm],
      f"LayerNorm_{l1}/scale": pf[f"LayerNorm_{l1}/scale"][perm],
      f"LayerNorm_{l1}/bias": pf[f"LayerNorm_{l1}/bias"][perm],
      f"Conv_{l2}/kernel": pf[f"Conv_{l2}/kernel"][:, :, perm, :]
  }
Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}.
vgg16_permutify.conv_norm_conv
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def dense_dense(pf, l1, l2, perm):
  """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}."""
  return {
      **pf,
      f"Dense_{l1}/kernel": pf[f"Dense_{l1}/kernel"][:, perm],
      f"Dense_{l1}/bias": pf[f"Dense_{l1}/bias"][perm],
      f"Dense_{l2}/kernel": pf[f"Dense_{l2}/kernel"][perm, :]
  }
Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}.
vgg16_permutify.dense_dense
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def conv_norm_flatten_dense(pf, l1, l2, perm):
  """Permute the output channels of Conv_{l1} and the input channels of Dense_{l2}."""
  # Note that the flatten is kind of a no-op since the flatten is (batch, 1, 1, 512) -> (batch, 512)
  return {
      **pf,
      f"Conv_{l1}/kernel": pf[f"Conv_{l1}/kernel"][:, :, :, perm],
      f"Conv_{l1}/bias": pf[f"Conv_{l1}/bias"][perm],
      f"LayerNorm_{l1}/scale": pf[f"LayerNorm_{l1}/scale"][perm],
      f"LayerNorm_{l1}/bias": pf[f"LayerNorm_{l1}/bias"][perm],
      f"Dense_{l2}/kernel": pf[f"Dense_{l2}/kernel"][perm, :]
  }
Permute the output channels of Conv_{l1} and the input channels of Dense_{l2}.
vgg16_permutify.conv_norm_flatten_dense
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def vgg16_permutify(permutation, params):
  """Permute the parameters of `params` based on `permutation`."""
  # conv kernel shape: (width, height, in_channel, out_channel)
  # dense kernel shape: (in, out)

  # VGG16: Conv0-Conv12 flatten Dense0-Dense2

  def conv_norm_conv(pf, l1, l2, perm):
    """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}."""
    return {
        **pf,
        f"Conv_{l1}/kernel": pf[f"Conv_{l1}/kernel"][:, :, :, perm],
        f"Conv_{l1}/bias": pf[f"Conv_{l1}/bias"][perm],
        f"LayerNorm_{l1}/scale": pf[f"LayerNorm_{l1}/scale"][perm],
        f"LayerNorm_{l1}/bias": pf[f"LayerNorm_{l1}/bias"][perm],
        f"Conv_{l2}/kernel": pf[f"Conv_{l2}/kernel"][:, :, perm, :]
    }

  def dense_dense(pf, l1, l2, perm):
    """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}."""
    return {
        **pf,
        f"Dense_{l1}/kernel": pf[f"Dense_{l1}/kernel"][:, perm],
        f"Dense_{l1}/bias": pf[f"Dense_{l1}/bias"][perm],
        f"Dense_{l2}/kernel": pf[f"Dense_{l2}/kernel"][perm, :]
    }

  def conv_norm_flatten_dense(pf, l1, l2, perm):
    """Permute the output channels of Conv_{l1} and the input channels of Dense_{l2}."""
    # Note that the flatten is kind of a no-op since the flatten is (batch, 1, 1, 512) -> (batch, 512)
    return {
        **pf,
        f"Conv_{l1}/kernel": pf[f"Conv_{l1}/kernel"][:, :, :, perm],
        f"Conv_{l1}/bias": pf[f"Conv_{l1}/bias"][perm],
        f"LayerNorm_{l1}/scale": pf[f"LayerNorm_{l1}/scale"][perm],
        f"LayerNorm_{l1}/bias": pf[f"LayerNorm_{l1}/bias"][perm],
        f"Dense_{l2}/kernel": pf[f"Dense_{l2}/kernel"][perm, :]
    }

  params_flat_new = {**flatten_params(params)}

  # Backbone conv layers
  for layer in range(12):
    params_flat_new = conv_norm_conv(params_flat_new, layer, layer + 1,
                                     permutation[f"Conv_{layer}"])

  # Conv_12 flatten Dense_0
  params_flat_new = conv_norm_flatten_dense(params_flat_new, 12, 0, permutation["Conv_12"])

  # (Dense_0, Dense_1) and (Dense_1, Dense_2)
  params_flat_new = dense_dense(params_flat_new, 0, 1, permutation["Dense_0"])
  params_flat_new = dense_dense(params_flat_new, 1, 2, permutation["Dense_1"])

  return unflatten_params(params_flat_new)
Permute the parameters of `params` based on `permutation`.
vgg16_permutify
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def get_permuted_param(ps: PermutationSpec, perm, k: str, params, except_axis=None):
  """Get parameter `k` from `params`, with the permutations applied."""
  w = params[k]
  for axis, p in enumerate(ps.axes_to_perm[k]):
    # Skip the axis we're trying to permute.
    if axis == except_axis:
      continue

    # None indicates that there is no permutation relevant to that axis.
    if p is not None:
      w = jnp.take(w, perm[p], axis=axis)

  return w
Get parameter `k` from `params`, with the permutations applied.
get_permuted_param
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def apply_permutation(ps: PermutationSpec, perm, params):
  """Apply a `perm` to `params`."""
  return {k: get_permuted_param(ps, perm, k, params) for k in params.keys()}
Apply a `perm` to `params`.
apply_permutation
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def weight_matching(rng, ps: PermutationSpec, params_a, params_b, max_iter=100, init_perm=None):
  """Find a permutation of `params_b` to make them match `params_a`."""
  perm_sizes = {p: params_a[axes[0][0]].shape[axes[0][1]] for p, axes in ps.perm_to_axes.items()}

  perm = {p: jnp.arange(n) for p, n in perm_sizes.items()} if init_perm is None else init_perm
  perm_names = list(perm.keys())

  for iteration in range(max_iter):
    progress = False
    for p_ix in random.permutation(rngmix(rng, iteration), len(perm_names)):
      p = perm_names[p_ix]
      n = perm_sizes[p]
      A = jnp.zeros((n, n))
      for wk, axis in ps.perm_to_axes[p]:
        w_a = params_a[wk]
        w_b = get_permuted_param(ps, perm, wk, params_b, except_axis=axis)
        w_a = jnp.moveaxis(w_a, axis, 0).reshape((n, -1))
        w_b = jnp.moveaxis(w_b, axis, 0).reshape((n, -1))
        A += w_a @ w_b.T

      ri, ci = linear_sum_assignment(A, maximize=True)
      assert (ri == jnp.arange(len(ri))).all()

      oldL = jnp.vdot(A, jnp.eye(n)[perm[p]])
      newL = jnp.vdot(A, jnp.eye(n)[ci, :])
      print(f"{iteration}/{p}: {newL - oldL}")
      progress = progress or newL > oldL + 1e-12

      perm[p] = jnp.array(ci)

    if not progress:
      break

  return perm
Find a permutation of `params_b` to make them match `params_a`.
weight_matching
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def permutation_matrix(ixs):
  """Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix."""
  # This is confusing, but indexing the columns onto the rows is actually the correct thing to do
  return jnp.eye(len(ixs), dtype=jnp.bool_)[ixs, :]
Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix.
main.permutation_matrix
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
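A worked example of `permutation_matrix` above with ixs = [2, 3, 0, 1], the array from the docstring:

P = permutation_matrix(jnp.array([2, 3, 0, 1]))
# Row i of P is row ixs[i] of the identity:
# [[0, 0, 1, 0],
#  [0, 0, 0, 1],
#  [1, 0, 0, 0],
#  [0, 1, 0, 0]]
# Viewed as a 0/1 matrix, (P @ x)[i] == x[ixs[i]] for a vector x.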
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("--model-a", type=str, required=True)
  parser.add_argument("--model-b", type=str, required=True)
  parser.add_argument("--test", action="store_true", help="Run in smoke-test mode")
  parser.add_argument("--seed", type=int, default=0, help="Random seed")
  parser.add_argument("--width-multiplier", type=int, required=True)
  args = parser.parse_args()

  with wandb.init(
      project="git-re-basin",
      entity="skainswo",
      tags=["cifar10", "vgg16", "straight-through-estimator"],
      # See https://github.com/wandb/client/issues/3672.
      mode="online",
      job_type="analysis",
  ) as wandb_run:
    config = wandb.config
    config.ec2_instance_type = ec2_get_instance_type()
    config.model_a = args.model_a
    config.model_b = args.model_b
    config.test = args.test
    config.seed = args.seed
    config.width_multiplier = args.width_multiplier
    config.num_epochs = 100
    config.batch_size = 500
    config.learning_rate = 1e-3
    # This is the epoch that we pull the model A/B params from.
    config.load_epoch = 99

    # model = VGG16Wide()
    model = make_vgg_width_ablation(config.width_multiplier)

    def load_model(filepath):
      with open(filepath, "rb") as fh:
        return from_bytes(
            init_train_state(random.PRNGKey(0),
                             model,
                             learning_rate=0.1,
                             num_epochs=100,
                             batch_size=100,
                             num_train_examples=50_000), fh.read())

    artifact_a = Path(wandb_run.use_artifact(f"cifar10-vgg-weights:{config.model_a}").download())
    artifact_b = Path(wandb_run.use_artifact(f"cifar10-vgg-weights:{config.model_b}").download())
    model_a = load_model(artifact_a / f"checkpoint{config.load_epoch}")
    model_b = load_model(artifact_b / f"checkpoint{config.load_epoch}")

    stuff = make_stuff(model)

    train_ds, test_ds = load_cifar10()
    num_train_examples = train_ds["images_u8"].shape[0]
    num_test_examples = test_ds["images_u8"].shape[0]
    assert num_train_examples % config.batch_size == 0
    assert num_test_examples % config.batch_size == 0

    # 1000 is the largest batch size feasible on p3.2xlarge.
    with timeblock("test evaluation"):
      test_loss_a, test_accuracy_a = stuff["dataset_loss_and_accuracy"](model_a.params, test_ds, 1000)
      test_loss_b, test_accuracy_b = stuff["dataset_loss_and_accuracy"](model_b.params, test_ds, 1000)
    with timeblock("train evaluation"):
      train_loss_a, train_accuracy_a = stuff["dataset_loss_and_accuracy"](model_a.params, train_ds, 1000)
      train_loss_b, train_accuracy_b = stuff["dataset_loss_and_accuracy"](model_b.params, train_ds, 1000)

    print({
        "train_loss_a": train_loss_a,
        "train_accuracy_a": train_accuracy_a,
        "train_loss_b": train_loss_b,
        "train_accuracy_b": train_accuracy_b,
        "test_loss_a": test_loss_a,
        "test_accuracy_a": test_accuracy_a,
        "test_loss_b": test_loss_b,
        "test_accuracy_b": test_accuracy_b,
    })

    baseline_train_loss = 0.5 * (train_loss_a + train_loss_b)

    def lsa(A):
      ri, ci = linear_sum_assignment(A, maximize=True)
      assert (ri == jnp.arange(len(ri))).all()
      return ci

    def permutation_matrix(ixs):
      """Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix."""
      # This is confusing, but indexing the columns onto the rows is actually the correct thing to do
      return jnp.eye(len(ixs), dtype=jnp.bool_)[ixs, :]

    @jit
    def batch_eval(permute_params, hardened_permute_params, images_u8, labels):
      model_b_permuted_params = permute_params_apply(permute_params, hardened_permute_params,
                                                     model_b.params)
      interp_params = tree_map(lambda a, b: 0.5 * (a + b), model_a.params, model_b_permuted_params)
      l, info = stuff["batch_eval"](interp_params, images_u8, labels)

      # Makes life easier to know when we're winning. stop_gradient shouldn't be
      # necessary but I'm paranoid.
      l -= stop_gradient(baseline_train_loss)

      return l, {**info, "accuracy": info["num_correct"] / config.batch_size}

    @jit
    def step(train_state, hardened_permute_params, images_u8, labels):
      (l, metrics), g = value_and_grad(batch_eval, has_aux=True)(train_state.params,
                                                                 hardened_permute_params,
                                                                 images_u8, labels)
      train_state = train_state.apply_gradients(grads=g)

      # Project onto Birkhoff polytope.
      train_state = train_state.replace(
          params=tree_map(sinkhorn_knopp_projection, train_state.params))

      return train_state, {**metrics, "loss": l}

    rng = random.PRNGKey(args.seed)

    tx = optax.sgd(learning_rate=config.learning_rate, momentum=0.9)
    # tx = optax.radam(learning_rate=config.learning_rate)

    # Start from the weight matching solution
    # permutation_spec = vgg16_permutation_spec()
    # init_perm = weight_matching(rngmix(rng, "weight_matching"), permutation_spec,
    #                             flatten_params(model_a.params), flatten_params(model_b.params))
    # init_pp = {k: permutation_matrix(v) for k, v in init_perm.items()}
    # init_pp = tree_map(lambda x, y: x.T + 0.01 * y, init_pp,
    #                    permute_params_init(rngmix(rng, "init"), model_a.params))

    # Start randomly
    init_pp = permute_params_init(rngmix(rng, "init"), model_a.params)

    train_state = TrainState.create(apply_fn=None, params=init_pp, tx=tx)

    artifact = wandb.Artifact("model_b_permutation",
                              type="permutation",
                              metadata={
                                  "dataset": "cifar10",
                                  "model": "vgg16"
                              })
    for epoch in tqdm(range(config.num_epochs)):
      train_data_perm = random.permutation(rngmix(rng, f"epoch-{epoch}"),
                                           num_train_examples).reshape((-1, config.batch_size))
      for i in tqdm(range(num_train_examples // config.batch_size)):
        # STE projection
        # hardened_pp = {k: permutation_matrix(lsa(v)) for k, v in train_state.params.items()}

        # No STE projection
        hardened_pp = train_state.params

        train_state, metrics = step(train_state, hardened_pp,
                                    train_ds["images_u8"][train_data_perm[i]],
                                    train_ds["labels"][train_data_perm[i]])
        wandb_run.log(metrics)

        if not jnp.isfinite(metrics["loss"]):
          raise ValueError(f"Loss is not finite: {metrics['loss']}")

      with artifact.new_file(f"permutation-epoch{epoch}.pkl", mode="wb") as f:
        pickle.dump({k: jnp.argsort(lsa(v)) for k, v in train_state.params.items()}, f)

    wandb_run.log_artifact(artifact)

    final_permutation = {k: jnp.argsort(lsa(v)) for k, v in train_state.params.items()}

    ### plotting
    lambdas = jnp.linspace(0, 1, num=25)

    train_loss_interp_naive = []
    test_loss_interp_naive = []
    train_acc_interp_naive = []
    test_acc_interp_naive = []
    for lam in tqdm(lambdas):
      naive_p = tree_map(lambda a, b: (1 - lam) * a + lam * b, model_a.params, model_b.params)
      train_loss, train_acc = stuff["dataset_loss_and_accuracy"](naive_p, train_ds, 1000)
      test_loss, test_acc = stuff["dataset_loss_and_accuracy"](naive_p, test_ds, 1000)
      train_loss_interp_naive.append(train_loss)
      test_loss_interp_naive.append(test_loss)
      train_acc_interp_naive.append(train_acc)
      test_acc_interp_naive.append(test_acc)

    model_b_clever = vgg16_permutify(final_permutation, model_b.params)

    train_loss_interp_clever = []
    test_loss_interp_clever = []
    train_acc_interp_clever = []
    test_acc_interp_clever = []
    for lam in tqdm(lambdas):
      clever_p = tree_map(lambda a, b: (1 - lam) * a + lam * b, model_a.params, model_b_clever)
      train_loss, train_acc = stuff["dataset_loss_and_accuracy"](clever_p, train_ds, 1000)
      test_loss, test_acc = stuff["dataset_loss_and_accuracy"](clever_p, test_ds, 1000)
      train_loss_interp_clever.append(train_loss)
      test_loss_interp_clever.append(test_loss)
      train_acc_interp_clever.append(train_acc)
      test_acc_interp_clever.append(test_acc)

    assert len(lambdas) == len(train_loss_interp_naive)
    assert len(lambdas) == len(test_loss_interp_naive)
    assert len(lambdas) == len(train_acc_interp_naive)
    assert len(lambdas) == len(test_acc_interp_naive)
    assert len(lambdas) == len(train_loss_interp_clever)
    assert len(lambdas) == len(test_loss_interp_clever)
    assert len(lambdas) == len(train_acc_interp_clever)
    assert len(lambdas) == len(test_acc_interp_clever)

    print("Plotting...")
    fig = plot_interp_loss(config.load_epoch, lambdas, train_loss_interp_naive,
                           test_loss_interp_naive, train_loss_interp_clever,
                           test_loss_interp_clever)
    plt.savefig(f"cifar10_vgg16_ste_interp_loss_epoch{config.load_epoch}.png", dpi=300)
    wandb_run.log({"interp_loss_fig": wandb.Image(fig)}, commit=False)
    plt.close(fig)

    fig = plot_interp_acc(config.load_epoch, lambdas, train_acc_interp_naive,
                          test_acc_interp_naive, train_acc_interp_clever,
                          test_acc_interp_clever)
    plt.savefig(f"cifar10_vgg16_ste_interp_accuracy_epoch{config.load_epoch}.png", dpi=300)
    wandb_run.log({"interp_acc_fig": wandb.Image(fig)}, commit=False)
    plt.close(fig)

    wandb_run.log({}, commit=True)
Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix.
main
python
samuela/git-re-basin
src/cifar10_vgg_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_ste.py
MIT
def load_cifar10():
  """Return the training and test datasets, as jnp.array's."""
  train_ds_images_u8, train_ds_labels = tfds.as_numpy(
      tfds.load("cifar10", split="train", batch_size=-1, as_supervised=True))
  test_ds_images_u8, test_ds_labels = tfds.as_numpy(
      tfds.load("cifar10", split="test", batch_size=-1, as_supervised=True))
  train_ds = {"images_u8": train_ds_images_u8, "labels": train_ds_labels}
  test_ds = {"images_u8": test_ds_images_u8, "labels": test_ds_labels}
  return train_ds, test_ds
Return the training and test datasets, as jnp.array's.
load_cifar10
python
samuela/git-re-basin
src/datasets.py
https://github.com/samuela/git-re-basin/blob/master/src/datasets.py
MIT
def _split_cifar(train_ds, label_split: int):
  """Split a CIFAR-ish dataset into two biased subsets."""
  assert train_ds["images_u8"].shape[0] == 50_000
  assert train_ds["labels"].shape[0] == 50_000

  # We randomly permute the training data, just in case there's some kind of
  # non-iid ordering coming out of tfds.
  perm = np.random.default_rng(123).permutation(50_000)
  train_images_u8 = train_ds["images_u8"][perm, :, :, :]
  train_labels = train_ds["labels"][perm]

  # This just so happens to be a clean 25000/25000 split.
  lt_images_u8 = train_images_u8[train_labels < label_split]
  lt_labels = train_labels[train_labels < label_split]
  gte_images_u8 = train_images_u8[train_labels >= label_split]
  gte_labels = train_labels[train_labels >= label_split]

  s1 = {
      "images_u8": np.concatenate((lt_images_u8[:5000], gte_images_u8[5000:]), axis=0),
      "labels": np.concatenate((lt_labels[:5000], gte_labels[5000:]), axis=0)
  }
  s2 = {
      "images_u8": np.concatenate((gte_images_u8[:5000], lt_images_u8[5000:]), axis=0),
      "labels": np.concatenate((gte_labels[:5000], lt_labels[5000:]), axis=0)
  }
  return s1, s2
Split a CIFAR-ish dataset into two biased subsets.
_split_cifar
python
samuela/git-re-basin
src/datasets.py
https://github.com/samuela/git-re-basin/blob/master/src/datasets.py
MIT
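A quick size check for `_split_cifar`, assuming label_split=5 (which is what the 25000/25000 comment in the function implies): each subset keeps 25,000 examples, 80% of them drawn from one half of the label range.

s1, s2 = _split_cifar(train_ds, label_split=5)
assert s1["images_u8"].shape[0] == 5_000 + 20_000  # 5k with labels < 5, 20k with labels >= 5
assert s2["images_u8"].shape[0] == 5_000 + 20_000  # 5k with labels >= 5, 20k with labels < 5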
def conv_conv(paf, pbf, l1, l2):
  """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}."""
  k1 = paf[f"params/Conv_{l1}/kernel"]
  k2 = pbf[f"params/Conv_{l1}/kernel"]
  assert k1.shape == k2.shape
  ri, ci = linear_sum_assignment(cosine_similarity(
      jnp.reshape(jnp.moveaxis(k1, -1, 0), (k1.shape[-1], -1)),
      jnp.reshape(jnp.moveaxis(k2, -1, 0), (k2.shape[-1], -1))),
                                 maximize=True)
  assert (ri == jnp.arange(len(ri))).all()
  return {
      **pbf,
      f"params/Conv_{l1}/kernel": pbf[f"params/Conv_{l1}/kernel"][:, :, :, ci],
      f"params/Conv_{l1}/bias": pbf[f"params/Conv_{l1}/bias"][ci],
      f"params/Conv_{l2}/kernel": pbf[f"params/Conv_{l2}/kernel"][:, :, ci, :]
  }
Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}.
permutify.conv_conv
python
samuela/git-re-basin
src/mnist_convnet_plot.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_convnet_plot.py
MIT
def dense_dense(paf, pbf, l1, l2):
  """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}."""
  k1 = paf[f"params/Dense_{l1}/kernel"]
  k2 = pbf[f"params/Dense_{l1}/kernel"]
  assert k1.shape == k2.shape
  ri, ci = linear_sum_assignment(cosine_similarity(k1.T, k2.T), maximize=True)
  assert (ri == jnp.arange(len(ri))).all()
  return {
      **pbf,
      f"params/Dense_{l1}/kernel": pbf[f"params/Dense_{l1}/kernel"][:, ci],
      f"params/Dense_{l1}/bias": pbf[f"params/Dense_{l1}/bias"][ci],
      f"params/Dense_{l2}/kernel": pbf[f"params/Dense_{l2}/kernel"][ci, :]
  }
Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}.
permutify.dense_dense
python
samuela/git-re-basin
src/mnist_convnet_plot.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_convnet_plot.py
MIT
def permutify(paramsA, paramsB):
  """Permute the parameters of paramsB to match paramsA as closely as possible.
  Returns the permuted version of paramsB. Only works on sequences of Dense layers for now."""
  paf = flatten_params(paramsA)
  pbf = flatten_params(paramsB)

  # conv kernel shape: (width, height, in_channel, out_channel)
  # dense kernel shape: (in, out)

  # {in:fixed Conv_0 Conv_1 any:mean:fixed Dense_0 Dense_1 fixed:out}

  def conv_conv(paf, pbf, l1, l2):
    """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}."""
    k1 = paf[f"params/Conv_{l1}/kernel"]
    k2 = pbf[f"params/Conv_{l1}/kernel"]
    assert k1.shape == k2.shape
    ri, ci = linear_sum_assignment(cosine_similarity(
        jnp.reshape(jnp.moveaxis(k1, -1, 0), (k1.shape[-1], -1)),
        jnp.reshape(jnp.moveaxis(k2, -1, 0), (k2.shape[-1], -1))),
                                   maximize=True)
    assert (ri == jnp.arange(len(ri))).all()
    return {
        **pbf,
        f"params/Conv_{l1}/kernel": pbf[f"params/Conv_{l1}/kernel"][:, :, :, ci],
        f"params/Conv_{l1}/bias": pbf[f"params/Conv_{l1}/bias"][ci],
        f"params/Conv_{l2}/kernel": pbf[f"params/Conv_{l2}/kernel"][:, :, ci, :]
    }

  def dense_dense(paf, pbf, l1, l2):
    """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}."""
    k1 = paf[f"params/Dense_{l1}/kernel"]
    k2 = pbf[f"params/Dense_{l1}/kernel"]
    assert k1.shape == k2.shape
    ri, ci = linear_sum_assignment(cosine_similarity(k1.T, k2.T), maximize=True)
    assert (ri == jnp.arange(len(ri))).all()
    return {
        **pbf,
        f"params/Dense_{l1}/kernel": pbf[f"params/Dense_{l1}/kernel"][:, ci],
        f"params/Dense_{l1}/bias": pbf[f"params/Dense_{l1}/bias"][ci],
        f"params/Dense_{l2}/kernel": pbf[f"params/Dense_{l2}/kernel"][ci, :]
    }

  pbf_new = {**pbf}

  # (Conv_0, Conv_1)
  pbf_new = conv_conv(paf, pbf_new, 0, 1)

  # (Conv_1, Conv_2)
  pbf_new = conv_conv(paf, pbf_new, 1, 2)

  # (Dense_0, Dense_1)
  pbf_new = dense_dense(paf, pbf_new, 0, 1)

  return unflatten_params(pbf_new)
Permute the parameters of paramsB to match paramsA as closely as possible. Returns the permuted version of paramsB. Only works on sequences of Dense layers for now.
permutify
python
samuela/git-re-basin
src/mnist_convnet_plot.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_convnet_plot.py
MIT
def permutation_matrix(ixs):
  """Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix."""
  # This is confusing, but indexing the columns onto the rows is actually the correct thing to do
  return jnp.eye(len(ixs), dtype=jnp.bool_)[ixs, :]
Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix.
main.permutation_matrix
python
samuela/git-re-basin
src/mnist_mlp_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_mlp_ste.py
MIT
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("--model-a", type=str, required=True)
  parser.add_argument("--model-b", type=str, required=True)
  parser.add_argument("--test", action="store_true", help="Run in smoke-test mode")
  parser.add_argument("--seed", type=int, default=0, help="Random seed")
  args = parser.parse_args()

  with wandb.init(
      project="git-re-basin",
      entity="skainswo",
      tags=["mnist", "mlp", "straight-through-estimator"],
      # See https://github.com/wandb/client/issues/3672.
      mode="online",
      job_type="analysis",
  ) as wandb_run:
    config = wandb.config
    config.ec2_instance_type = ec2_get_instance_type()
    config.model_a = args.model_a
    config.model_b = args.model_b
    config.test = args.test
    config.seed = args.seed
    config.num_epochs = 10
    config.batch_size = 1000
    config.learning_rate = 1e-2
    # This is the epoch that we pull the model A/B params from.
    config.load_epoch = 49

    model = MLPModel()

    def load_model(filepath):
      with open(filepath, "rb") as fh:
        return from_bytes(init_train_state(random.PRNGKey(0), -1, model), fh.read())

    artifact_a = Path(wandb_run.use_artifact(f"mnist-mlp-weights:{config.model_a}").download())
    artifact_b = Path(wandb_run.use_artifact(f"mnist-mlp-weights:{config.model_b}").download())
    model_a = load_model(artifact_a / f"checkpoint{config.load_epoch}")
    model_b = load_model(artifact_b / f"checkpoint{config.load_epoch}")

    stuff = make_stuff(model)

    train_ds, test_ds = load_datasets(smoke_test_mode=config.test)
    num_train_examples = train_ds["images_u8"].shape[0]
    num_test_examples = test_ds["images_u8"].shape[0]
    assert num_train_examples % config.batch_size == 0
    assert num_test_examples % config.batch_size == 0

    train_loss_a, train_accuracy_a = stuff["dataset_loss_and_accuracy"](model_a.params, train_ds,
                                                                        10_000)
    train_loss_b, train_accuracy_b = stuff["dataset_loss_and_accuracy"](model_b.params, train_ds,
                                                                        10_000)
    test_loss_a, test_accuracy_a = stuff["dataset_loss_and_accuracy"](model_a.params, test_ds,
                                                                      10_000)
    test_loss_b, test_accuracy_b = stuff["dataset_loss_and_accuracy"](model_b.params, test_ds,
                                                                      10_000)

    print({
        "train_loss_a": train_loss_a,
        "train_accuracy_a": train_accuracy_a,
        "train_loss_b": train_loss_b,
        "train_accuracy_b": train_accuracy_b,
        "test_loss_a": test_loss_a,
        "test_accuracy_a": test_accuracy_a,
        "test_loss_b": test_loss_b,
        "test_accuracy_b": test_accuracy_b,
    })

    baseline_train_loss = 0.5 * (train_loss_a + train_loss_b)

    def lsa(A):
      ri, ci = linear_sum_assignment(A, maximize=True)
      assert (ri == jnp.arange(len(ri))).all()
      return ci

    def permutation_matrix(ixs):
      """Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix."""
      # This is confusing, but indexing the columns onto the rows is actually the correct thing to do
      return jnp.eye(len(ixs), dtype=jnp.bool_)[ixs, :]

    @jit
    def batch_eval(permute_params, hardened_permute_params, images_u8, labels):
      model_b_permuted_params = permute_params_apply(permute_params, hardened_permute_params,
                                                     model_b.params)
      interp_params = tree_map(lambda a, b: 0.5 * (a + b), model_a.params, model_b_permuted_params)
      l, num_correct = stuff["batch_eval"](interp_params, images_u8, labels)

      # Makes life easier to know when we're winning. stop_gradient shouldn't be
      # necessary but I'm paranoid.
      l -= stop_gradient(baseline_train_loss)

      return l, {"num_correct": num_correct, "accuracy": num_correct / config.batch_size}

    @jit
    def step(train_state, hardened_permute_params, images_u8, labels):
      (l, metrics), g = value_and_grad(batch_eval, has_aux=True)(train_state.params,
                                                                 hardened_permute_params,
                                                                 images_u8, labels)
      train_state = train_state.apply_gradients(grads=g)

      # Project onto Birkhoff polytope.
      train_state = train_state.replace(
          params=tree_map(sinkhorn_knopp_projection, train_state.params))

      return train_state, {**metrics, "loss": l}

    rng = random.PRNGKey(config.seed)

    tx = optax.sgd(learning_rate=config.learning_rate, momentum=0.9)
    train_state = TrainState.create(apply_fn=None,
                                    params=permute_params_init(rngmix(rng, "init")),
                                    tx=tx)

    for epoch in tqdm(range(config.num_epochs)):
      train_data_perm = random.permutation(rngmix(rng, f"epoch-{epoch}"),
                                           num_train_examples).reshape((-1, config.batch_size))
      for i in range(num_train_examples // config.batch_size):
        hardened_pp = {k: permutation_matrix(lsa(v)) for k, v in train_state.params.items()}
        train_state, metrics = step(train_state, hardened_pp,
                                    train_ds["images_u8"][train_data_perm[i]],
                                    train_ds["labels"][train_data_perm[i]])
        wandb_run.log(metrics)

        if not jnp.isfinite(metrics["loss"]):
          raise ValueError(f"Loss is not finite: {metrics['loss']}")

    final_permutation = {k: jnp.argsort(lsa(v)) for k, v in train_state.params.items()}

    # Save final_permutation as an Artifact
    artifact = wandb.Artifact("model_b_permutation",
                              type="permutation",
                              metadata={
                                  "dataset": "mnist",
                                  "model": "mlp"
                              })
    with artifact.new_file("permutation.pkl", mode="wb") as f:
      pickle.dump(final_permutation, f)
    wandb_run.log_artifact(artifact)

    ### plotting
    lambdas = jnp.linspace(0, 1, num=25)

    train_loss_interp_naive = []
    test_loss_interp_naive = []
    train_acc_interp_naive = []
    test_acc_interp_naive = []
    for lam in tqdm(lambdas):
      naive_p = tree_map(lambda a, b: (1 - lam) * a + lam * b, model_a.params, model_b.params)
      train_loss, train_acc = stuff["dataset_loss_and_accuracy"](naive_p, train_ds, 10_000)
      test_loss, test_acc = stuff["dataset_loss_and_accuracy"](naive_p, test_ds, 10_000)
      train_loss_interp_naive.append(train_loss)
      test_loss_interp_naive.append(test_loss)
      train_acc_interp_naive.append(train_acc)
      test_acc_interp_naive.append(test_acc)

    model_b_clever = apply_permutation(final_permutation, model_b.params)

    train_loss_interp_clever = []
    test_loss_interp_clever = []
    train_acc_interp_clever = []
    test_acc_interp_clever = []
    for lam in tqdm(lambdas):
      clever_p = tree_map(lambda a, b: (1 - lam) * a + lam * b, model_a.params, model_b_clever)
      train_loss, train_acc = stuff["dataset_loss_and_accuracy"](clever_p, train_ds, 10_000)
      test_loss, test_acc = stuff["dataset_loss_and_accuracy"](clever_p, test_ds, 10_000)
      train_loss_interp_clever.append(train_loss)
      test_loss_interp_clever.append(test_loss)
      train_acc_interp_clever.append(train_acc)
      test_acc_interp_clever.append(test_acc)

    assert len(lambdas) == len(train_loss_interp_naive)
    assert len(lambdas) == len(test_loss_interp_naive)
    assert len(lambdas) == len(train_acc_interp_naive)
    assert len(lambdas) == len(test_acc_interp_naive)
    assert len(lambdas) == len(train_loss_interp_clever)
    assert len(lambdas) == len(test_loss_interp_clever)
    assert len(lambdas) == len(train_acc_interp_clever)
    assert len(lambdas) == len(test_acc_interp_clever)

    print("Plotting...")
    fig = plot_interp_loss(config.load_epoch, lambdas, train_loss_interp_naive,
                           test_loss_interp_naive, train_loss_interp_clever,
                           test_loss_interp_clever)
    plt.savefig(f"mnist_mlp_ste_interp_loss_epoch{config.load_epoch}.png", dpi=300)
    wandb_run.log({"interp_loss_fig": wandb.Image(fig)}, commit=False)
    plt.close(fig)

    fig = plot_interp_acc(config.load_epoch, lambdas, train_acc_interp_naive,
                          test_acc_interp_naive, train_acc_interp_clever,
                          test_acc_interp_clever)
    plt.savefig(f"mnist_mlp_ste_interp_accuracy_epoch{config.load_epoch}.png", dpi=300)
    wandb_run.log({"interp_acc_fig": wandb.Image(fig)}, commit=False)
    plt.close(fig)

    wandb_run.log({}, commit=True)
Convert a permutation array, eg. [2, 3, 0, 1], to a permutation matrix.
main
python
samuela/git-re-basin
src/mnist_mlp_ste.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_mlp_ste.py
MIT
def conv_gn_conv(paf, pbf, l1, l2):
  """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}."""
  ka = paf[f"params/Conv_{l1}/kernel"]
  kb = pbf[f"params/Conv_{l1}/kernel"]
  assert ka.shape == kb.shape
  ri, ci = linear_sum_assignment(cosine_similarity(
      jnp.reshape(jnp.moveaxis(ka, -1, 0), (ka.shape[-1], -1)),
      jnp.reshape(jnp.moveaxis(kb, -1, 0), (kb.shape[-1], -1))),
                                 maximize=True)
  assert (ri == jnp.arange(len(ri))).all()
  return {
      **pbf,
      f"params/Conv_{l1}/kernel": pbf[f"params/Conv_{l1}/kernel"][:, :, :, ci],
      f"params/Conv_{l1}/bias": pbf[f"params/Conv_{l1}/bias"][ci],
      f"params/LayerNorm_{l1}/scale": pbf[f"params/LayerNorm_{l1}/scale"][ci],
      f"params/LayerNorm_{l1}/bias": pbf[f"params/LayerNorm_{l1}/bias"][ci],
      f"params/Conv_{l2}/kernel": pbf[f"params/Conv_{l2}/kernel"][:, :, ci, :]
  }
Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}.
permutify.conv_gn_conv
python
samuela/git-re-basin
src/cifar10_vgg_cosine_similarity_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_cosine_similarity_matching.py
MIT
def dense_dense(paf, pbf, l1, l2):
  """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}."""
  ka = paf[f"params/Dense_{l1}/kernel"]
  kb = pbf[f"params/Dense_{l1}/kernel"]
  assert ka.shape == kb.shape
  ri, ci = linear_sum_assignment(cosine_similarity(ka.T, kb.T), maximize=True)
  assert (ri == jnp.arange(len(ri))).all()
  return {
      **pbf,
      f"params/Dense_{l1}/kernel": pbf[f"params/Dense_{l1}/kernel"][:, ci],
      f"params/Dense_{l1}/bias": pbf[f"params/Dense_{l1}/bias"][ci],
      f"params/Dense_{l2}/kernel": pbf[f"params/Dense_{l2}/kernel"][ci, :]
  }
Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}.
permutify.dense_dense
python
samuela/git-re-basin
src/cifar10_vgg_cosine_similarity_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_cosine_similarity_matching.py
MIT
def permutify(paramsA, paramsB):
  """Permute the parameters of paramsB to match paramsA as closely as possible.
  Returns the permuted version of paramsB. Only works on sequences of Dense layers for now."""
  paf = flatten_params(paramsA)
  pbf = flatten_params(paramsB)

  # conv kernel shape: (width, height, in_channel, out_channel)
  # dense kernel shape: (in, out)

  # VGG16: Conv0-Conv12 flatten Dense0-Dense2

  def conv_gn_conv(paf, pbf, l1, l2):
    """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}."""
    ka = paf[f"params/Conv_{l1}/kernel"]
    kb = pbf[f"params/Conv_{l1}/kernel"]
    assert ka.shape == kb.shape
    ri, ci = linear_sum_assignment(cosine_similarity(
        jnp.reshape(jnp.moveaxis(ka, -1, 0), (ka.shape[-1], -1)),
        jnp.reshape(jnp.moveaxis(kb, -1, 0), (kb.shape[-1], -1))),
                                   maximize=True)
    assert (ri == jnp.arange(len(ri))).all()
    return {
        **pbf,
        f"params/Conv_{l1}/kernel": pbf[f"params/Conv_{l1}/kernel"][:, :, :, ci],
        f"params/Conv_{l1}/bias": pbf[f"params/Conv_{l1}/bias"][ci],
        f"params/LayerNorm_{l1}/scale": pbf[f"params/LayerNorm_{l1}/scale"][ci],
        f"params/LayerNorm_{l1}/bias": pbf[f"params/LayerNorm_{l1}/bias"][ci],
        f"params/Conv_{l2}/kernel": pbf[f"params/Conv_{l2}/kernel"][:, :, ci, :]
    }

  def dense_dense(paf, pbf, l1, l2):
    """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}."""
    ka = paf[f"params/Dense_{l1}/kernel"]
    kb = pbf[f"params/Dense_{l1}/kernel"]
    assert ka.shape == kb.shape
    ri, ci = linear_sum_assignment(cosine_similarity(ka.T, kb.T), maximize=True)
    assert (ri == jnp.arange(len(ri))).all()
    return {
        **pbf,
        f"params/Dense_{l1}/kernel": pbf[f"params/Dense_{l1}/kernel"][:, ci],
        f"params/Dense_{l1}/bias": pbf[f"params/Dense_{l1}/bias"][ci],
        f"params/Dense_{l2}/kernel": pbf[f"params/Dense_{l2}/kernel"][ci, :]
    }

  def conv_gn_flatten_dense(paf, pbf, l1, l2):
    # Note that this is much simpler than the general case since we also know
    # that the output of Conv_{l1} has shape (_, 1, 1, 512) when inputs are
    # (_, 32, 32, 3) as is the case with CIFAR-10. And Dense_{l2} has shape
    # (512, _).
    ka = paf[f"params/Conv_{l1}/kernel"]
    kb = pbf[f"params/Conv_{l1}/kernel"]
    assert ka.shape == kb.shape
    ri, ci = linear_sum_assignment(cosine_similarity(
        jnp.reshape(jnp.moveaxis(ka, -1, 0), (ka.shape[-1], -1)),
        jnp.reshape(jnp.moveaxis(kb, -1, 0), (kb.shape[-1], -1))),
                                   maximize=True)
    assert (ri == jnp.arange(len(ri))).all()
    return {
        **pbf,
        f"params/Conv_{l1}/kernel": pbf[f"params/Conv_{l1}/kernel"][:, :, :, ci],
        f"params/Conv_{l1}/bias": pbf[f"params/Conv_{l1}/bias"][ci],
        f"params/LayerNorm_{l1}/scale": pbf[f"params/LayerNorm_{l1}/scale"][ci],
        f"params/LayerNorm_{l1}/bias": pbf[f"params/LayerNorm_{l1}/bias"][ci],
        f"params/Dense_{l2}/kernel": pbf[f"params/Dense_{l2}/kernel"][ci, :]
    }

  pbf_new = {**pbf}

  # Backbone conv layers
  for layer in range(12):
    pbf_new = conv_gn_conv(paf, pbf_new, layer, layer + 1)

  # Conv_12 flatten Dense_0
  pbf_new = conv_gn_flatten_dense(paf, pbf_new, 12, 0)

  # (Dense_0, Dense_1) and (Dense_1, Dense_2)
  pbf_new = dense_dense(paf, pbf_new, 0, 1)
  pbf_new = dense_dense(paf, pbf_new, 1, 2)

  return unflatten_params(pbf_new)
Permute the parameters of paramsB to match paramsA as closely as possible. Returns the permuted version of paramsB. Only works on sequences of Dense layers for now.
permutify
python
samuela/git-re-basin
src/cifar10_vgg_cosine_similarity_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_cosine_similarity_matching.py
MIT
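As a sanity check on why this re-indexing is harmless, here is a small numpy sketch (illustrative, not from the repo) showing that permuting `Dense_{l1}`'s output units together with `Dense_{l2}`'s input units leaves a two-layer ReLU network's function unchanged:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 8))
W1, b1 = rng.normal(size=(8, 16)), rng.normal(size=16)
W2, b2 = rng.normal(size=(16, 4)), rng.normal(size=4)
perm = rng.permutation(16)

relu = lambda z: np.maximum(z, 0)
original = relu(x @ W1 + b1) @ W2 + b2
permuted = relu(x @ W1[:, perm] + b1[perm]) @ W2[perm, :] + b2
assert np.allclose(original, permuted)   # hidden units are renamed, the function is not changed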
def load_mnist(): """Return the training and test datasets, unbatched.""" # See https://www.tensorflow.org/datasets/overview#as_batched_tftensor_batch_size-1. train_ds_images_u8, train_ds_labels = tfds.as_numpy( tfds.load("mnist", split="train", batch_size=-1, as_supervised=True)) test_ds_images_u8, test_ds_labels = tfds.as_numpy( tfds.load("mnist", split="test", batch_size=-1, as_supervised=True)) train_ds = {"images_u8": train_ds_images_u8, "labels": train_ds_labels} test_ds = {"images_u8": test_ds_images_u8, "labels": test_ds_labels} return train_ds, test_ds
Return the training and test datasets, unbatched.
load_mnist
python
samuela/git-re-basin
src/mnist_mlp_wm_many.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_mlp_wm_many.py
MIT
def get_intermediates(params, images_u8): """Calculate intermediate activations for all layers in flax's format.""" images_f32 = vmap(stuff["normalize_transform"])(None, images_u8) _, state = model.apply({"params": params}, images_f32, capture_intermediates=lambda mdl, _: isinstance(mdl, nn.Dense), mutable=["intermediates"]) return state["intermediates"]
Calculate intermediate activations for all layers in flax's format.
get_intermediates
python
samuela/git-re-basin
src/cifar10_mlp_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_mlp_activation_matching.py
MIT
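A self-contained Flax sketch of the same `capture_intermediates` pattern, using a tiny throwaway MLP instead of the model from the file (the module name `TinyMLP` and all shapes are made up for illustration); the captured activations come back in the `intermediates` collection keyed by submodule name and method:

import jax.numpy as jnp
from jax import random
import flax.linen as nn

class TinyMLP(nn.Module):
    @nn.compact
    def __call__(self, x):
        x = nn.relu(nn.Dense(8)(x))
        return nn.Dense(2)(x)

model = TinyMLP()
params = model.init(random.PRNGKey(0), jnp.ones((1, 4)))["params"]
_, state = model.apply(
    {"params": params}, jnp.ones((3, 4)),
    capture_intermediates=lambda mdl, _: isinstance(mdl, nn.Dense),
    mutable=["intermediates"],
)
print(state["intermediates"]["Dense_0"]["__call__"][0].shape)  # (3, 8), pre-ReLU Dense_0 output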
def normalize_activations(intermediates): """Simplify the activation dict format and flatten everything to be (batch_size, channels).""" def dense(i: int): k = f"Dense_{i}" # The activations are (batch_size, num_units) so we don't need to reshape. act = intermediates[k]["__call__"][0] act = nn.relu(act) return act return {f"Dense_{i}": dense(i) for i in range(num_mlp_layers)}
Simplify the activation dict format and flatten everything to be (batch_size, channels).
normalize_activations
python
samuela/git-re-basin
src/cifar10_mlp_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_mlp_activation_matching.py
MIT
def conv_norm_conv(pf, l1, l2, perm): """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}.""" return { **pf, f"params/Conv_{l1}/kernel": pf[f"params/Conv_{l1}/kernel"][:, :, :, perm], f"params/Conv_{l1}/bias": pf[f"params/Conv_{l1}/bias"][perm], f"params/LayerNorm_{l1}/scale": pf[f"params/LayerNorm_{l1}/scale"][perm], f"params/LayerNorm_{l1}/bias": pf[f"params/LayerNorm_{l1}/bias"][perm], f"params/Conv_{l2}/kernel": pf[f"params/Conv_{l2}/kernel"][:, :, perm, :] }
Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}.
vgg16_permutify.conv_norm_conv
python
samuela/git-re-basin
src/cifar10_vgg_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_activation_matching.py
MIT
def dense_dense(pf, l1, l2, perm): """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}.""" return { **pf, f"params/Dense_{l1}/kernel": pf[f"params/Dense_{l1}/kernel"][:, perm], f"params/Dense_{l1}/bias": pf[f"params/Dense_{l1}/bias"][perm], f"params/Dense_{l2}/kernel": pf[f"params/Dense_{l2}/kernel"][perm, :] }
Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}.
vgg16_permutify.dense_dense
python
samuela/git-re-basin
src/cifar10_vgg_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_activation_matching.py
MIT
def conv_norm_flatten_dense(pf, l1, l2, perm): """Permute the output channels of Conv_{l1} and the input channels of Dense_{l2}.""" # Note that the flatten is kind of a no-op since the flatten is (batch, 1, 1, 512) -> (batch, 512) return { **pf, f"params/Conv_{l1}/kernel": pf[f"params/Conv_{l1}/kernel"][:, :, :, perm], f"params/Conv_{l1}/bias": pf[f"params/Conv_{l1}/bias"][perm], f"params/LayerNorm_{l1}/scale": pf[f"params/LayerNorm_{l1}/scale"][perm], f"params/LayerNorm_{l1}/bias": pf[f"params/LayerNorm_{l1}/bias"][perm], f"params/Dense_{l2}/kernel": pf[f"params/Dense_{l2}/kernel"][perm, :] }
Permute the output channels of Conv_{l1} and the input channels of Dense_{l2}.
vgg16_permutify.conv_norm_flatten_dense
python
samuela/git-re-basin
src/cifar10_vgg_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_activation_matching.py
MIT
def vgg16_permutify(permutation, params): """Permute the parameters of `params` based on `permutation`.""" params_flat = flatten_params(params) # print(tree_map(jnp.shape, params_flat)) # conv kernel shape: (width, height, in_channel, out_channel) # dense kernel shape: (in, out) # VGG16: Conv0-Conv12 flatten Dense0-Dense2 def conv_norm_conv(pf, l1, l2, perm): """Permute the output channels of Conv_{l1} and the input channels of Conv_{l2}.""" return { **pf, f"params/Conv_{l1}/kernel": pf[f"params/Conv_{l1}/kernel"][:, :, :, perm], f"params/Conv_{l1}/bias": pf[f"params/Conv_{l1}/bias"][perm], f"params/LayerNorm_{l1}/scale": pf[f"params/LayerNorm_{l1}/scale"][perm], f"params/LayerNorm_{l1}/bias": pf[f"params/LayerNorm_{l1}/bias"][perm], f"params/Conv_{l2}/kernel": pf[f"params/Conv_{l2}/kernel"][:, :, perm, :] } def dense_dense(pf, l1, l2, perm): """Permute the output channels of Dense_{l1} and the input channels of Dense_{l2}.""" return { **pf, f"params/Dense_{l1}/kernel": pf[f"params/Dense_{l1}/kernel"][:, perm], f"params/Dense_{l1}/bias": pf[f"params/Dense_{l1}/bias"][perm], f"params/Dense_{l2}/kernel": pf[f"params/Dense_{l2}/kernel"][perm, :] } def conv_norm_flatten_dense(pf, l1, l2, perm): """Permute the output channels of Conv_{l1} and the input channels of Dense_{l2}.""" # Note that the flatten is kind of a no-op since the flatten is (batch, 1, 1, 512) -> (batch, 512) return { **pf, f"params/Conv_{l1}/kernel": pf[f"params/Conv_{l1}/kernel"][:, :, :, perm], f"params/Conv_{l1}/bias": pf[f"params/Conv_{l1}/bias"][perm], f"params/LayerNorm_{l1}/scale": pf[f"params/LayerNorm_{l1}/scale"][perm], f"params/LayerNorm_{l1}/bias": pf[f"params/LayerNorm_{l1}/bias"][perm], f"params/Dense_{l2}/kernel": pf[f"params/Dense_{l2}/kernel"][perm, :] } params_flat_new = {**params_flat} # Backbone conv layers for layer in range(12): params_flat_new = conv_norm_conv(params_flat_new, layer, layer + 1, permutation[f"Conv_{layer}"]) # Conv_12 flatten Dense_0 params_flat_new = conv_norm_flatten_dense(params_flat_new, 12, 0, permutation["Conv_12"]) # (Dense_0, Dense_1) and (Dense_1, Dense_2) params_flat_new = dense_dense(params_flat_new, 0, 1, permutation["Dense_0"]) params_flat_new = dense_dense(params_flat_new, 1, 2, permutation["Dense_1"]) return unflatten_params(params_flat_new)
Permute the parameters of `params` based on `permutation`.
vgg16_permutify
python
samuela/git-re-basin
src/cifar10_vgg_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_activation_matching.py
MIT
def get_intermediates(params, images_u8): """Calculate intermediate activations for all layers in flax's format.""" images_f32 = vmap(stuff["normalize_transform"])(None, images_u8) _, state = model.apply({"params": params}, images_f32, capture_intermediates=lambda mdl, _: isinstance(mdl, nn.LayerNorm) or isinstance(mdl, nn.Dense), mutable=["intermediates"]) return state["intermediates"]
Calculate intermediate activations for all layers in flax's format.
get_intermediates
python
samuela/git-re-basin
src/cifar10_vgg_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_activation_matching.py
MIT
def normalize_activations(intermediates): """Simplify the activation dict format and flatten everything to be (batch_size, channels).""" def layernorm(i: int): k = f"LayerNorm_{i}" act = intermediates[k]["__call__"][0] act = rearrange(act, "batch w h c -> (batch w h) c") act = nn.relu(act) return act def dense(i: int): k = f"Dense_{i}" # The activations are (batch_size, num_units) so we don't need to reshape. act = intermediates[k]["__call__"][0] act = nn.relu(act) return act return { "Dense_0": dense(0), "Dense_1": dense(1), **{f"Conv_{i}": layernorm(i) for i in range(13)}, }
Simplify the activation dict format and flatten everything to be (batch_size, channels).
normalize_activations
python
samuela/git-re-basin
src/cifar10_vgg_activation_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/cifar10_vgg_activation_matching.py
MIT
def mlp_permutation_spec(num_hidden_layers: int) -> PermutationSpec: """We assume that one permutation cannot appear in two axes of the same weight array.""" assert num_hidden_layers >= 1 return permutation_spec_from_axes_to_perm({ "Dense_0/kernel": (None, "P_0"), **{f"Dense_{i}/kernel": (f"P_{i-1}", f"P_{i}") for i in range(1, num_hidden_layers)}, **{f"Dense_{i}/bias": (f"P_{i}", ) for i in range(num_hidden_layers)}, f"Dense_{num_hidden_layers}/kernel": (f"P_{num_hidden_layers-1}", None), f"Dense_{num_hidden_layers}/bias": (None, ), })
We assume that one permutation cannot appear in two axes of the same weight array.
mlp_permutation_spec
python
samuela/git-re-basin
src/mnist_vgg_weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_vgg_weight_matching.py
MIT
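For illustration, this is the axes-to-permutation mapping that `mlp_permutation_spec(num_hidden_layers=2)` would hand to `permutation_spec_from_axes_to_perm`, traced by hand from the code above (`None` marks an axis no permutation acts on):

axes_to_perm = {
    "Dense_0/kernel": (None, "P_0"),   # inputs are fixed; layer-0 hidden units carry P_0
    "Dense_1/kernel": ("P_0", "P_1"),  # rows follow P_0, columns carry P_1
    "Dense_0/bias":   ("P_0",),
    "Dense_1/bias":   ("P_1",),
    "Dense_2/kernel": ("P_1", None),   # output layer: rows follow P_1, outputs are fixed
    "Dense_2/bias":   (None,),
}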
def get_permuted_param(ps: PermutationSpec, perm, k: str, params, except_axis=None): """Get parameter `k` from `params`, with the permutations applied.""" w = params[k] for axis, p in enumerate(ps.axes_to_perm[k]): # Skip the axis we're trying to permute. if axis == except_axis: continue # None indicates that there is no permutation relevant to that axis. if p is not None: w = jnp.take(w, perm[p], axis=axis) return w
Get parameter `k` from `params`, with the permutations applied.
get_permuted_param
python
samuela/git-re-basin
src/mnist_vgg_weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_vgg_weight_matching.py
MIT
def apply_permutation(ps: PermutationSpec, perm, params): """Apply a `perm` to `params`.""" return {k: get_permuted_param(ps, perm, k, params) for k in params.keys()}
Apply a `perm` to `params`.
apply_permutation
python
samuela/git-re-basin
src/mnist_vgg_weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_vgg_weight_matching.py
MIT
def weight_matching(rng, ps: PermutationSpec, params_a, params_b): """Find a permutation of `params_b` to make them match `params_a`.""" perm_sizes = {p: params_a[axes[0][0]].shape[axes[0][1]] for p, axes in ps.perm_to_axes.items()} perm = {p: jnp.arange(n) for p, n in perm_sizes.items()} perm_names = list(perm.keys()) for iteration in range(100): progress = False for p_ix in random.permutation(rngmix(rng, iteration), len(perm_names)): p = perm_names[p_ix] n = perm_sizes[p] A = jnp.zeros((n, n)) for wk, axis in ps.perm_to_axes[p]: w_a = params_a[wk] w_b = get_permuted_param(ps, perm, wk, params_b, except_axis=axis) w_a = jnp.moveaxis(w_a, axis, 0).reshape((n, -1)) w_b = jnp.moveaxis(w_b, axis, 0).reshape((n, -1)) A += w_a @ w_b.T # A += w_a.size * (w_a @ w_b.T) ri, ci = linear_sum_assignment(A, maximize=True) assert (ri == jnp.arange(len(ri))).all() oldL = jnp.vdot(A, jnp.eye(n)[perm[p]]) newL = jnp.vdot(A, jnp.eye(n)[ci, :]) print(f"{iteration}/{p}: {newL - oldL}") progress = progress or newL > oldL + 1e-12 perm[p] = jnp.array(ci) if not progress: break return perm
Find a permutation of `params_b` to make them match `params_a`.
weight_matching
python
samuela/git-re-basin
src/mnist_vgg_weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_vgg_weight_matching.py
MIT
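A minimal numpy sketch of the single-permutation case (one hidden layer, where a single linear assignment suffices); shapes follow flax's `(in, out)` kernel convention and the whole setup is illustrative rather than taken from the repo:

import numpy as np
from scipy.optimize import linear_sum_assignment

rng = np.random.default_rng(0)
d_in, n, d_out = 20, 10, 20
a = {"W0": rng.normal(size=(d_in, n)), "b0": rng.normal(size=n),
     "W1": rng.normal(size=(n, d_out))}

# model B is model A with its hidden units shuffled by a ground-truth permutation
true_perm = rng.permutation(n)
b = {"W0": a["W0"][:, true_perm], "b0": a["b0"][true_perm], "W1": a["W1"][true_perm, :]}

# cost matrix: sum the contributions of every weight array touched by P_0
A = a["W0"].T @ b["W0"] + np.outer(a["b0"], b["b0"]) + a["W1"] @ b["W1"].T
ri, ci = linear_sum_assignment(A, maximize=True)
assert (a["W0"] == b["W0"][:, ci]).all()  # the hidden-unit correspondence is recovered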
def test_weight_matching(): """If we just have a single hidden layer then it should converge after just one step.""" ps = mlp_permutation_spec(num_hidden_layers=1) rng = random.PRNGKey(123) num_hidden = 10 shapes = { "Dense_0/kernel": (2, num_hidden), "Dense_0/bias": (num_hidden, ), "Dense_1/kernel": (num_hidden, 3), "Dense_1/bias": (3, ) } params_a = {k: random.normal(rngmix(rng, f"a-{k}"), shape) for k, shape in shapes.items()} params_b = {k: random.normal(rngmix(rng, f"b-{k}"), shape) for k, shape in shapes.items()} perm = weight_matching(rng, ps, params_a, params_b) print(perm)
If we just have a single hidden layer then it should converge after just one step.
test_weight_matching
python
samuela/git-re-basin
src/mnist_vgg_weight_matching.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_vgg_weight_matching.py
MIT
def get_datasets(test_mode): """Return the training and test datasets, unbatched. test_mode: Whether or not we're running in "smoke test" mode. """ train_ds = tfds.load("mnist", split="train", as_supervised=True) test_ds = tfds.load("mnist", split="test", as_supervised=True) # Note: The take/cache warning: # 2022-01-25 07:32:58.144059: W tensorflow/core/kernels/data/cache_dataset_ops.cc:768] The calling iterator did not fully read the dataset being cached. In order to avoid unexpected truncation of the dataset, the partially cached contents of the dataset will be discarded. This can happen if you have an input pipeline similar to `dataset.cache().take(k).repeat()`. You should use `dataset.take(k).cache().repeat()` instead. # is not because we're actually doing this in the wrong order, but rather that # the dataset is loaded in and called .cache() on before we receive it. if test_mode: train_ds = train_ds.take(13) test_ds = test_ds.take(17) # Normalize 0-255 pixel values to 0.0-1.0 normalize = lambda image, label: (tf.cast(image, tf.float32) / 255.0, tf.one_hot(label, depth=10)) train_ds = train_ds.map(normalize).cache() test_ds = test_ds.map(normalize).cache() return train_ds, test_ds
Return the training and test datasets, unbatched. test_mode: Whether or not we're running in "smoke test" mode.
get_datasets
python
samuela/git-re-basin
src/mnist_convnet_run.py
https://github.com/samuela/git-re-basin/blob/master/src/mnist_convnet_run.py
MIT
def __init__( self, dim: int, dim_out: int, heads: int, q_stride: int = 1, window_size: int = 0, use_mask_unit_attn: bool = False, ): """ Args: - dim, dim_out: The input and output feature dimensions. - heads: The number of attention heads. - q_stride: If greater than 1, pool q with this stride. The stride should be flattened (e.g., 2x2 = 4). - window_size: The current (flattened) size of a mask unit *after* pooling (if any). - use_mask_unit_attn: Use Mask Unit or Global Attention. """ super().__init__() self.dim = dim self.dim_out = dim_out self.heads = heads self.q_stride = q_stride self.head_dim = dim_out // heads self.scale = (self.head_dim) ** -0.5 self.qkv = nn.Linear(dim, 3 * dim_out) self.proj = nn.Linear(dim_out, dim_out) self.window_size = window_size self.use_mask_unit_attn = use_mask_unit_attn
Args: - dim, dim_out: The input and output feature dimensions. - heads: The number of attention heads. - q_stride: If greater than 1, pool q with this stride. The stride should be flattened (e.g., 2x2 = 4). - window_size: The current (flattened) size of a mask unit *after* pooling (if any). - use_mask_unit_attn: Use Mask Unit or Global Attention.
__init__
python
facebookresearch/hiera
hiera/hiera.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """ Input should be of shape [batch, tokens, channels]. """ B, N, _ = x.shape num_windows = ( (N // (self.q_stride * self.window_size)) if self.use_mask_unit_attn else 1 ) qkv = ( self.qkv(x) .reshape(B, -1, num_windows, 3, self.heads, self.head_dim) .permute(3, 0, 4, 2, 1, 5) ) q, k, v = qkv[0], qkv[1], qkv[2] if self.q_stride > 1: # Refer to Unroll to see how this performs a maxpool-Nd q = ( q.view(B, self.heads, num_windows, self.q_stride, -1, self.head_dim) .max(dim=3) .values ) if hasattr(F, "scaled_dot_product_attention"): # Note: the original paper did *not* use SDPA, it's a free boost! x = F.scaled_dot_product_attention(q, k, v) else: attn = (q * self.scale) @ k.transpose(-1, -2) attn = attn.softmax(dim=-1) x = (attn @ v) x = x.transpose(1, 3).reshape(B, -1, self.dim_out) x = self.proj(x) return x
Input should be of shape [batch, tokens, channels].
forward
python
facebookresearch/hiera
hiera/hiera.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera.py
Apache-2.0
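A small standalone check (assuming no attention mask and no dropout, which is how the forward above calls it) that the SDPA fast path and the manual softmax fallback compute the same thing:

import torch
import torch.nn.functional as F

q = torch.randn(2, 4, 16, 8)   # [batch, heads, tokens, head_dim]
k = torch.randn(2, 4, 16, 8)
v = torch.randn(2, 4, 16, 8)

scale = q.shape[-1] ** -0.5
manual = ((q * scale) @ k.transpose(-1, -2)).softmax(dim=-1) @ v
fused = F.scaled_dot_product_attention(q, k, v)
assert torch.allclose(fused, manual, atol=1e-5)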
def get_random_mask(self, x: torch.Tensor, mask_ratio: float) -> torch.Tensor: """ Generates a random mask, mask_ratio fraction are dropped. 1 is *keep*, 0 is *remove*. Useful for MAE, FLIP, etc. """ B = x.shape[0] # Tokens selected for masking at mask unit level num_windows = math.prod(self.mask_spatial_shape) # num_mask_units len_keep = int(num_windows * (1 - mask_ratio)) noise = torch.rand(B, num_windows, device=x.device) # Sort noise for each sample ids_shuffle = torch.argsort( noise, dim=1 ) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=1) # Generate the binary mask: 1 is *keep*, 0 is *remove* # Note this is opposite to original MAE mask = torch.zeros([B, num_windows], device=x.device) mask[:, :len_keep] = 1 # Unshuffle to get the binary mask mask = torch.gather(mask, dim=1, index=ids_restore) return mask.bool()
Generates a random mask, mask_ratio fraction are dropped. 1 is *keep*, 0 is *remove*. Useful for MAE, FLIP, etc.
get_random_mask
python
facebookresearch/hiera
hiera/hiera.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera.py
Apache-2.0
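The masking trick above (rank a noise tensor, keep the lowest-ranked windows, then unshuffle) can be checked with a small numpy sketch; the shapes are illustrative:

import numpy as np

rng = np.random.default_rng(0)
B, num_windows, mask_ratio = 2, 8, 0.75
len_keep = int(num_windows * (1 - mask_ratio))

noise = rng.random((B, num_windows))
ids_shuffle = np.argsort(noise, axis=1)              # ascend: small is keep, large is remove
ids_restore = np.argsort(ids_shuffle, axis=1)

mask = np.zeros((B, num_windows))
mask[:, :len_keep] = 1                               # 1 is keep, 0 is remove
mask = np.take_along_axis(mask, ids_restore, axis=1) # unshuffle back to window order

assert (mask.sum(axis=1) == len_keep).all()          # every sample keeps exactly len_keep windows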
def forward( self, x: torch.Tensor, mask: torch.Tensor = None, return_intermediates: bool = False, ) -> torch.Tensor: """ mask should be a boolean tensor of shape [B, #MUt*#MUy*#MUx] where #MU are the number of mask units in that dim. Note: 1 in mask is *keep*, 0 is *remove*; mask.sum(dim=-1) should be the same across the batch. """ # Slowfast training passes in a list if isinstance(x, list): x = x[0] intermediates = [] x = self.patch_embed( x, mask=mask.view( x.shape[0], 1, *self.mask_spatial_shape ) # B, C, *mask_spatial_shape if mask is not None else None, ) x = x + self.get_pos_embed() x = self.unroll(x) # Discard masked tokens if mask is not None: x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view( x.shape[0], -1, x.shape[-1] ) for i, blk in enumerate(self.blocks): x = blk(x) if return_intermediates and i in self.stage_ends: intermediates.append(self.reroll(x, i, mask=mask)) if mask is None: x = x.mean(dim=1) x = self.norm(x) x = self.head(x) # x may not always be in spatial order here. # e.g. if q_pool = 2, mask_unit_size = (8, 8), and # q_stride = (2, 2), not all unrolls were consumed, # intermediates[-1] is x in spatial order if return_intermediates: return x, intermediates return x
mask should be a boolean tensor of shape [B, #MUt*#MUy*#MUx] where #MU are the number of mask units in that dim. Note: 1 in mask is *keep*, 0 is *remove*; mask.sum(dim=-1) should be the same across the batch.
forward
python
facebookresearch/hiera
hiera/hiera.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera.py
Apache-2.0
def benchmark( model: torch.nn.Module, device: torch.device = 0, input_size: Tuple[int] = (3, 224, 224), batch_size: int = 64, runs: int = 40, throw_out: float = 0.25, use_fp16: bool = False, verbose: bool = False, ) -> float: """ Benchmark the given model with random inputs at the given batch size. Args: - model: the module to benchmark - device: the device to use for benchmarking - input_size: the input size to pass to the model e.g., (ch, h, w) or (ch, t, h, w) - batch_size: the batch size to use for evaluation - runs: the number of total runs to do - throw_out: the percentage of runs to throw out at the start of testing - use_fp16: whether or not to benchmark with float16 and autocast - verbose: whether or not to use tqdm to print progress / print throughput at end Returns: - the throughput measured in images / second """ if not isinstance(device, torch.device): device = torch.device(device) is_cuda = torch.device(device).type == "cuda" model = model.eval().to(device) input = torch.rand(batch_size, *input_size, device=device) if use_fp16: input = input.half() warm_up = int(runs * throw_out) total = 0 start = time.time() with torch.autocast(device.type, enabled=use_fp16): with torch.no_grad(): for i in tqdm(range(runs), disable=not verbose, desc="Benchmarking"): if i == warm_up: if is_cuda: torch.cuda.synchronize() total = 0 start = time.time() model(input) total += batch_size if is_cuda: torch.cuda.synchronize() end = time.time() elapsed = end - start throughput = total / elapsed if verbose: print(f"Throughput: {throughput:.2f} im/s") return throughput
Benchmark the given model with random inputs at the given batch size. Args: - model: the module to benchmark - device: the device to use for benchmarking - input_size: the input size to pass to the model e.g., (ch, h, w) or (ch, t, h, w) - batch_size: the batch size to use for evaluation - runs: the number of total runs to do - throw_out: the percentage of runs to throw out at the start of testing - use_fp16: whether or not to benchmark with float16 and autocast - verbose: whether or not to use tqdm to print progress / print throughput at end Returns: - the throughput measured in images / second
benchmark
python
facebookresearch/hiera
hiera/benchmarking.py
https://github.com/facebookresearch/hiera/blob/master/hiera/benchmarking.py
Apache-2.0
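A hypothetical usage sketch, assuming the `benchmark` function above is in scope and substituting a throwaway CPU model (the layer sizes and arguments below are made up for illustration):

import torch

tiny = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, 3),
    torch.nn.AdaptiveAvgPool2d(1),
    torch.nn.Flatten(),
    torch.nn.Linear(8, 10),
)
ims_per_sec = benchmark(
    tiny,
    device=torch.device("cpu"),
    input_size=(3, 64, 64),
    batch_size=8,
    runs=10,
    verbose=True,
)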
def pretrained_model(checkpoints: Dict[str, str], default: str = None) -> Callable: """ Loads a Hiera model from a pretrained source (if pretrained=True). Use "checkpoint" to specify the checkpoint. """ def inner(model_func: Callable) -> Callable: def model_def(pretrained: bool = False, checkpoint: str = default, strict: bool = True, **kwdargs) -> nn.Module: if pretrained: if checkpoints is None: raise RuntimeError("This model currently doesn't have pretrained weights available.") elif checkpoint is None: raise RuntimeError("No checkpoint specified.") elif checkpoint not in checkpoints: raise RuntimeError(f"Invalid checkpoint specified ({checkpoint}). Options are: {list(checkpoints.keys())}.") state_dict = torch.hub.load_state_dict_from_url(checkpoints[checkpoint], map_location="cpu") if "head.projection.weight" in state_dict["model_state"]: # Set the number of classes equal to the state_dict only if the user doesn't want to overwrite it if "num_classes" not in kwdargs: kwdargs["num_classes"] = state_dict["model_state"]["head.projection.weight"].shape[0] # If the user specified a different number of classes, remove the projection weights or else we'll error out elif kwdargs["num_classes"] != state_dict["model_state"]["head.projection.weight"].shape[0]: del state_dict["model_state"]["head.projection.weight"] del state_dict["model_state"]["head.projection.bias"] model = model_func(**kwdargs) if pretrained: # Disable being strict when trying to load a encoder-decoder model into an encoder-only model if "decoder_pos_embed" in state_dict["model_state"] and not hasattr(model, "decoder_pos_embed"): strict = False model.load_state_dict(state_dict["model_state"], strict=strict) return model # Keep some metadata so we can do things that require looping through all available models model_def.checkpoints = checkpoints model_def.default = default return model_def return inner
Loads a Hiera model from a pretrained source (if pretrained=True). Use "checkpoint" to specify the checkpoint.
pretrained_model
python
facebookresearch/hiera
hiera/hiera_utils.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera_utils.py
Apache-2.0
def conv_nd(n: int) -> Type[nn.Module]: """ Returns a conv with nd (e.g., Conv2d for n=2). Work up to n=3. If you wanted a 4d Hiera, you could probably just implement this for n=4. (no promises) """ return [nn.Identity, nn.Conv1d, nn.Conv2d, nn.Conv3d][n]
Returns a conv with nd (e.g., Conv2d for n=2). Works up to n=3. If you wanted a 4d Hiera, you could probably just implement this for n=4. (no promises)
conv_nd
python
facebookresearch/hiera
hiera/hiera_utils.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera_utils.py
Apache-2.0
def do_masked_conv( x: torch.Tensor, conv: nn.Module, mask: Optional[torch.Tensor] = None ) -> torch.Tensor: """Zero-out the masked regions of the input before conv. Prevents leakage of masked regions when using overlapping kernels. """ if conv is None: return x if mask is None: return conv(x) mask = get_resized_mask(target_size=x.shape[2:], mask=mask) return conv(x * mask.bool())
Zero-out the masked regions of the input before conv. Prevents leakage of masked regions when using overlapping kernels.
do_masked_conv
python
facebookresearch/hiera
hiera/hiera_utils.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera_utils.py
Apache-2.0
def undo_windowing( x: torch.Tensor, shape: List[int], mu_shape: List[int] ) -> torch.Tensor: """ Restore spatial organization by undoing windowed organization of mask units. Args: x: organized by mask units windows, e.g. in 2d [B, #MUy*#MUx, MUy, MUx, C] shape: current spatial shape, if it were not organized into mask unit windows, e.g. in 2d [B, #MUy*MUy, #MUx*MUx, C]. mu_shape: current mask unit shape, e.g. in 2d [MUy, MUx] Returns: x: e.g. in 2d, [B, #MUy*MUy, #MUx*MUx, C] """ D = len(shape) B, C = x.shape[0], x.shape[-1] # [B, #MUy*#MUx, MUy, MUx, C] -> [B, #MUy, #MUx, MUy, MUx, C] num_MUs = [s // mu for s, mu in zip(shape, mu_shape)] x = x.view(B, *num_MUs, *mu_shape, C) # [B, #MUy, #MUx, MUy, MUx, C] -> [B, #MUy*MUy, #MUx*MUx, C] permute = ( [0] + sum( [list(p) for p in zip(range(1, 1 + D), range(1 + D, 1 + 2 * D))], [], ) + [len(x.shape) - 1] ) x = x.permute(permute).reshape(B, *shape, C) return x
Restore spatial organization by undoing windowed organization of mask units. Args: x: organized by mask units windows, e.g. in 2d [B, #MUy*#MUx, MUy, MUx, C] shape: current spatial shape, if it were not organized into mask unit windows, e.g. in 2d [B, #MUy*MUy, #MUx*MUx, C]. mu_shape: current mask unit shape, e.g. in 2d [MUy, MUx] Returns: x: e.g. in 2d, [B, #MUy*MUy, #MUx*MUx, C]
undo_windowing
python
facebookresearch/hiera
hiera/hiera_utils.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera_utils.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """ Input: Flattened patch embeddings [B, N, C] Output: Patch embeddings [B, N, C] permuted such that [B, 4, N//4, C].max(1) etc. performs MaxPoolNd """ B, _, C = x.shape cur_size = self.size x = x.view(*([B] + cur_size + [C])) for strides in self.schedule: # Move patches with the given strides to the batch dimension # Create a view of the tensor with the patch stride as separate dims # For example in 2d: [B, H // Sy, Sy, W // Sx, Sx, C] cur_size = [i // s for i, s in zip(cur_size, strides)] new_shape = [B] + sum([[i, s] for i, s in zip(cur_size, strides)], []) + [C] x = x.view(new_shape) # Move the patch stride into the batch dimension # For example in 2d: [B, Sy, Sx, H // Sy, W // Sx, C] L = len(new_shape) permute = ( [0] + list(range(2, L - 1, 2)) + list(range(1, L - 1, 2)) + [L - 1] ) x = x.permute(permute) # Now finally flatten the relevant dims into the batch dimension x = x.flatten(0, len(strides)) B *= math.prod(strides) x = x.reshape(-1, math.prod(self.size), C) return x
Input: Flattened patch embeddings [B, N, C] Output: Patch embeddings [B, N, C] permuted such that [B, 4, N//4, C].max(1) etc. performs MaxPoolNd
forward
python
facebookresearch/hiera
hiera/hiera_utils.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera_utils.py
Apache-2.0
def forward( self, x: torch.Tensor, block_idx: int, mask: torch.Tensor = None ) -> torch.Tensor: """ Roll the given tensor back up to spatial order assuming it's from the given block. If no mask is provided: - Returns [B, H, W, C] for 2d, [B, T, H, W, C] for 3d, etc. If a mask is provided: - Returns [B, #MUs, MUy, MUx, C] for 2d, etc. """ schedule, size = self.schedule[block_idx] B, N, C = x.shape D = len(size) cur_mu_shape = [1] * D for strides in schedule: # Extract the current patch from N x = x.view(B, *strides, N // math.prod(strides), *cur_mu_shape, C) # Move that patch into the current MU # Example in 2d: [B, Sy, Sx, N//(Sy*Sx), MUy, MUx, C] -> [B, N//(Sy*Sx), Sy, MUy, Sx, MUx, C] L = len(x.shape) permute = ( [0, 1 + D] + sum( [list(p) for p in zip(range(1, 1 + D), range(1 + D + 1, L - 1))], [], ) + [L - 1] ) x = x.permute(permute) # Reshape to [B, N//(Sy*Sx), *MU, C] for i in range(D): cur_mu_shape[i] *= strides[i] x = x.reshape(B, -1, *cur_mu_shape, C) N = x.shape[1] # Current shape (e.g., 2d: [B, #MUy*#MUx, MUy, MUx, C]) x = x.view(B, N, *cur_mu_shape, C) # If masked, return [B, #MUs, MUy, MUx, C] if mask is not None: return x # If not masked, we can return [B, H, W, C] x = undo_windowing(x, size, cur_mu_shape) return x
Roll the given tensor back up to spatial order assuming it's from the given block. If no mask is provided: - Returns [B, H, W, C] for 2d, [B, T, H, W, C] for 3d, etc. If a mask is provided: - Returns [B, #MUs, MUy, MUx, C] for 2d, etc.
forward
python
facebookresearch/hiera
hiera/hiera_utils.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera_utils.py
Apache-2.0
def forward_loss( self, x: torch.Tensor, pred: torch.Tensor, mask: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Note: in mask, 0 is *visible*, 1 is *masked* x: e.g. [B, 3, H, W] pred: [B * num_pred_tokens, num_pixels_in_pred_patch * in_chans] label: [B * num_pred_tokens, num_pixels_in_pred_patch * in_chans] """ if len(self.q_stride) == 2: label = self.get_pixel_label_2d(x, mask) elif len(self.q_stride) == 3: label = self.get_pixel_label_3d(x, mask) else: raise NotImplementedError pred = pred[mask] loss = (pred - label) ** 2 return loss.mean(), pred, label
Note: in mask, 0 is *visible*, 1 is *masked* x: e.g. [B, 3, H, W] pred: [B * num_pred_tokens, num_pixels_in_pred_patch * in_chans] label: [B * num_pred_tokens, num_pixels_in_pred_patch * in_chans]
forward_loss
python
facebookresearch/hiera
hiera/hiera_mae.py
https://github.com/facebookresearch/hiera/blob/master/hiera/hiera_mae.py
Apache-2.0
def merge_node_test_kcfg() -> KCFG: """Define a KCFG with all possible scenarios for merging nodes. Here are some specifications for the KCFG: 1. Unable to continue other pattern-rewriting, e.g., lift_edge_edge, lift_split_split, lift_edge_split, ... 2. Able to test the merged CTerms and the merged CSubsts. 3. Able to propagate all possible result structures through different heuristics, including merged-into-one, merged-into-two, partially-merged-into-one, partially-merged-into-two, and not-merged. 4. Contains Split, Edge, and MergedEdge, because the merging process is targeted at these types of edges. """ cfg = KCFG() # Split Source: A # 1 <X> -10 <= X < 100 cfg.create_node( CTerm(k(KVariable('X')), [ge_ml('X', -10), lt_ml('X', 100)]), ) # Split Targets & Edge Sources: Ai # 2 <X> -10 <= X < 0 cfg.create_node(CTerm(k(KVariable('X')), [ge_ml('X', -10), lt_ml('X', 0)])) # 3 <X> 0 <= X < 2 cfg.create_node(CTerm(k(KVariable('X')), [ge_ml('X', 0), lt_ml('X', 2)])) # 4 <X> 2 <= A < 6 cfg.create_node(CTerm(k(KVariable('A')), [ge_ml('A', 2), lt_ml('A', 6)])) # 5 <Y> 6 <= B < 10 cfg.create_node(CTerm(k(KVariable('B')), [ge_ml('B', 6), lt_ml('B', 10)])) # 6 <10> cfg.create_node(CTerm(k(intToken(10)))) # 7 <11> cfg.create_node(CTerm(k(intToken(11)))) # 8 <Z> 12 <= Z < 100 cfg.create_node(CTerm(k(KVariable('Z')), [ge_ml('Z', 12), lt_ml('Z', 100)])) # Edge Targets: Bi # 9 <1> cfg.create_node(CTerm(k(intToken(1)))) # 10 <2> cfg.create_node(CTerm(k(intToken(2)))) # 11 <3> cfg.create_node(CTerm(k(intToken(3)))) # 12 <4> cfg.create_node(CTerm(k(intToken(4)))) # 13 <5> cfg.create_node(CTerm(k(intToken(5)))) # 14 <6> cfg.create_node(CTerm(k(intToken(6)))) # 15 <7> cfg.create_node(CTerm(k(intToken(7)))) # MergedEdge Sources # 16 <X> 2 <= X < 4 cfg.create_node(CTerm(k(KVariable('X')), [ge_ml('X', 2), lt_ml('X', 4)])) # 17 <Y> 4 <= Y < 6 cfg.create_node(CTerm(k(KVariable('Y')), [ge_ml('Y', 4), lt_ml('Y', 6)])) # 18 <X> 6 <= X < 8 cfg.create_node(CTerm(k(KVariable('X')), [ge_ml('X', 6), lt_ml('X', 8)])) # 19 <X> 8 <= Y < 10 cfg.create_node(CTerm(k(KVariable('Y')), [ge_ml('Y', 8), lt_ml('Y', 10)])) # MergedEdge Targets # 20 <8> cfg.create_node(CTerm(k(intToken(8)))) # 21 <9> cfg.create_node(CTerm(k(intToken(9)))) # 22 <10> cfg.create_node(CTerm(k(intToken(10)))) # 23 <11> cfg.create_node(CTerm(k(intToken(11)))) # MergedEdge e1 = cfg.create_edge(16, 20, 5, ['r1']) e2 = cfg.create_edge(17, 21, 6, ['r2', 'r3']) e3 = cfg.create_edge(18, 22, 7, ['r4', 'r5']) e4 = cfg.create_edge(19, 23, 8, ['r6', 'r7', 'r8']) cfg.remove_node(16) cfg.remove_node(17) cfg.remove_node(18) cfg.remove_node(19) cfg.remove_node(20) cfg.remove_node(21) cfg.remove_node(22) cfg.remove_node(23) # Split cfg.create_split_by_nodes(1, [2, 3, 4, 5, 6, 7, 8]) # Edge cfg.create_edge(2, 9, 10, ['r9']) cfg.create_edge(3, 10, 11, ['r10', 'r11']) cfg.create_merged_edge(4, 11, [e1, e2]) cfg.create_merged_edge(5, 12, [e3, e4]) cfg.create_edge(6, 13, 14, ['r12', 'r13', 'r14']) cfg.create_edge(7, 14, 15, ['r15']) cfg.create_edge(8, 15, 16, ['r16']) return cfg
Define a KCFG with all possible scenarios for merging nodes. Here are some specifications for the KCFG: 1. Unable to continue other pattern-rewriting, e.g., lift_edge_edge, lift_split_split, lift_edge_split, ... 2. Able to test the merged CTerms and the merged CSubsts. 3. Able to propagate all possible result structures through different heuristics, including merged-into-one, merged-into-two, partially-merged-into-one, partially-merged-into-two, and not-merged. 4. Contains Split, Edge, and MergedEdge, because the merging process is targeted at these types of edges.
merge_node_test_kcfg
python
runtimeverification/k
pyk/src/tests/unit/kcfg/merge_node_data.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/unit/kcfg/merge_node_data.py
BSD-3-Clause
def test_get_model(self, kore_client: KoreClient, op: str, a: int, b: int, c: int) -> None: """Check whether the SMT solver returns ``X = c`` for ``X = a op b``.""" # Given pattern = Equals( BOOL, INT, TRUE, eq_int(div_pattern(op, a, b), EVar('X', INT)), ) expected = SatResult(Equals(INT, INT, EVar('X', INT), int_dv(c))) # When actual = kore_client.get_model(pattern) # Then assert actual == expected
Check whether the SMT solver returns ``X = c`` for ``X = a op b``.
test_get_model
python
runtimeverification/k
pyk/src/tests/integration/test_division_hooks.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/integration/test_division_hooks.py
BSD-3-Clause
def test_simplify(self, kore_client: KoreClient, op: str, a: int, b: int, c: int) -> None: """Check whether kore-rpc (HS hook) and booster (LLVM library) both return ``c`` for ``a op b``.""" # Given pattern = div_pattern(op, a, b) expected = (int_dv(c), ()) # When actual = kore_client.simplify(pattern) # Then assert actual == expected
Check whether kore-rpc (HS hook) and booster (LLVM library) both return ``c`` for ``a op b``.
test_simplify
python
runtimeverification/k
pyk/src/tests/integration/test_division_hooks.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/integration/test_division_hooks.py
BSD-3-Clause
def test_llvm_interpret(imp_definition: Path, a: int, b: int, expected: int) -> None: """Run the intepreter on Euclid's algorithm.""" # Given program_text = Template( """ int a, b, c, res; a = $a; b = $b; while (0 < b) { c = a % b; a = b; b = c; } res = a; """ ).substitute(a=a, b=b) kore_text = _kast(definition_dir=imp_definition, input='program', output='kore', expression=program_text).stdout program_pattern = KoreParser(kore_text).pattern() init_pattern = top_cell_initializer( { '$PGM': inj(SortApp('SortPgm'), SortApp('SortKItem'), program_pattern), } ) # When final_pattern = llvm_interpret(imp_definition, init_pattern, depth=1000) extract_state = ( chain >> km.app("Lbl'-LT-'generatedTop'-GT-'") >> km.arg("Lbl'-LT-'T'-GT-'") >> km.arg("Lbl'-LT-'state'-GT-'") >> km.arg(0) >> km.kore_map_of( key=chain >> km.inj >> km.kore_id, value=chain >> km.inj >> km.kore_int, ) ) state = dict(extract_state(final_pattern)) actual = state['res'] # Then assert actual == expected
Run the interpreter on Euclid's algorithm.
test_llvm_interpret
python
runtimeverification/k
pyk/src/tests/integration/test_krun.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/integration/test_krun.py
BSD-3-Clause
def start_pattern() -> Pattern: """ <k> int x ; x = 1 </k> """ text = r""" LblinitGeneratedTopCell{}( Lbl'Unds'Map'Unds'{}( Lbl'Stop'Map{}(), Lbl'UndsPipe'-'-GT-Unds'{}( inj{SortKConfigVar{}, SortKItem{}}(\dv{SortKConfigVar{}}("$PGM")), inj{SortPgm{}, SortKItem{}}( Lblint'UndsSClnUnds'{}( Lbl'UndsCommUnds'{}( \dv{SortId{}}("x"), Lbl'Stop'List'LBraQuotUndsCommUndsQuotRBra'{}() ), Lbl'UndsEqlsUndsSCln'{}( \dv{SortId{}}("x"), inj{SortInt{}, SortAExp{}}(\dv{SortInt{}}("1")) ) ) ) ) ) ) """ return Parser.from_string(text).pattern()
<k> int x ; x = 1 </k>
start_pattern
python
runtimeverification/k
pyk/src/tests/integration/kllvm/test_internal_term.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/integration/kllvm/test_internal_term.py
BSD-3-Clause
def start_pattern() -> Pattern: """ <k> foo(100) </k> """ text = r""" LblinitGeneratedTopCell{}( Lbl'Unds'Map'Unds'{}( Lbl'Stop'Map{}(), Lbl'UndsPipe'-'-GT-Unds'{}( inj{SortKConfigVar{}, SortKItem{}}(\dv{SortKConfigVar{}}("$PGM")), inj{SortFoo{}, SortKItem{}}( inj{SortFoo{}, SortKItem{}}( Lblfoo'LParUndsRParUnds'STEPS'Unds'Foo'Unds'Int{}(\dv{SortInt{}}("100")) ) ) ) ) ) """ return parse_pattern(text)
<k> foo(100) </k>
start_pattern
python
runtimeverification/k
pyk/src/tests/integration/kllvm/test_step.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/integration/kllvm/test_step.py
BSD-3-Clause
def foo_output(n: int) -> str: """ <k> foo(100 - n) </k> """ return fr"""Lbl'-LT-'generatedTop'-GT-'{{}}(Lbl'-LT-'k'-GT-'{{}}(kseq{{}}(inj{{SortFoo{{}}, SortKItem{{}}}}(Lblfoo'LParUndsRParUnds'STEPS'Unds'Foo'Unds'Int{{}}(\dv{{SortInt{{}}}}("{100-n}"))),dotk{{}}())),Lbl'-LT-'generatedCounter'-GT-'{{}}(\dv{{SortInt{{}}}}("0")))"""
<k> foo(100 - n) </k>
foo_output
python
runtimeverification/k
pyk/src/tests/integration/kllvm/test_step.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/integration/kllvm/test_step.py
BSD-3-Clause
def bar_output() -> str: """ <k> bar() </k> """ return r"""Lbl'-LT-'generatedTop'-GT-'{}(Lbl'-LT-'k'-GT-'{}(kseq{}(inj{SortFoo{}, SortKItem{}}(Lblbar'LParRParUnds'STEPS'Unds'Foo{}()),dotk{}())),Lbl'-LT-'generatedCounter'-GT-'{}(\dv{SortInt{}}("0")))"""
<k> bar() </k>
bar_output
python
runtimeverification/k
pyk/src/tests/integration/kllvm/test_step.py
https://github.com/runtimeverification/k/blob/master/pyk/src/tests/integration/kllvm/test_step.py
BSD-3-Clause
def partition(iterable: Iterable[T], pred: Callable[[T, T], bool]) -> list[list[T]]: """Partition the iterable into sublists based on the given predicate. predicate pred(_, _) should satisfy: - pred(x, x) - if pred(x, y) and pred(y, z) then pred(x, z); - if pred(x, y) then pred(y, x); """ groups: list[list[T]] = [] for item in iterable: found = False for group in groups: group_matches = [] for group_item in group: group_match = pred(group_item, item) if group_match != pred(item, group_item): raise ValueError(f'Partitioning failed, predicate commutativity failed on: {(item, group_item)}') group_matches.append(group_match) if found and any(group_matches): raise ValueError(f'Partitioning failed, item matched multiple groups: {item}') if all(group_matches): found = True group.append(item) elif any(group_matches): raise ValueError(f'Partitioning failed, item matched only some elements of group: {(item, group)}') if not found: groups.append([item]) return groups
Partition the iterable into sublists based on the given predicate. The predicate pred(_, _) should satisfy: pred(x, x); if pred(x, y) and pred(y, z) then pred(x, z); if pred(x, y) then pred(y, x).
partition
python
runtimeverification/k
pyk/src/pyk/utils.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/utils.py
BSD-3-Clause
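A hypothetical usage sketch, assuming the `partition` function above is in scope: grouping integers by parity, where the predicate is a genuine equivalence relation as the docstring requires:

same_parity = lambda x, y: x % 2 == y % 2
assert partition([1, 2, 3, 4, 5, 6], same_parity) == [[1, 3, 5], [2, 4, 6]]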
def get_rule_by_id(definition: KDefinition, rule_id: str) -> KRule: """Get a rule from the definition by coverage rule id. Args: definition: JSON-encoded definition. rule_id: String of unique rule identifier generated by `kompile --coverage`. Returns: JSON encoded rule which has identifier `rule_id`. """ for module in definition.modules: for sentence in module.sentences: if type(sentence) is KRule: if Atts.UNIQUE_ID in sentence.att and sentence.att[Atts.UNIQUE_ID] == rule_id: return sentence raise ValueError(f'Could not find rule with ID: {rule_id}')
Get a rule from the definition by coverage rule id. Args: definition: JSON-encoded definition. rule_id: String of unique rule identifier generated by `kompile --coverage`. Returns: JSON encoded rule which has identifier `rule_id`.
get_rule_by_id
python
runtimeverification/k
pyk/src/pyk/coverage.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/coverage.py
BSD-3-Clause
def translate_coverage( src_all_rules: Iterable[str], dst_all_rules: Iterable[str], dst_definition: KDefinition, src_rules_list: Iterable[str], ) -> list[str]: """Translate the coverage data from one kompiled definition to another. Args: src_all_rules: Contents of allRules.txt for definition which coverage was generated for. dst_all_rules: Contents of allRules.txt for definition which you desire coverage for. dst_definition: JSON encoded definition of dst kompiled definition. src_rules_list: Actual coverage data produced. Returns: List of non-functional rules applied in dst definition translated from src definition. """ # Load the src_rule_id -> src_source_location rule map from the src kompiled directory src_rule_map = {} for line in src_all_rules: src_rule_hash, src_rule_loc = line.split(' ') src_rule_loc = src_rule_loc.split('/')[-1] src_rule_map[src_rule_hash.strip()] = src_rule_loc.strip() # Load the dst_rule_id -> dst_source_location rule map (and inverts it) from the dst kompiled directory dst_rule_map = {} for line in dst_all_rules: dst_rule_hash, dst_rule_loc = line.split(' ') dst_rule_loc = dst_rule_loc.split('/')[-1] dst_rule_map[dst_rule_loc.strip()] = dst_rule_hash.strip() src_rule_list = [rule_hash.strip() for rule_hash in src_rules_list] # Filter out non-functional rules from rule map (determining if they are functional via the top symbol in the rule being `<generatedTop>`) dst_non_function_rules = [] for module in dst_definition.modules: for sentence in module.sentences: if type(sentence) is KRule: body = sentence.body if (type(body) is KApply and body.label.name == '<generatedTop>') or ( type(body) is KRewrite and type(body.lhs) is KApply and body.lhs.label.name == '<generatedTop>' ): if Atts.UNIQUE_ID in sentence.att: dst_non_function_rules.append(sentence.att[Atts.UNIQUE_ID]) # Convert the src_coverage rules to dst_no_coverage rules via the maps generated above dst_rule_list = [] for src_rule in src_rule_list: if src_rule not in src_rule_map: raise ValueError(f'Could not find rule in src_rule_map: {src_rule}') src_rule_loc = src_rule_map[src_rule] if src_rule_loc not in dst_rule_map: raise ValueError(f'Could not find rule location in dst_rule_map: {src_rule_loc}') dst_rule = dst_rule_map[src_rule_loc] if dst_rule in dst_non_function_rules: dst_rule_list.append(dst_rule) return dst_rule_list
Translate the coverage data from one kompiled definition to another. Args: src_all_rules: Contents of allRules.txt for definition which coverage was generated for. dst_all_rules: Contents of allRules.txt for definition which you desire coverage for. dst_definition: JSON encoded definition of dst kompiled definition. src_rules_list: Actual coverage data produced. Returns: List of non-functional rules applied in dst definition translated from src definition.
translate_coverage
python
runtimeverification/k
pyk/src/pyk/coverage.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/coverage.py
BSD-3-Clause
def translate_coverage_from_paths(src_kompiled_dir: str, dst_kompiled_dir: str, src_rules_file: PathLike) -> list[str]: """Translate coverage information given paths to needed files. Args: src_kompiled_dir: Path to kompiled directory of source. dst_kompiled_dir: Path to kompiled directory of destination. src_rules_file: Path to generated rules coverage file. Returns: Translated list of rules with non-semantic rules stripped out. """ src_all_rules = [] with open(src_kompiled_dir + '/allRules.txt') as src_all_rules_file: src_all_rules = [line.strip() for line in src_all_rules_file] dst_all_rules = [] with open(dst_kompiled_dir + '/allRules.txt') as dst_all_rules_file: dst_all_rules = [line.strip() for line in dst_all_rules_file] dst_definition = read_kast_definition(dst_kompiled_dir + '/compiled.json') src_rules_list = [] with open(src_rules_file) as src_rules: src_rules_list = [line.strip() for line in src_rules] return translate_coverage(src_all_rules, dst_all_rules, dst_definition, src_rules_list)
Translate coverage information given paths to needed files. Args: src_kompiled_dir: Path to kompiled directory of source. dst_kompiled_dir: Path to kompiled directory of destination. src_rules_file: Path to generated rules coverage file. Returns: Translated list of rules with non-semantic rules stripped out.
translate_coverage_from_paths
python
runtimeverification/k
pyk/src/pyk/coverage.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/coverage.py
BSD-3-Clause
def exec_rpc_kast(options: RPCKastOptions) -> None: """Convert an 'execute' JSON RPC response to a new 'execute' or 'simplify' request. Copies parameters from a reference request. """ reference_request = json.loads(options.reference_request_file.read()) input_dict = json.loads(options.response_file.read()) execute_result = ExecuteResult.from_dict(input_dict['result']) non_state_keys = set(reference_request['params'].keys()).difference(['state']) request_params = {} for key in non_state_keys: request_params[key] = reference_request['params'][key] request_params['state'] = {'format': 'KORE', 'version': 1, 'term': execute_result.state.kore.dict} request = { 'jsonrpc': reference_request['jsonrpc'], 'id': reference_request['id'], 'method': reference_request['method'], 'params': request_params, } options.output_file.write(json.dumps(request))
Convert an 'execute' JSON RPC response to a new 'execute' or 'simplify' request. Copies parameters from a reference request.
exec_rpc_kast
python
runtimeverification/k
pyk/src/pyk/__main__.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/__main__.py
BSD-3-Clause
def __add__(self, other: object) -> Loc: if isinstance(other, str): """Return the line,column after the additional text""" line, col = self.line, self.col for c in other: if c == '\n': line += 1 col = 0 col += 1 return Loc(line, col) return NotImplemented
Return the line,column after the additional text
__add__
python
runtimeverification/k
pyk/src/pyk/kast/outer_lexer.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/outer_lexer.py
BSD-3-Clause
def loc(self) -> Loc: """Return the ``(line, column)`` of the last character returned by the iterator. If no character has been returned yet, it will be the location that this iterator was initialized with. The default is (1,0), which is the only time the column will be 0. """ return Loc(self._line, self._col)
Return the ``(line, column)`` of the last character returned by the iterator. If no character has been returned yet, it will be the location that this iterator was initialized with. The default is (1,0), which is the only time the column will be 0.
loc
python
runtimeverification/k
pyk/src/pyk/kast/outer_lexer.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/outer_lexer.py
BSD-3-Clause
def _maybe_comment(la: str, it: Iterator[str]) -> tuple[bool, list[str], str]: """Attempt to consume a line or block comment from the iterator. Expects la to be ``'/'``. Args: la: The current lookahead. it: The iterator. Returns: A tuple ``(success, consumed, la)`` where - ``success``: Indicates whether `consumed` is a comment. - ``consumed``: The list of consumed characters. - ``la``: The current lookahead. """ assert la == '/' consumed = [la] # ['/'] la = next(it, '') if la == '': return False, consumed, la elif la == '/': consumed.append(la) # ['/', '/'] la = next(it, '') while la and la != '\n': consumed.append(la) # ['/', '/', ..., X] la = next(it, '') return True, consumed, la elif la == '*': consumed.append(la) # ['/', '*'] la = next(it, '') while True: if la == '': return False, consumed, la elif la == '*': consumed.append(la) # ['/', '*', ..., '*'] la = next(it, '') if la == '': return False, consumed, la elif la == '/': consumed.append(la) # ['/', '*', ..., '*', '/'] la = next(it, '') return True, consumed, la else: consumed.append(la) # ['/', '*', ..., '*', X] la = next(it, '') continue else: consumed.append(la) # ['/', '*', ..., X] la = next(it, '') continue else: return False, consumed, la
Attempt to consume a line or block comment from the iterator. Expects la to be ``'/'``. Args: la: The current lookahead. it: The iterator. Returns: A tuple ``(success, consumed, la)`` where - ``success``: Indicates whether `consumed` is a comment. - ``consumed``: The list of consumed characters. - ``la``: The current lookahead.
_maybe_comment
python
runtimeverification/k
pyk/src/pyk/kast/outer_lexer.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/outer_lexer.py
BSD-3-Clause
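A hypothetical usage sketch, assuming `_maybe_comment` above is in scope, feeding it a line comment by hand through the character-iterator protocol it expects:

it = iter("// a comment\nrest")
la = next(it)
success, consumed, la = _maybe_comment(la, it)
assert success and "".join(consumed) == "// a comment" and la == "\n"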
def _associativity_wrong(definition: KDefinition, parent: KApply, term: KApply, index: int) -> bool: """Return whether `term` can appear as the `index`-th child of `parent` according to associativity rules. A left (right) associative symbol cannot appear as the rightmost (leftmost) child of a symbol with equal priority. """ parent_label = parent.label.name term_label = term.label.name prod = definition.symbols[parent_label] if index == 0 and term_label in definition.right_assocs.get(parent_label, ()): return True if index == len(prod.items) - 1 and term_label in definition.left_assocs.get(parent_label, ()): return True return False
Return whether `term` can appear as the `index`-th child of `parent` according to associativity rules. A left (right) associative symbol cannot appear as the rightmost (leftmost) child of a symbol with equal priority.
_associativity_wrong
python
runtimeverification/k
pyk/src/pyk/kast/formatter.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/formatter.py
BSD-3-Clause
def _priority_wrong(definition: KDefinition, parent: KApply, term: KApply) -> bool: """Return whether `term` can appear as a child of `parent` according to priority rules. A symbol with a lesser priority cannot appear as the child of a symbol with greater priority. """ parent_label = parent.label.name term_label = term.label.name return term_label in definition.priorities.get(parent_label, ())
Return whether `term` can appear as a child of `parent` according to priority rules. A symbol with a lesser priority cannot appear as the child of a symbol with greater priority.
_priority_wrong
python
runtimeverification/k
pyk/src/pyk/kast/formatter.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/formatter.py
BSD-3-Clause
def cell_label_to_var_name(label: str) -> str: """Return a variable name based on a cell label.""" return label.replace('-', '_').replace('<', '').replace('>', '').upper() + '_CELL'
Return a variable name based on a cell label.
cell_label_to_var_name
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
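A few illustrative calls, assuming the function above is in scope (the hyphenated label is a made-up example to show the '-' to '_' replacement):

assert cell_label_to_var_name('<k>') == 'K_CELL'
assert cell_label_to_var_name('<generatedCounter>') == 'GENERATEDCOUNTER_CELL'
assert cell_label_to_var_name('<some-cell>') == 'SOME_CELL_CELL'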
def split_config_from(configuration: KInner) -> tuple[KInner, dict[str, KInner]]: """Split the substitution from a given configuration. Given an input configuration `config`, will return a tuple `(symbolic_config, subst)`, where: 1. `config == substitute(symbolic_config, subst)` 2. `symbolic_config` is the same configuration structure, but where the contents of leaf cells is replaced with a fresh KVariable. 3. `subst` is the substitution for the generated KVariables back to the original configuration contents. """ initial_substitution = {} def _replace_with_var(k: KInner) -> KInner: if type(k) is KApply and k.is_cell: if k.arity == 1 and not (type(k.args[0]) is KApply and k.args[0].is_cell): config_var = cell_label_to_var_name(k.label.name) initial_substitution[config_var] = k.args[0] return KApply(k.label, [KVariable(config_var)]) return k symbolic_config = top_down(_replace_with_var, configuration) return (symbolic_config, initial_substitution)
Split the substitution from a given configuration. Given an input configuration `config`, will return a tuple `(symbolic_config, subst)`, where: 1. `config == substitute(symbolic_config, subst)` 2. `symbolic_config` is the same configuration structure, but where the contents of leaf cells is replaced with a fresh KVariable. 3. `subst` is the substitution for the generated KVariables back to the original configuration contents.
split_config_from
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
def collapse_dots(kast: KInner) -> KInner: """Given a configuration with structural frames `...`, minimize the structural frames needed. Args: kast: A configuration, potentially with structural frames. Returns: The same configuration, with the amount of structural framing minimized. """ def _collapse_dots(_kast: KInner) -> KInner: if type(_kast) is KApply: if _kast.is_cell and _kast.arity == 1 and _kast.args[0] == DOTS: return DOTS new_args = [arg for arg in _kast.args if arg != DOTS] if _kast.is_cell and len(new_args) == 0: return DOTS if len(new_args) < len(_kast.args): new_args.append(DOTS) return _kast.let(args=new_args) elif type(_kast) is KRewrite: if _kast.lhs == DOTS: return DOTS return _kast return bottom_up(_collapse_dots, kast)
Given a configuration with structural frames `...`, minimize the structural frames needed. Args: kast: A configuration, potentially with structural frames. Returns: The same configuration, with the amount of structural framing minimized.
collapse_dots
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
def inline_cell_maps(kast: KInner) -> KInner: """Ensure that cell map collections are printed nicely, not as Maps. Args: kast: A KAST term. Returns: The KAST term with cell maps inlined. """ def _inline_cell_maps(_kast: KInner) -> KInner: if type(_kast) is KApply and _kast.label.name.endswith('CellMapItem'): map_key = _kast.args[0] if type(map_key) is KApply and map_key.is_cell: return _kast.args[1] return _kast return bottom_up(_inline_cell_maps, kast)
Ensure that cell map collections are printed nicely, not as Maps. Args: kast: A KAST term. Returns: The KAST term with cell maps inlined.
inline_cell_maps
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
def remove_semantic_casts(kast: KInner) -> KInner: """Remove injected `#SemanticCast*` nodes in AST. Args: kast: A term (possibly) containing automatically injected `#SemanticCast*` KApply nodes. Returns: The term without the `#SemanticCast*` nodes. """ def _remove_semtnaic_casts(_kast: KInner) -> KInner: if type(_kast) is KApply and _kast.arity == 1 and _kast.label.name.startswith('#SemanticCast'): return _kast.args[0] return _kast return bottom_up(_remove_semtnaic_casts, kast)
Remove injected `#SemanticCast*` nodes in AST. Args: kast: A term (possibly) containing automatically injected `#SemanticCast*` KApply nodes. Returns: The term without the `#SemanticCast*` nodes.
remove_semantic_casts
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
def useless_vars_to_dots(kast: KInner, keep_vars: Iterable[str] = ()) -> KInner: """Structurally abstract away useless variables. Args: kast: A term. keep_vars: Iterable of variables to keep. Returns: The term with the useless varables structurally abstracted. """ num_occs = count_vars(kast) + Counter(keep_vars) def _collapse_useless_vars(_kast: KInner) -> KInner: if type(_kast) is KApply and _kast.is_cell: new_args: list[KInner] = [] for arg in _kast.args: if type(arg) is KVariable and num_occs[arg.name] == 1: new_args.append(DOTS) else: new_args.append(arg) return _kast.let(args=new_args) return _kast return bottom_up(_collapse_useless_vars, kast)
Structurally abstract away useless variables. Args: kast: A term. keep_vars: Iterable of variables to keep. Returns: The term with the useless variables structurally abstracted.
useless_vars_to_dots
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
def labels_to_dots(kast: KInner, labels: Collection[str]) -> KInner: """Abstract specific labels for printing. Args: kast: A term. labels: List of labels to abstract. Returns The term with `labels` abstracted. """ def _labels_to_dots(k: KInner) -> KInner: if type(k) is KApply and k.is_cell and k.label.name in labels: return DOTS return k return bottom_up(_labels_to_dots, kast)
Abstract specific labels for printing. Args: kast: A term. labels: List of labels to abstract. Returns: The term with `labels` abstracted.
labels_to_dots
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
def minimize_term( term: KInner, keep_vars: Iterable[str] = (), abstract_labels: Collection[str] = (), keep_cells: Collection[str] = () ) -> KInner: """Minimize a K term for pretty-printing. - Variables only used once will be removed. - Unused cells will be abstracted. - Useless conditions will be attempted to be removed. Args: kast: A term. Returns: The term, minimized. """ term = inline_cell_maps(term) term = remove_semantic_casts(term) term = useless_vars_to_dots(term, keep_vars=keep_vars) if keep_cells: term = extract_cells(term, keep_cells) else: term = labels_to_dots(term, abstract_labels) term = collapse_dots(term) return term
Minimize a K term for pretty-printing. - Variables only used once will be removed. - Unused cells will be abstracted. - Useless conditions will be removed where possible. Args: term: A term. Returns: The term, minimized.
minimize_term
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause
def minimize_rule_like(rule: RL, keep_vars: Iterable[str] = ()) -> RL: """Minimize a K rule or claim for pretty-printing. - Variables only used once will be removed. - Unused cells will be abstracted. - Useless side-conditions will be attempted to be removed. Args: rule: A K rule or claim. Returns: The rule or claim, minimized. """ body = rule.body requires = rule.requires ensures = rule.ensures requires = andBool(flatten_label('_andBool_', requires)) requires = simplify_bool(requires) ensures = andBool(flatten_label('_andBool_', ensures)) ensures = simplify_bool(ensures) constrained_vars = set(keep_vars) | free_vars(requires) | free_vars(ensures) body = minimize_term(body, keep_vars=constrained_vars) return rule.let(body=body, requires=requires, ensures=ensures)
Minimize a K rule or claim for pretty-printing. - Variables only used once will be removed. - Unused cells will be abstracted. - Useless side-conditions will be removed where possible. Args: rule: A K rule or claim. Returns: The rule or claim, minimized.
minimize_rule_like
python
runtimeverification/k
pyk/src/pyk/kast/manip.py
https://github.com/runtimeverification/k/blob/master/pyk/src/pyk/kast/manip.py
BSD-3-Clause