HaileyStorm committed on
Commit • f207752
1 Parent(s): 454586f
Upload chess-gpt-eval-contrastive/main.py with huggingface_hub
chess-gpt-eval-contrastive/main.py CHANGED
@@ -731,8 +731,8 @@ recording_file = "logs/determine.csv" # default recording file. Because we are u
 player_ones = ["50M/anneal/anneal_complete_round3.pt"]
 player_two_recording_name = "lc0_sweep" #"stockfish_sweep"
 move_num_in_gamestate = False
-book_opening =
-random_opening =
+book_opening = True
+random_opening = False
 random_opening_moves = 10
 
 activations_path="activations_rdm.pkl"
@@ -749,9 +749,9 @@ if __name__ == "__main__":
     i = 0
     rm = 10
     # for rm in range(5, 36, 5):
-
-    for wgt in [0.005, 0.01, 0.025, 0.05]:
-        num_games =
+    for i in [0]: # [3] #range(11):
+    # for wgt in [0.005, 0.01, 0.025, 0.05]:
+        num_games = 5000
         # player_one = GPTPlayer(model="gpt-3.5-turbo-instruct")
         # player_one = LocalLlamaPlayer(model_name="meta-llama/Llama-2-7b-hf")
         # player_one = LocalLoraLlamaPlayer("meta-llama/Llama-2-7b-hf", "/workspace/axolotl/lora2-out")
@@ -762,7 +762,7 @@ if __name__ == "__main__":
         # player_one = NanoGptPlayer(model_name=player_one_recording_name, move_num_in_gamestate=move_num_in_gamestate)
         #player_one_recording_name = f"xformer_rdm_{rm}"
         player_one = MambaPlayer(model_name=player_one_recording_name, move_num_in_gamestate=move_num_in_gamestate, update_contrastive=update_activations, update_linear=update_linear, linear_probe_path=linear_path)
-        player_one_recording_name = f'
+        player_one_recording_name = f'linear_train'
         if apply_activations:
             player_one.apply_contrastive_activations(path=activations_path, weight=wgt)
 
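For orientation, the sketch below reassembles how the touched region of chess-gpt-eval-contrastive/main.py reads after this commit, using only lines visible in the hunks above. Indentation, the elision of untouched code between hunks, and the comment about where `wgt` is now bound are assumptions, not part of the commit itself; the truncated pre-commit values are not reconstructed.

# Post-commit state of the changed region (reassembled from the diff above;
# indentation assumed, untouched code between hunks omitted).
player_ones = ["50M/anneal/anneal_complete_round3.pt"]
player_two_recording_name = "lc0_sweep"  # "stockfish_sweep"
move_num_in_gamestate = False
book_opening = True        # set in this commit (previous value truncated in the diff)
random_opening = False     # set in this commit
random_opening_moves = 10

activations_path = "activations_rdm.pkl"

if __name__ == "__main__":
    i = 0
    rm = 10
    # for rm in range(5, 36, 5):
    for i in [0]:  # [3] #range(11):
        # for wgt in [0.005, 0.01, 0.025, 0.05]:   # weight sweep disabled in this commit
        num_games = 5000                           # fixed game count replaces the old truncated value
        # ... player setup lines unchanged between hunks ...
        player_one = MambaPlayer(
            model_name=player_one_recording_name,
            move_num_in_gamestate=move_num_in_gamestate,
            update_contrastive=update_activations,
            update_linear=update_linear,
            linear_probe_path=linear_path,
        )
        player_one_recording_name = f'linear_train'
        if apply_activations:
            # wgt is no longer bound by the (now commented-out) sweep loop,
            # so it is presumably defined elsewhere in main.py
            player_one.apply_contrastive_activations(path=activations_path, weight=wgt)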