+ # Remove all <div>. This is required to make indentation work in code blocks.
+ val = re.sub(div_pattern, "", val)
+ # Remove all <span>. This is required to make underscores work in code blocks.
+ val = re.sub(span_pattern, "", val)
+ # Convert html to markdown
+ val = markdownify.markdownify(val).strip()
+ # Reformat code
+ val = reformat_code(val)
+
+ # Remove noisy "[number] / [number]" at the beginning
+ noise = re.search(regenerate_pattern, val)
+ if noise and noise.start() == 0:
+ val = val[noise.end() :]
+ # Remove noisy "Copy[number] chars / [number] words"
+ val = re.sub(copy_chars_pattern, "", val)
+ # Remove empty code block ```\nCopy code\n```
+ val = re.sub(copy_code_pattern, "", val)
+
+ # Collapse extra blank lines and strip surrounding whitespace
+ val = val.replace("\n\n\n", "\n").strip()
+
+ return val
+
+
+def contain_blocked_words(val: str) -> bool:
+ blocked_words = ["openai", "chatgpt"]
+ for w in blocked_words:
+ if w in val.lower():
+ return True
+ return False
+
+
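+# Returns (sample, error_code): 0 = kept, 1 = too short, 2 = wrong role
+# order, 3 = contains blocked words, 4 = HTML parser error.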
+def clean_html_one_sample(sample):
+ roles = ["human", "gpt"]
+
+ if len(sample["conversations"]) <= 1:
+ return (sample, 1)
+
+ # Adjust the offset for cases like https://sharegpt.com/c/VyaZlh4
+ if sample["conversations"][0]["from"] != "human":
+ sample["conversations"] = sample["conversations"][1:]
+ if len(sample["conversations"]) <= 1:
+ return (sample, 1)
+
+ if sample["conversations"][-1]["from"] == "human":
+ sample["conversations"] = sample["conversations"][:-1]
+ if len(sample["conversations"]) <= 1:
+ return (sample, 1)
+
+ char_count = 0
+ new_conversations = []
+ for i, c in enumerate(sample["conversations"]):
+ if c["from"] != roles[i % 2]:
+ return (sample, 2)
+
+ if contain_blocked_words(c["value"]):
+ return (sample, 3)
+
+ try:
+ new_val = html_to_markdown(c["value"])
+ except (bs4.builder.ParserRejectedMarkup, AssertionError):
+ return (sample, 4)
+
+ # Filter empty answers like https://sharegpt.com/c/mrllZ6u
+ if not new_val or not new_val[0].isprintable():
+ break
+
+ char_count += len(new_val)
+ new_conversations.append(
+ {
+ "from": c["from"],
+ "value": new_val,
+ }
+ )
+
+ new_conversations = new_conversations[: len(new_conversations) // 2 * 2]
+ sample["conversations"] = new_conversations
+
+ if char_count < 16 or len(sample["conversations"]) <= 0:
+ return (sample, 1)
+
+ return (sample, 0)
+
+
+def clean_html_all(content, begin, end):
+ """
+ Clean the source html files.
+ """
+ cnt_skip = 0
+ cnt_blocked_words = 0
+ cnt_wrong_format = 0
+ cnt_parser_error = 0
+ cnt_too_short = 0
+ cnt_id_duplication = 0
+ cnt_value_duplication = 0
+ cnt_plugin = 0
+
+ content = content[begin:end]
+ processed = []
+ with ProcessPoolExecutor() as executor:
+ for result in tqdm(
+ executor.map(clean_html_one_sample, content), total=len(content)
+ ):
+ processed.append(result)
+
+ visited = {}
+ new_content = []
+ for sample, error_code in processed:
+ cid = sample["id"]
+ skipped = True
+
+ if error_code != 0:
+ if error_code == 1:
+ print(f"id {cid} is too short")
+ cnt_too_short += 1
+ elif error_code == 2:
+ print(f"id {cid} has a wrong format")
+ cnt_wrong_format += 1
+ elif error_code == 3:
+ print(f"id {cid} contains blocked words")
+ cnt_blocked_words += 1
+ elif error_code == 4:
+ print(f"id {cid} contains parser errors")
+ cnt_parser_error += 1
+ else:
+ raise ValueError(f"Invalid error_code: {error_code}")
+ elif cid in visited:
+ print(f"id {cid} is an id duplication of {visited[cid]}")
+ cnt_id_duplication += 1
+ elif sample.get("plugins", None) is not None:
+ print(f"id {cid} contains plugin")
+ cnt_plugin += 1
+ else:
+ key = (
+ sample["conversations"][0]["value"],
+ sample["conversations"][1]["value"],
+ )
+ if key in visited:
+ print(f"id {cid} is a value duplication of {visited[key]}")
+ cnt_value_duplication += 1
+ else:
+ visited[cid] = visited[key] = cid
+ skipped = False
+
+ if not skipped:
+ new_content.append(sample)
+ else:
+ cnt_skip += 1
+
+ print(
+ f"total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, "
+ f"cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, "
+ f"cnt_wrong_format: {cnt_wrong_format}, "
+ f"cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, "
+ f"cnt_value_duplication: {cnt_value_duplication}, cnt_plugin: {cnt_plugin}"
+ )
+
+ return new_content
+
+
+def main(args):
+ content = json.load(open(args["in_file"], "r"))
+ content = clean_html_all(content, args["begin"], args["end"])
+ json.dump(content, open(args["out_file"], "w"), indent=2, ensure_ascii=False)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, default="sharegpt_clean.json")
+ parser.add_argument("--begin", type=int)
+ parser.add_argument("--end", type=int)
+ parser.add_argument("--debug", action="store_true")
+ args = parser.parse_args()
+ main(vars(args))
diff --git a/fastchat/data/convert_alpaca.py b/fastchat/data/convert_alpaca.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f984b852ee7d0f7a6b966e4ae1b870d39d85989
--- /dev/null
+++ b/fastchat/data/convert_alpaca.py
@@ -0,0 +1,38 @@
+"""
+Convert alpaca dataset into sharegpt format.
+
+Usage: python3 -m fastchat.data.convert_alpaca --in alpaca_data.json
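+
+For example, a (hypothetical) alpaca record
+    {"instruction": "Summarize.", "input": "Some text.", "output": "A summary."}
+becomes the sharegpt-style sample
+    {"id": "alpaca_0", "conversations": [
+        {"from": "human", "value": "Summarize.\nInput:\nSome text."},
+        {"from": "gpt", "value": "A summary."}]}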
+"""
+
+import argparse
+import json
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str)
+ parser.add_argument("--out-file", type=str)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ new_content = []
+ for i, c in enumerate(content):
+ if len(c["input"].strip()) > 1:
+ q, a = c["instruction"] + "\nInput:\n" + c["input"], c["output"]
+ else:
+ q, a = c["instruction"], c["output"]
+ new_content.append(
+ {
+ "id": f"alpaca_{i}",
+ "conversations": [
+ {"from": "human", "value": q},
+ {"from": "gpt", "value": a},
+ ],
+ }
+ )
+
+ print(f"#out: {len(new_content)}")
+ json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/extract_gpt4_only.py b/fastchat/data/extract_gpt4_only.py
new file mode 100644
index 0000000000000000000000000000000000000000..bab53bcc7faa75d90392ab7d8dc35d6cdbec67bd
--- /dev/null
+++ b/fastchat/data/extract_gpt4_only.py
@@ -0,0 +1,32 @@
+"""
+Extract the conversations generated by GPT-4 only.
+
+Usage: python3 -m fastchat.data.extract_gpt4_only --in sharegpt.json
+"""
+import argparse
+import json
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str)
+ parser.add_argument("--begin", type=int)
+ parser.add_argument("--end", type=int)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ content = content[args.begin : args.end]
+ new_content = []
+ for c in content:
+ model = c.get("model", None)
+ if model == "gpt4" or model is None:
+ new_content.append(c)
+
+ if args.out_file:
+ out_file = args.out_file
+ else:
+ out_file = args.in_file.replace(".json", "_gpt4.json")
+
+ print(f"#in: {len(content)}, #out: {len(new_content)}")
+ json.dump(new_content, open(out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/extract_single_round.py b/fastchat/data/extract_single_round.py
new file mode 100644
index 0000000000000000000000000000000000000000..5da803656f4be6cef89559583cd36d692e1a582e
--- /dev/null
+++ b/fastchat/data/extract_single_round.py
@@ -0,0 +1,29 @@
+"""
+Extract the first round of the conversations.
+
+Usage: python3 -m fastchat.data.extract_single_round --in sharegpt.json
+"""
+import argparse
+import json
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str)
+ parser.add_argument("--begin", type=int)
+ parser.add_argument("--end", type=int)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ content = content[args.begin : args.end]
+ for c in content:
+ c["conversations"] = c["conversations"][:2]
+
+ if args.out_file:
+ out_file = args.out_file
+ else:
+ out_file = args.in_file.replace(".json", "_single.json")
+
+ print(f"#in: {len(content)}, #out: {len(content)}")
+ json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/filter_wrong_format.py b/fastchat/data/filter_wrong_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..46588ba8426aa99deab3ab1cb03e3b6774ede3a6
--- /dev/null
+++ b/fastchat/data/filter_wrong_format.py
@@ -0,0 +1,44 @@
+"""
+Filter conversations with wrong formats.
+
+Usage:
+python3 -m fastchat.data.filter_wrong_format --in input.json --out output.json
+
+"""
+import argparse
+import json
+import re
+
+from tqdm import tqdm
+
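+# Matches two consecutive list items that are both numbered "1.", i.e. an
+# ordered list whose indices failed to increment.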
+wrong_indices_pattern = re.compile(r"\n1\. [^2]*\n1\. ")
+
+
+def should_skip(conv):
+ # Filter wrong list indices like https://sharegpt.com/c/1pREAGO
+ for sentence in conv["conversations"]:
+ val = sentence["value"]
+ sub = re.search(wrong_indices_pattern, val)
+ if sub is not None:
+ return True
+
+ return False
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, required=True)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+
+ new_content = []
+ for conv in tqdm(content):
+ if should_skip(conv):
+ print(f"{conv['id']} contains a wrong format.")
+ else:
+ new_content.append(conv)
+
+ print(f"#in: {len(content)}, #out: {len(new_content)}")
+ json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/get_stats.py b/fastchat/data/get_stats.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e0698e4c5fce8fdb287b224e88c16edf471557c
--- /dev/null
+++ b/fastchat/data/get_stats.py
@@ -0,0 +1,82 @@
+"""
+Get stats of a dataset.
+
+Usage: python3 -m fastchat.data.get_stats --in sharegpt.json
+"""
+
+import argparse
+from concurrent.futures import ProcessPoolExecutor
+import json
+
+import numpy as np
+from tqdm import tqdm
+from transformers import AutoTokenizer
+
+K = 1e3
+M = 1e6
+
+
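+# NOTE: `tokenizer` is a module-level global assigned in __main__ below;
+# ProcessPoolExecutor workers inherit it on platforms that fork.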
+def tokenize_one_sample(c):
+ for i in range(len(c["conversations"])):
+ v = c["conversations"][i]["value"]
+ c["conversations"][i]["value"] = tokenizer.tokenize(v)
+ return c
+
+
+def tokenize_dataset(content):
+ processed = []
+ with ProcessPoolExecutor() as executor:
+ for result in tqdm(
+ executor.map(tokenize_one_sample, content), total=len(content)
+ ):
+ processed.append(result)
+
+ return processed
+
+
+def compute_stats(content):
+ sample_lens = []
+ sample_turns = []
+ prompt_lens = []
+ res_lens = []
+
+ for c in content:
+ sample_len = 0
+ sample_turns.append(len(c["conversations"]) // 2)
+ for i in range(len(c["conversations"]) // 2):
+ p = c["conversations"][i * 2]["value"]
+ r = c["conversations"][i * 2 + 1]["value"]
+
+ turn_len = len(p) + len(r)
+ sample_len += turn_len
+ prompt_lens.append(len(p))
+ res_lens.append(len(r))
+ sample_lens.append(sample_len)
+
+ return sample_lens, sample_turns, prompt_lens, res_lens
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str)
+ parser.add_argument(
+ "--model-name-or-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
+ )
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=False)
+ content = tokenize_dataset(content)
+
+ sample_lens, sample_turns, prompt_lens, res_lens = compute_stats(content)
+ print(f"#sequence: {len(content)/K:.2f} K")
+ print(f"#tokens: {np.sum(sample_lens)/M:.2f} M")
+ print(f"avg. turns: {np.mean(sample_turns):.2f}")
+ print(f"avg. prompt length: {np.mean(prompt_lens):.2f}")
+ print(f"avg. response length: {np.mean(res_lens):.2f}")
+
+ print("\n- Histogram -")
+ bin_edges = [0, 1024, 2048, 4096, 8192, 16384, 32768]
+ hist = np.histogram(sample_lens, bins=bin_edges)[0]
+ for i in range(len(hist)):
+ print(f"L{bin_edges[i]} - {bin_edges[i+1]}: {hist[i]}")
diff --git a/fastchat/data/hardcoded_questions.py b/fastchat/data/hardcoded_questions.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2bcff42ed9fab14fb3ed6934282b615cc7e5133
--- /dev/null
+++ b/fastchat/data/hardcoded_questions.py
@@ -0,0 +1,168 @@
+"""
+Hardcoded question and answers.
+"""
+import json
+
+
+def identity_questions():
+ """ "
+ Adapted from https://github.com/young-geng/koala_data_pipeline/blob/main/process_hard_coded_data.py
+ """
+ content = []
+
+ name = "Vicuna"
+ org = "Large Model Systems Organization (LMSYS)"
+
+ def generate_conversations(questions, answers):
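+ # Emit one sample for every (question, answer) combination.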
+ for q in questions:
+ for a in answers:
+ content.append(
+ {
+ "id": f"identity_{len(content)}",
+ "conversations": [
+ {"from": "human", "value": q},
+ {"from": "gpt", "value": a},
+ ],
+ }
+ )
+
+ questions = [
+ "Who are you?",
+ "What is your name?",
+ "Can you introduce yourself?",
+ "Can you tell me a little bit about yourself?",
+ "What's your name?",
+ "What are you called?",
+ "What are you?",
+ "Tell me your name.",
+ "Tell me about yourself.",
+ "Tell me about you.",
+ "Tell me who you are.",
+ "Please introduce yourself.",
+ ]
+ answers = [
+ f"I am {name}, a language model trained by researchers from {org}.",
+ f"My name is {name}, and I'm a language model developed by {org}.",
+ f"You can call me {name}, and I was trained by {org} researchers as a language model.",
+ f"As a language model, I go by the name {name} and was trained by researchers from {org}.",
+ f"I'm a language model called {name}, and I was trained by {org} researchers.",
+ f"You may refer to me as {name}, a language model meticulously developed by the researchers at {org}.",
+ ]
+ generate_conversations(questions, answers)
+
+ questions = [
+ "Who created you?",
+ "Who made you?",
+ "Who built you?",
+ "Who programmed you?",
+ "Who trained you?",
+ "Who taught you?",
+ "Who developed you?",
+ ]
+ answers = [
+ f"Researchers from {org} created me.",
+ f"I'm created by {org}.",
+ f"I'm built by researchers from {org}.",
+ f"I am a language model trained by researchers from {org}.",
+ f"I'm a language model developed by {org}.",
+ f"I'm a language model created by researchers from {org}.",
+ f"My creators are researchers from {org}.",
+ ]
+ generate_conversations(questions, answers)
+
+ questions = [
+ "Are you ChatGPT?",
+ "Are you GPT-2?",
+ "Are you GPT-3?",
+ "Are you GPT-4?",
+ "Are you davinci?",
+ "Are you davinci-001?",
+ "Are you davinci-002?",
+ "Are you davinci-003?",
+ "Are you curie?",
+ "Are you based on ChatGPT?",
+ "Are you based on GPT-2?",
+ "Are you based on GPT-3?",
+ "Are you based on GPT-4?",
+ "Are you based on davinci?",
+ "Are you based on davinci-001?",
+ "Are you based on davinci-002?",
+ "Are you based on davinci-003?",
+ "Are you based on curie?",
+ "Are you trained by OpenAI?",
+ "Are you trained by Google?",
+ "Are you trained by Microsoft?",
+ "Are you trained by Meta?",
+ "Are you trained by IBM?",
+ "Do you call OpenAI APIs?",
+ "Do you call Google APIs?",
+ "Do you call Microsoft APIs?",
+ "Do you call Meta APIs?",
+ "Do you call IBM APIs?",
+ "Are you created by OpenAI?",
+ "Are you created by Google?",
+ "Are you created by Microsoft?",
+ "Are you created by Meta?",
+ "Are you created by IBM?",
+ "Are you developed by OpenAI?",
+ "Are you developed by Google?",
+ "Are you developed by Microsoft?",
+ "Are you developed by Meta?",
+ "Are you developed by IBM?",
+ "Are you trained on OpenAI data?",
+ "Are you trained on Google data?",
+ "Are you trained on Microsoft data?",
+ "Are you trained on Meta data?",
+ "Are you trained on IBM data?",
+ "Are you trained with OpenAI data?",
+ "Are you trained with Google data?",
+ "Are you trained with Microsoft data?",
+ "Are you trained with Meta data?",
+ "Are you trained with IBM data?",
+ "Have you been trained with OpenAI data?",
+ "Have you been trained with Google data?",
+ "Have you been trained with Microsoft data?",
+ "Have you been trained with Meta data?",
+ "Have you been trained with IBM data?",
+ "Are you finetuned on OpenAI data?",
+ "Are you finetuned on Google data?",
+ "Are you finetuned on Microsoft data?",
+ "Are you finetuned on Meta data?",
+ "Are you finetuned on IBM data?",
+ "Are you finetuned with OpenAI data?",
+ "Are you finetuned with Google data?",
+ "Are you finetuned with Microsoft data?",
+ "Are you finetuned with Meta data?",
+ "Are you finetuned with IBM data?",
+ "Have you been finetuned with OpenAI data?",
+ "Have you been finetuned with Google data?",
+ "Have you been finetuned with Microsoft data?",
+ "Have you been finetuned with Meta data?",
+ "Have you been finetuned with IBM data?",
+ ]
+ answers = [
+ f"No, I am a language model trained by researchers from {org}.",
+ f"No, I am a language model developed by researchers from {org}.",
+ f"No, I am a language model created by researchers from {org}.",
+ f"No, I am trained by researchers from {org}.",
+ f"No, I am developed by researchers from {org}.",
+ f"No, I am created by researchers from {org}.",
+ f"No, I'm a language model trained by researchers from {org}.",
+ f"No, I'm a language model developed by researchers from {org}.",
+ f"No, I'm a language model created by researchers from {org}.",
+ f"No, I'm trained by researchers from {org}.",
+ f"No, I'm developed by researchers from {org}.",
+ f"No, I'm created by researchers from {org}.",
+ ]
+ generate_conversations(questions, answers)
+
+ return content
+
+
+if __name__ == "__main__":
+ out_file = "hardcoded.json"
+
+ content = []
+ content.extend(identity_questions())
+
+ json.dump(content, open(out_file, "w"), indent=2)
diff --git a/fastchat/data/inspect_data.py b/fastchat/data/inspect_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..df9227106be0bdc70946e6efc90b9cbd6fa7bf9b
--- /dev/null
+++ b/fastchat/data/inspect_data.py
@@ -0,0 +1,33 @@
+"""
+Usage:
+python3 -m fastchat.data.inspect_data --in sharegpt_20230322_clean_lang_split.json
+"""
+import argparse
+import json
+import random
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--begin", type=int)
+ parser.add_argument("--random-n", type=int)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+
+ if args.random_n:
+ indices = [random.randint(0, len(content) - 1) for _ in range(args.random_n)]
+ elif args.begin:
+ indices = range(args.begin, len(content))
+ else:
+ indices = range(0, len(content))
+
+ for idx in indices:
+ sample = content[idx]
+ print("=" * 40)
+ print(f"no: {idx}, id: {sample['id']}")
+ for conv in sample["conversations"]:
+ print(conv["from"] + ": ")
+ print(conv["value"])
+ input()
diff --git a/fastchat/data/merge.py b/fastchat/data/merge.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ae63ea76cb4aae9f22f622db84857958965cd07
--- /dev/null
+++ b/fastchat/data/merge.py
@@ -0,0 +1,23 @@
+"""
+Merge two conversation files into one
+
+Usage: python3 -m fastchat.data.merge --in file1.json file2.json --out merged.json
+"""
+
+import argparse
+import json
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True, nargs="+")
+ parser.add_argument("--out-file", type=str, default="merged.json")
+ args = parser.parse_args()
+
+ new_content = []
+ for in_file in args.in_file:
+ content = json.load(open(in_file, "r"))
+ new_content.extend(content)
+
+ print(f"#out: {len(new_content)}")
+ json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/optional_clean.py b/fastchat/data/optional_clean.py
new file mode 100644
index 0000000000000000000000000000000000000000..47aecc1113fabfc76fa005cd34d2a0451efa294e
--- /dev/null
+++ b/fastchat/data/optional_clean.py
@@ -0,0 +1,90 @@
+"""
+Do optional cleaning (e.g., remove some languages).
+
+Usage:
+python3 -m fastchat.data.optional_clean --in input.json --out output.json --keep-lang en
+python3 -m fastchat.data.optional_clean --in input.json --out output.json --skip-lang en
+
+Requirement:
+pip3 install polyglot pyicu pycld2
+"""
+import argparse
+import json
+import re
+
+import polyglot
+from polyglot.detect import Detector
+import pycld2
+from tqdm import tqdm
+
+
+def skip(conv, args):
+ # Remove certain languages
+ if args.keep_lang != "all" or args.skip_lang is not None:
+ text = "\n".join([x["value"] for x in conv["conversations"]])
+ try:
+ lang_code = Detector(text).language.code
+ except (pycld2.error, polyglot.detect.base.UnknownLanguage):
+ lang_code = "unknown"
+
+ if args.keep_lang != "all" and lang_code != args.keep_lang:
+ return True
+
+ if lang_code == args.skip_lang:
+ return True
+
+ # Remove repetitive numbers
+ if args.reduce_rep:
+ for sentence in conv["conversations"]:
+ val = sentence["value"]
+ sub = re.search(r"(\d)\1{8}", val)
+ if sub is not None:
+ return True
+
+ return False
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str)
+ parser.add_argument(
+ "--keep-lang",
+ type=str,
+ default="all",
+ choices=["all", "en"],
+ help="Only keep certain langauges.",
+ )
+ parser.add_argument("--skip-lang", type=str, help="Skip a specific language.")
+ # NOTE: Be careful about reduce_rep which may remove some good data.
+ # For example, addresses could have long consecutive 0's
+ parser.add_argument("--reduce-rep", action="store_true")
+ args = parser.parse_args()
+
+ in_file = args.in_file
+ out_file = args.out_file
+ keep_lang = args.keep_lang
+ skip_lang = args.skip_lang
+ reduce_rep = args.reduce_rep
+ assert keep_lang == "all" or skip_lang is None
+
+ if out_file is None:
+ out_file = "sharegpt_clean"
+ if keep_lang != "all":
+ out_file += "_" + keep_lang
+ if skip_lang is not None:
+ out_file += "_skip_" + skip_lang
+ if reduce_rep:
+ out_file += "_reduce_rep"
+ out_file += ".json"
+
+ content = json.load(open(in_file, "r"))
+ num_conv = len(content)
+
+ new_content = []
+ for conv in tqdm(content):
+ if not skip(conv, args):
+ new_content.append(conv)
+
+ print(f"#in: {len(content)}, #out: {len(new_content)}")
+ json.dump(new_content, open(out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/optional_replace.py b/fastchat/data/optional_replace.py
new file mode 100644
index 0000000000000000000000000000000000000000..1114151a9b077fd538e39721c8fc85e9a06d7a91
--- /dev/null
+++ b/fastchat/data/optional_replace.py
@@ -0,0 +1,82 @@
+"""
+Do optional replace of bos/eos/pad/unk.
+
+Usage:
+python3 -m fastchat.data.optional_replace --in input.json --out output.json --model-name-or-path <model-name-or-path>
+
+Requirement:
+pip3 install transformers tqdm
+"""
+import argparse
+import json
+import traceback
+
+import transformers
+from tqdm import tqdm
+
+
+def replace_special_tokens(
+ tokenizer: transformers.PreTrainedTokenizer, text: str
+) -> str:
+ if not text:
+ return text
+
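+ # Insert vertical bars so the text is no longer recognized as a special
+ # token, e.g. "</s>" (a common eos token) becomes "<|/s|>".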
+ def _insert_vline(token: str) -> str:
+ if len(token) < 2:
+ return " "
+ elif len(token) == 2:
+ return f"{token[0]}|{token[1]}"
+ else:
+ return f"{token[:1]}|{token[1:-1]}|{token[-1:]}"
+
+ if tokenizer.bos_token:
+ text = text.replace(tokenizer.bos_token, _insert_vline(tokenizer.bos_token))
+ if tokenizer.eos_token:
+ text = text.replace(tokenizer.eos_token, _insert_vline(tokenizer.eos_token))
+ if tokenizer.pad_token:
+ text = text.replace(tokenizer.pad_token, _insert_vline(tokenizer.pad_token))
+ if tokenizer.unk_token:
+ text = text.replace(tokenizer.unk_token, _insert_vline(tokenizer.unk_token))
+ return text
+
+
+def replace(conv, tokenizer):
+ # Replace bos/eos/pad/unk tokens
+ if tokenizer:
+ try:
+ for sentence in conv["conversations"]:
+ sentence["value"] = replace_special_tokens(tokenizer, sentence["value"])
+ except Exception:
+ traceback.print_exc()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str)
+ parser.add_argument(
+ "--model-name-or-path",
+ type=str,
+ help="The directory or address where the model token is stored.",
+ )
+ args = parser.parse_args()
+
+ in_file = args.in_file
+ out_file = args.out_file
+ tokenizer = None
+ if args.model_name_or_path:
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.model_name_or_path,
+ trust_remote_code=True,
+ use_fast=False,
+ )
+
+ if out_file is None:
+ out_file = f"{in_file}_replace.json"
+
+ content = json.load(open(in_file, "r"))
+
+ for conv in tqdm(content):
+ replace(conv, tokenizer)
+
+ json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/prepare_all.py b/fastchat/data/prepare_all.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d568703a4a5a18298ac51d92394e5142040c6c5
--- /dev/null
+++ b/fastchat/data/prepare_all.py
@@ -0,0 +1,42 @@
+"""Prepare all datasets."""
+
+import argparse
+import os
+
+from fastchat.utils import run_cmd
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--prefix", type=str, default="~/datasets/sharegpt_20230521")
+ parser.add_argument(
+ "--model-name-or-path", type=str, default="meta-llama/Llama-2-7b-chat-hf"
+ )
+ parser.add_argument("--seq-len", type=int, default=4096)
+ args = parser.parse_args()
+
+ in_prefix = args.prefix
+ model_path = args.model_name_or_path
+ seq_len = args.seq_len
+ prefix = (
+ f"{in_prefix}_{seq_len}".replace("4096", "4k")
+ .replace("8192", "8k")
+ .replace("16384", "16k")
+ )
+
+ cmd_list = [
+ f"python3 -m fastchat.data.clean_sharegpt --in {in_prefix}_html.json --out {prefix}_clean.json",
+ f"python3 -m fastchat.data.optional_clean --in {prefix}_clean.json --out {prefix}_clean_lang.json --skip-lang ko",
+ f"python3 -m fastchat.data.split_long_conversation --in {prefix}_clean_lang.json --out {prefix}_clean_lang_split.json --model-name {model_path} --max-length {seq_len}",
+ f"python3 -m fastchat.data.filter_wrong_format --in {prefix}_clean_lang_split.json --out {prefix}_clean_lang_split.json",
+ f"python3 -m fastchat.data.split_train_test --in {prefix}_clean_lang_split.json --ratio 0.99",
+ f"python3 -m fastchat.data.hardcoded_questions",
+ f"python3 -m fastchat.data.merge --in {prefix}_clean_lang_split_train.json hardcoded.json --out {prefix}_clean_lang_split_identity.json",
+ f"python3 -m fastchat.data.extract_gpt4_only --in {prefix}_clean_lang_split_identity.json",
+ f"python3 -m fastchat.data.extract_single_round --in {prefix}_clean_lang_split_identity.json",
+ ]
+
+ for cmd in cmd_list:
+ ret = run_cmd(cmd)
+ if ret != 0:
+ exit(ret)
diff --git a/fastchat/data/pretty_json.py b/fastchat/data/pretty_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..52eddf6c82687a544ae27a7ffad6d6f0458dcb29
--- /dev/null
+++ b/fastchat/data/pretty_json.py
@@ -0,0 +1,20 @@
+"""
+Usage:
+python3 pretty_json.py --in in.json --out out.json
+"""
+
+import argparse
+import json
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, required=True)
+ args = parser.parse_args()
+
+ with open(args.in_file, "r") as fin:
+ data = json.load(fin)
+
+ with open(args.out_file, "w") as fout:
+ json.dump(data, fout, indent=2, ensure_ascii=False)
diff --git a/fastchat/data/sample.py b/fastchat/data/sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ea94fadaeb243269d125a41b71a69ef15ce16fa
--- /dev/null
+++ b/fastchat/data/sample.py
@@ -0,0 +1,40 @@
+"""
+Sample some conversations from a file.
+
+Usage: python3 -m fastchat.data.sample --in sharegpt.json --out sampled.json
+"""
+import argparse
+import json
+
+import numpy as np
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, default="sampled.json")
+ parser.add_argument("--begin", type=int, default=0)
+ parser.add_argument("--end", type=int, default=100)
+ parser.add_argument("--max-length", type=int, default=1024)
+ parser.add_argument("--keep-order", action="store_true")
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ if not args.keep_order:
+ np.random.seed(42)
+ np.random.shuffle(content)
+
+ new_content = []
+ for i in range(args.begin, min(args.end, len(content))):
+ sample = content[i]
+ concat = ""
+ for s in sample["conversations"]:
+ concat += s["value"]
+
+ if len(concat) > args.max_length:
+ continue
+
+ new_content.append(sample)
+
+ print(f"#in: {len(content)}, #out: {len(new_content)}")
+ json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/data/split_long_conversation.py b/fastchat/data/split_long_conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..413fa8bced590cdb476e67a6523c3967cb844acd
--- /dev/null
+++ b/fastchat/data/split_long_conversation.py
@@ -0,0 +1,129 @@
+"""
+Split long conversations based on certain max length.
+
+Usage: python3 -m fastchat.data.split_long_conversation \
+ --in sharegpt_clean.json \
+ --out sharegpt_split.json \
+ --model-name-or-path <model-name-or-path>
+"""
+import argparse
+from concurrent.futures import ProcessPoolExecutor
+import json
+from typing import Dict, Sequence, Optional
+
+import transformers
+from tqdm import tqdm
+
+
+def make_sample(sample, start_idx, end_idx):
+ assert (end_idx - start_idx) % 2 == 0
+ return {
+ "id": sample["id"] + "_" + str(start_idx),
+ "model": sample.get("model", ""),
+ "conversations": sample["conversations"][start_idx:end_idx],
+ }
+
+
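+# Module-level globals, assigned in split_all() so that ProcessPoolExecutor
+# workers can read them.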
+tokenizer = max_length = None
+
+
+def split_one_sample(sample):
+ tokenized_lens = []
+ conversations = sample["conversations"]
+ conversations = conversations[: len(conversations) // 2 * 2]
+ for c in conversations:
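+ # The +6 presumably budgets for the role and separator tokens that the
+ # chat template adds around each message (an approximation).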
+ length = len(tokenizer(c["value"]).input_ids) + 6
+ tokenized_lens.append(length)
+
+ start_idx = 0
+ cur_len = 0
+
+ if len(conversations) % 2 != 0 or len(conversations) < 2:
+ return []
+
+ new_samples = []
+ for i in range(0, len(conversations), 2):
+ tmp_len = tokenized_lens[i] + tokenized_lens[i + 1]
+ if cur_len + tmp_len > max_length:
+ new_samples.append(make_sample(sample, start_idx, i))
+ start_idx = i
+ cur_len = 0
+ # Check this separately (not with elif) so the final pair is still
+ # emitted when it triggers the overflow branch above.
+ if i == len(conversations) - 2:
+ new_samples.append(make_sample(sample, start_idx, i + 2))
+
+ cur_len += tmp_len
+
+ return new_samples
+
+
+def worker(input_data):
+ result = []
+ for sample in input_data:
+ result.extend(split_one_sample(sample))
+ return result
+
+
+def split_all(content, begin, end, tokenizer_, max_length_):
+ """
+ Keep the maximum round of conversations within the max token length constraint
+ """
+ global tokenizer, max_length
+ tokenizer = tokenizer_
+ max_length = max_length_
+
+ content = content[begin:end]
+ new_content = []
+
+ # Split content into chunks
+ chunks = [content[i : i + 1000] for i in range(0, len(content), 1000)]
+ with ProcessPoolExecutor() as executor:
+ for result in tqdm(executor.map(worker, chunks), total=len(chunks)):
+ new_content.extend(result)
+
+ return new_content
+
+
+def filter_invalid_roles(content):
+ new_content = []
+ for i, c in enumerate(content):
+ roles = ["human", "gpt"]
+ if len(c["conversations"]) <= 0:
+ continue
+
+ valid = True
+ for j, s in enumerate(c["conversations"]):
+ if s["from"] != roles[j % 2]:
+ valid = False
+ break
+
+ if valid:
+ new_content.append(c)
+
+ return new_content
+
+
+def main(args):
+ content = json.load(open(args.in_file, "r"))
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.model_name_or_path,
+ model_max_length=args.max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ new_content = split_all(content, args.begin, args.end, tokenizer, args.max_length)
+ new_content = filter_invalid_roles(new_content)
+
+ print(f"#in: {len(content)}, #out: {len(new_content)}")
+ json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--out-file", type=str, default="sharegpt_split.json")
+ parser.add_argument("--begin", type=int)
+ parser.add_argument("--end", type=int)
+ parser.add_argument("--model-name-or-path", type=str, required=True)
+ parser.add_argument("--max-length", type=int, default=2048)
+ args = parser.parse_args()
+ main(args)
diff --git a/fastchat/data/split_train_test.py b/fastchat/data/split_train_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..60b8960b57e30c28ef92652b17db7e52756f8aac
--- /dev/null
+++ b/fastchat/data/split_train_test.py
@@ -0,0 +1,34 @@
+"""
+Split the dataset into training and test set.
+
+Usage: python3 -m fastchat.data.split_train_test --in sharegpt.json
+"""
+import argparse
+import json
+
+import numpy as np
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--in-file", type=str, required=True)
+ parser.add_argument("--begin", type=int, default=0)
+ parser.add_argument("--end", type=int, default=100)
+ parser.add_argument("--ratio", type=float, default=0.9)
+ args = parser.parse_args()
+
+ content = json.load(open(args.in_file, "r"))
+ np.random.seed(0)
+
+ perm = np.random.permutation(len(content))
+ content = [content[i] for i in perm]
+ split = int(args.ratio * len(content))
+
+ train_set = content[:split]
+ test_set = content[split:]
+
+ print(f"#train: {len(train_set)}, #test: {len(test_set)}")
+ train_name = args.in_file.replace(".json", "_train.json")
+ test_name = args.in_file.replace(".json", "_test.json")
+ json.dump(train_set, open(train_name, "w"), indent=2, ensure_ascii=False)
+ json.dump(test_set, open(test_name, "w"), indent=2, ensure_ascii=False)
diff --git a/fastchat/gradio_controller.sh b/fastchat/gradio_controller.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9d5030a8a7f22aa4368f81bdf9e0a776a5038cf1
--- /dev/null
+++ b/fastchat/gradio_controller.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+#SBATCH --job-name=visual_arena_controller
+#SBATCH --gres=gpu:1
+#SBATCH --qos=m3
+#SBATCH --time=3:00:00
+#SBATCH -c 30
+#SBATCH --mem=60G
+#SBATCH --output=slurm-%j.out
+#SBATCH --error=slurm-%j.err
+
+# prepare your environment here
+module load pytorch1.13.1-cuda11.7-python3.9
+
+# put your command here
+python3 -m fastchat.serve.controller
\ No newline at end of file
diff --git a/fastchat/llm_judge/README.md b/fastchat/llm_judge/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0cc844613edeb7a74b257f7279242c913304f87d
--- /dev/null
+++ b/fastchat/llm_judge/README.md
@@ -0,0 +1,174 @@
+# LLM Judge
+| [Paper](https://arxiv.org/abs/2306.05685) | [Leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard) |
+
+In this package, you can use MT-bench questions and prompts to evaluate your models with LLM-as-a-judge.
+MT-bench is a set of challenging multi-turn open-ended questions for evaluating chat assistants.
+To automate the evaluation process, we prompt strong LLMs like GPT-4 to act as judges and assess the quality of the models' responses.
+
+## Contents
+- [Install](#install)
+- [Review Pre-Generated Model Answers and Judgments](#review-pre-generated-model-answers-and-judgments)
+- [MT-Bench](#mt-bench)
+- [Agreement Computation](#agreement-computation)
+- [Datasets](#datasets)
+- [Citation](#citation)
+
+## Install
+```
+git clone https://github.com/lm-sys/FastChat.git
+cd FastChat
+pip install -e ".[model_worker,llm_judge]"
+```
+
+## Review Pre-Generated Model Answers and Judgments
+We provide pre-generated model answers and judgments for some models.
+You can view them at this [demo](https://huggingface.co/spaces/lmsys/mt-bench).
+
+To download the pre-generated data, use
+```
+python3 download_mt_bench_pregenerated.py
+```
+
+After downloading the data, you can view them locally by
+```
+python3 qa_browser.py --share
+```
+You can also use this QA browser later to view the answers you generate.
+
+## MT-Bench
+
+### Evaluate a model on MT-bench
+
+#### Step 1. Generate model answers to MT-bench questions
+
+To generate model answers, you can either use [vLLM](https://github.com/vllm-project/vllm) via a FastChat server (recommended) or Hugging Face.
+
+##### Using vLLM (recommended):
+
+1. Launch a vLLM worker
+```
+python3 -m fastchat.serve.controller
+python3 -m fastchat.serve.vllm_worker --model-path [MODEL-PATH]
+python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
+```
+ - Arguments:
+ - `[MODEL-PATH]` is the path to the weights, which can be a local folder or a Hugging Face repo ID.
+
+2. Generate the answers
+```
+python gen_api_answer.py --model [MODEL-NAME] --openai-api-base http://localhost:8000/v1 --parallel 50
+```
+ - Arguments:
+ - `[MODEL-NAME]` is the name of the model from Step 1.
+ - `--parallel` is the number of concurrent API calls to the vLLM worker.
+
+##### Using Hugging Face:
+1. Generate the answers
+```
+python gen_model_answer.py --model-path [MODEL-PATH] --model-id [MODEL-ID]
+```
+- Arguments:
+ - `[MODEL-PATH]` is the path to the weights, which can be a local folder or a Hugging Face repo ID.
+ - `[MODEL-ID]` is a name you give to the model.
+
+ - You can also specify `--num-gpus-per-model` for model parallelism (needed for large 65B models) and `--num-gpus-total` to parallelize answer generation with multiple GPUs.
+
+- e.g. `python gen_model_answer.py --model-path lmsys/vicuna-7b-v1.5 --model-id vicuna-7b-v1.5`
+
+The answers will be saved to `data/mt_bench/model_answer/[MODEL-ID/MODEL-NAME].jsonl`.
+
+To make sure FastChat loads the correct prompt template, see the supported models and how to add a new model [here](../../docs/model_support.md#how-to-support-a-new-model).
+
+#### Step 2. Generate GPT-4 judgments
+There are several options to use GPT-4 as a judge, such as pairwise winrate and single-answer grading.
+In MT-bench, we recommend single-answer grading as the default mode.
+This mode asks GPT-4 to grade the model's answer directly, without a pairwise comparison.
+For each turn, GPT-4 gives a score on a scale of 1 to 10. We then compute the average score over all turns.
+
+```
+export OPENAI_API_KEY=XXXXXX # set the OpenAI API key
+python gen_judgment.py --model-list [LIST-OF-MODEL-ID] --parallel [num-concurrent-api-call]
+```
+
+e.g.,
+```
+python gen_judgment.py --model-list vicuna-13b-v1.3 alpaca-13b llama-13b claude-v1 gpt-3.5-turbo gpt-4 --parallel 2
+```
+The judgments will be saved to `data/mt_bench/model_judgment/gpt-4_single.jsonl`
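+
+Each line of this file is a JSON object that includes, among other fields, `question_id`, `model`, `score`, and `turn`. `show_result.py` (Step 3) is the supported way to aggregate scores, but as a minimal sketch assuming that format, you can compute per-model averages yourself:
+```
+import json
+from collections import defaultdict
+
+scores = defaultdict(list)
+with open("data/mt_bench/model_judgment/gpt-4_single.jsonl") as fin:
+    for line in fin:
+        obj = json.loads(line)
+        if obj["score"] >= 0:  # a score of -1 marks a failed judgment
+            scores[obj["model"]].append(obj["score"])
+
+for model, vals in sorted(scores.items()):
+    print(f"{model}: {sum(vals) / len(vals):.2f}")
+```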
+
+#### Step 3. Show MT-bench scores
+
+- Show the scores for selected models
+ ```
+ python show_result.py --model-list vicuna-13b-v1.3 alpaca-13b llama-13b claude-v1 gpt-3.5-turbo gpt-4
+ ```
+- Show all scores
+ ```
+ python show_result.py
+ ```
+
+---
+
+### Other grading options
+Besides score-based single-answer grading, we also support two additional grading options based on win rates:
+- `pairwise-baseline`: run pairwise comparison against a baseline model.
+- `pairwise-all`: run pairwise comparison between all model pairs on all questions.
+
+#### Option 2: pairwise comparison against a baseline (default: gpt-3.5-turbo)
+
+- Generate GPT-4 judgments
+```
+python gen_judgment.py --mode pairwise-baseline --model-list vicuna-13b-v1.3 alpaca-13b llama-13b --parallel 2
+```
+The judgments will be saved to `data/mt_bench/model_judgment/gpt-4_pair.jsonl`
+
+- Show results
+```
+python show_result.py --mode pairwise-baseline
+```
+
+#### Option 3: Run GPT-4 judge with all pair comparisons
+
+Another option is to run pairwise comparisons on all possible pairs.
+This becomes more expensive as the number of models increases, but it gives you more comprehensive information.
+
+```
+python gen_judgment.py --mode pairwise-all --model-list [LIST-OF-MODEL-ID] --parallel [num-concurrent-api-call]
+```
+
+```
+python show_result.py --mode pairwise-all
+```
+
+### How to get GPT-3.5/GPT-4/Claude's answer?
+- `python gen_api_answer.py --model [MODEL-NAME]` to generate GPT-3.5/4 and Claude's answers.
+
+
+### How to plot the radar figure?
+
+You can use this [colab notebook](https://colab.research.google.com/drive/15O3Y8Rxq37PuMlArE291P4OC6ia37PQK#scrollTo=5i8R0l-XqkgO) to plot the radar figure for MT-bench.
+
+## Agreement Computation
+We released 3.3K human annotations for model responses generated by 6 models in response to 80 MT-bench questions. The dataset is available at [lmsys/mt_bench_human_judgments](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments).
+
+This Colab [notebook](https://colab.research.google.com/drive/1ctgygDRJhVGUJTQy8-bRZCl1WNcT8De6?usp=sharing) shows how to compute the agreement between humans and the GPT-4 judge with the dataset. Our results show that humans and the GPT-4 judge achieve over 80% agreement, the same level of agreement as between humans.
+
+## Datasets
+- [Chatbot Arena Conversation Dataset](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations)
+- [MT-bench Human Annotation Dataset](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)
+
+## Citation
+Please cite the following paper if you find the code or datasets helpful.
+```
+@misc{zheng2023judging,
+ title={Judging LLM-as-a-judge with MT-Bench and Chatbot Arena},
+ author={Lianmin Zheng and Wei-Lin Chiang and Ying Sheng and Siyuan Zhuang and Zhanghao Wu and Yonghao Zhuang and Zi Lin and Zhuohan Li and Dacheng Li and Eric. P Xing and Hao Zhang and Joseph E. Gonzalez and Ion Stoica},
+ year={2023},
+ eprint={2306.05685},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+}
+```
diff --git a/fastchat/llm_judge/clean_judgment.py b/fastchat/llm_judge/clean_judgment.py
new file mode 100644
index 0000000000000000000000000000000000000000..d139ed7f22b4eafe16702b67cf528a908061c768
--- /dev/null
+++ b/fastchat/llm_judge/clean_judgment.py
@@ -0,0 +1,93 @@
+"""
+Clean model judgment files: deduplicate records and keep only the selected models.
+"""
+import argparse
+import json
+
+selected_models = [
+ "alpaca-13b",
+ "baize-v2-13b",
+ "chatglm-6b",
+ "claude-instant-v1",
+ "claude-v1",
+ "dolly-v2-12b",
+ "falcon-40b-instruct",
+ "fastchat-t5-3b",
+ "gpt-3.5-turbo",
+ "gpt-4",
+ "gpt4all-13b-snoozy",
+ "guanaco-33b",
+ "guanaco-65b",
+ "h2ogpt-oasst-open-llama-13b",
+ "koala-13b",
+ "llama-13b",
+ "mpt-30b-chat",
+ "mpt-30b-instruct",
+ "mpt-7b-chat",
+ "nous-hermes-13b",
+ "oasst-sft-4-pythia-12b",
+ "oasst-sft-7-llama-30b",
+ "palm-2-chat-bison-001",
+ "rwkv-4-raven-14b",
+ "stablelm-tuned-alpha-7b",
+ "tulu-30b",
+ "vicuna-13b-v1.3",
+ "vicuna-33b-v1.3",
+ "vicuna-7b-v1.3",
+ "wizardlm-13b",
+ "wizardlm-30b",
+]
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--infile", type=str)
+ args = parser.parse_args()
+
+ infile = args.infile
+ outfile = infile.replace(".jsonl", "_clean.jsonl")
+
+ raw_lines = open(infile).readlines()
+ rets = []
+ models = set()
+ visited = set()
+ for line in raw_lines:
+ obj = json.loads(line)
+
+ if "model_1" in obj: # pair
+ model = obj["model_1"]
+ key = (
+ obj["model_1"],
+ obj["model_2"],
+ obj["question_id"],
+ tuple(obj["judge"]),
+ )
+ else: # single
+ model = obj["model"]
+ key = (obj["model"], obj["question_id"], tuple(obj["judge"]))
+
+ if key in visited:
+ continue
+ visited.add(key)
+
+ if model not in selected_models:
+ continue
+ models.add(model)
+ rets.append(obj)
+
+ models = sorted(list(models))
+ missing_models = [x for x in selected_models if x not in models]
+ print(f"in models: {models}, number: {len(models)}")
+ print(f"missing models: {missing_models}")
+ print(f"#in: {len(raw_lines)}, #out: {len(rets)}")
+ rets.sort(
+ key=lambda x: (
+ x["model"] if "model" in x else x["model_1"],
+ x["question_id"],
+ x["turn"],
+ )
+ )
+
+ with open(outfile, "w") as fout:
+ for x in rets:
+ fout.write(json.dumps(x) + "\n")
diff --git a/fastchat/llm_judge/common.py b/fastchat/llm_judge/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b598cefb3159de03637ec2442aca7d57552ff2e
--- /dev/null
+++ b/fastchat/llm_judge/common.py
@@ -0,0 +1,702 @@
+"""
+Common data structures and utilities.
+"""
+
+import ast
+import dataclasses
+import glob
+import json
+import os
+import re
+import time
+from typing import Optional
+
+import openai
+import anthropic
+
+from fastchat.model.model_adapter import get_conversation_template, ANTHROPIC_MODEL_LIST
+
+# API setting constants
+API_MAX_RETRY = 16
+API_RETRY_SLEEP = 10
+API_ERROR_OUTPUT = "$ERROR$"
+
+TIE_DELTA = 0.1
+
+# Categories that need reference answers
+NEED_REF_CATS = ["math", "reasoning", "coding", "arena-hard-200"]
+
+# Extract scores from judgments
+two_score_pattern = re.compile(r"\[\[(\d+\.?\d*),\s?(\d+\.?\d*)\]\]")
+two_score_pattern_backup = re.compile(r"\[(\d+\.?\d*),\s?(\d+\.?\d*)\]")
+one_score_pattern = re.compile(r"\[\[(\d+\.?\d*)\]\]")
+one_score_pattern_backup = re.compile(r"\[(\d+\.?\d*)\]")
+
+# Sampling temperature configs for each question category
+temperature_config = {
+ "writing": 0.7,
+ "roleplay": 0.7,
+ "extraction": 0.0,
+ "math": 0.0,
+ "coding": 0.0,
+ "reasoning": 0.0,
+ "stem": 0.1,
+ "humanities": 0.1,
+ "arena-hard-200": 0.0,
+}
+
+reverse_model_map = {
+ "model_1": "model_2",
+ "model_2": "model_1",
+}
+
+
+@dataclasses.dataclass
+class Judge:
+ model_name: str
+ prompt_template: dict
+ ref_based: bool = False
+ multi_turn: bool = False
+
+
+@dataclasses.dataclass
+class MatchSingle:
+ question: dict
+ model: str
+ answer: dict
+ judge: Judge
+ ref_answer: Optional[dict] = None
+ multi_turn: bool = False
+
+
+@dataclasses.dataclass
+class MatchPair:
+ question: dict
+ model_1: str
+ model_2: str
+ answer_1: dict
+ answer_2: dict
+ judge: Judge
+ ref_answer: Optional[dict] = None
+ multi_turn: bool = False
+
+
+def load_questions(question_file: str, begin: Optional[int], end: Optional[int]):
+ """Load questions from a file."""
+ questions = []
+ with open(question_file, "r") as ques_file:
+ for line in ques_file:
+ if line:
+ questions.append(json.loads(line))
+ questions = questions[begin:end]
+ return questions
+
+
+def load_model_answers(answer_dir: str):
+ """Load model answers.
+
+ The return value is a python dict of type:
+ Dict[model_name: str -> Dict[question_id: int -> answer: dict]]
+ """
+ filenames = glob.glob(os.path.join(answer_dir, "*.jsonl"))
+ filenames.sort()
+ model_answers = {}
+
+ for filename in filenames:
+ model_name = os.path.basename(filename)[:-6]
+ answer = {}
+ with open(filename) as fin:
+ for line in fin:
+ line = json.loads(line)
+ answer[line["question_id"]] = line
+ model_answers[model_name] = answer
+
+ return model_answers
+
+
+def load_judge_prompts(prompt_file: str):
+ """Load judge prompts.
+
+ The return value is a python dict of type:
+ Dict[judge_name: str -> dict]
+ """
+ prompts = {}
+ with open(prompt_file) as fin:
+ for line in fin:
+ line = json.loads(line)
+ prompts[line["name"]] = line
+ return prompts
+
+
+def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
+ kwargs = {}
+ model = judge.model_name
+ if ref_answer is not None:
+ kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
+ if multi_turn:
+ kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
+
+ if multi_turn:
+ user_prompt = judge.prompt_template["prompt_template"].format(
+ question_1=question["turns"][0],
+ question_2=question["turns"][1],
+ answer_1=answer["choices"][0]["turns"][0],
+ answer_2=answer["choices"][0]["turns"][1],
+ **kwargs,
+ )
+ else:
+ user_prompt = judge.prompt_template["prompt_template"].format(
+ question=question["turns"][0],
+ answer=answer["choices"][0]["turns"][0],
+ **kwargs,
+ )
+
+ rating = -1
+
+ system_prompt = judge.prompt_template["system_prompt"]
+ conv = get_conversation_template(model)
+ conv.set_system_message(system_prompt)
+ conv.append_message(conv.roles[0], user_prompt)
+ conv.append_message(conv.roles[1], None)
+
+ if model in ["gpt-3.5-turbo", "gpt-4"]:
+ judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
+ elif model in ANTHROPIC_MODEL_LIST:
+ judgment = chat_compeletion_anthropic(
+ model, conv, temperature=0, max_tokens=1024
+ )
+ else:
+ raise ValueError(f"Invalid judge model name: {model}")
+
+ if judge.prompt_template["output_format"] == "[[rating]]":
+ match = re.search(one_score_pattern, judgment)
+ if not match:
+ match = re.search(one_score_pattern_backup, judgment)
+
+ if match:
+ rating = ast.literal_eval(match.groups()[0])
+ else:
+ rating = -1
+ else:
+ raise ValueError(
+ f"invalid output format: {judge.prompt_template['output_format']}"
+ )
+
+ return rating, user_prompt, judgment
+
+
+def play_a_match_single(match: MatchSingle, output_file: str):
+ question, model, answer, judge, ref_answer, multi_turn = (
+ match.question,
+ match.model,
+ match.answer,
+ match.judge,
+ match.ref_answer,
+ match.multi_turn,
+ )
+
+ if judge.prompt_template["type"] == "single":
+ score, user_prompt, judgment = run_judge_single(
+ question, answer, judge, ref_answer, multi_turn=multi_turn
+ )
+
+ question_id = question["question_id"]
+ turn = 1 if not multi_turn else 2
+ result = {
+ "question_id": question_id,
+ "model": model,
+ "judge": (judge.model_name, judge.prompt_template["name"]),
+ "user_prompt": user_prompt,
+ "judgment": judgment,
+ "score": score,
+ "turn": turn,
+ "tstamp": time.time(),
+ }
+ print(
+ f"question: {question_id}, turn: {turn}, model: {model}, "
+ f"score: {score}, "
+ f"judge: {(judge.model_name, judge.prompt_template['name'])}"
+ )
+ else:
+ raise ValueError(f"invalid judge type: {judge['type']}")
+
+ if output_file:
+ os.makedirs(os.path.dirname(output_file), exist_ok=True)
+ with open(output_file, "a") as fout:
+ fout.write(json.dumps(result) + "\n")
+
+ return result
+
+
+def run_judge_pair(question, answer_a, answer_b, judge, ref_answer, multi_turn=False):
+ kwargs = {}
+ model = judge.model_name
+ if ref_answer is not None:
+ kwargs["ref_answer_1"] = ref_answer["choices"][0]["turns"][0]
+ if multi_turn:
+ kwargs["ref_answer_2"] = ref_answer["choices"][0]["turns"][1]
+
+ if multi_turn:
+ system_prompt = judge.prompt_template["system_prompt"]
+ user_prompt = judge.prompt_template["prompt_template"].format(
+ question_1=question["turns"][0],
+ question_2=question["turns"][1],
+ answer_a_1=answer_a["choices"][0]["turns"][0],
+ answer_b_1=answer_b["choices"][0]["turns"][0],
+ answer_a_2=answer_a["choices"][0]["turns"][1],
+ answer_b_2=answer_b["choices"][0]["turns"][1],
+ **kwargs,
+ )
+ else:
+ system_prompt = judge.prompt_template["system_prompt"]
+ user_prompt = judge.prompt_template["prompt_template"].format(
+ question=question["turns"][0],
+ answer_a=answer_a["choices"][0]["turns"][0],
+ answer_b=answer_b["choices"][0]["turns"][0],
+ **kwargs,
+ )
+
+ winner = "error"
+
+ conv = get_conversation_template(model)
+ conv.append_message(conv.roles[0], user_prompt)
+ conv.append_message(conv.roles[1], None)
+
+ if model in ["gpt-3.5-turbo", "gpt-4"]:
+ conv.set_system_message(system_prompt)
+ judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
+ elif model in ANTHROPIC_MODEL_LIST:
+ if system_prompt != "You are a helpful assistant.":
+ user_prompt = "[Instruction]\n" + system_prompt + "\n\n" + user_prompt
+ conv.messages[0][1] = user_prompt
+ judgment = chat_compeletion_anthropic(
+ model, conv, temperature=0, max_tokens=1024
+ )
+ else:
+ raise ValueError(f"Invalid judge model name: {model}")
+
+ if judge.prompt_template["output_format"] == "[[A]]":
+ if "[[A]]" in judgment:
+ winner = "A"
+ elif "[[B]]" in judgment:
+ winner = "B"
+ elif "[[C]]" in judgment:
+ winner = "tie"
+ else:
+ winner = "error"
+ elif judge.prompt_template["output_format"] == "[[rating_a,rating_b]]":
+ match = re.search(two_score_pattern, judgment)
+ if not match:
+ match = re.search(two_score_pattern_backup, judgment)
+ if match:
+ scores = [ast.literal_eval(s.strip()) for s in match.groups()]
+ if abs(scores[0] - scores[1]) <= TIE_DELTA:
+ winner = "tie"
+ elif scores[0] > scores[1]:
+ winner = "A"
+ else:
+ winner = "B"
+ else:
+ winner = "error"
+ else:
+ raise ValueError(
+ f"invalid output format: {judge.prompt_template['output_format']}"
+ )
+
+ return winner, user_prompt, judgment
+
+
+def play_a_match_pair(match: MatchPair, output_file: str):
+ question, model_1, model_2, answer_1, answer_2, judge, ref_answer, multi_turn = (
+ match.question,
+ match.model_1,
+ match.model_2,
+ match.answer_1,
+ match.answer_2,
+ match.judge,
+ match.ref_answer,
+ match.multi_turn,
+ )
+
+ if judge.prompt_template["type"] == "pairwise":
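+ # Judge the pair twice with the answer order swapped (g1 and g2) so that
+ # position bias can be detected; disagreement shows up as g1 != g2.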
+ g1_winner, g1_user_prompt, g1_judgment = run_judge_pair(
+ question, answer_1, answer_2, judge, ref_answer, multi_turn=multi_turn
+ )
+ g2_winner, g2_user_prompt, g2_judgment = run_judge_pair(
+ question, answer_2, answer_1, judge, ref_answer, multi_turn=multi_turn
+ )
+
+ g1_map = {"A": "model_1", "B": "model_2"}
+ g2_map = {"A": "model_2", "B": "model_1"}
+ g1_winner = g1_map.get(g1_winner, g1_winner)
+ g2_winner = g2_map.get(g2_winner, g2_winner)
+ question_id = question["question_id"]
+ turn = 1 if not multi_turn else 2
+
+ result = {
+ "question_id": question_id,
+ "model_1": model_1,
+ "model_2": model_2,
+ "g1_winner": g1_winner,
+ "g2_winner": g2_winner,
+ "judge": (judge.model_name, judge.prompt_template["name"]),
+ "g1_user_prompt": g1_user_prompt,
+ "g1_judgment": g1_judgment,
+ "g2_user_prompt": g2_user_prompt,
+ "g2_judgment": g2_judgment,
+ "turn": turn,
+ "tstamp": time.time(),
+ }
+
+ print(
+ f"question: {question_id}, turn: {turn}, model_1: {model_1}, model_2: {model_2}, "
+ f"g1_winner: {g1_winner}, g2_winner: {g2_winner}, "
+ f"judge: {(judge.model_name, judge.prompt_template['name'])}"
+ )
+ elif judge.prompt_template["type"] == "single":
+ m1_score, m1_user_prompt, m1_judgment = run_judge_single(
+ question, answer_1, judge, ref_answer, multi_turn=multi_turn
+ )
+ m2_score, m2_user_prompt, m2_judgment = run_judge_single(
+ question, answer_2, judge, ref_answer, multi_turn=multi_turn
+ )
+
+ if abs(m1_score - m2_score) <= TIE_DELTA:
+ winner = "tie"
+ elif m1_score > m2_score:
+ winner = "model_1"
+ else:
+ winner = "model_2"
+
+ question_id = question["question_id"]
+ result = {
+ "question_id": question_id,
+ "model_1": model_1,
+ "model_2": model_2,
+ "g1_winner": winner,
+ "g2_winner": winner,
+ "judge": (judge.model_name, judge.prompt_template["name"]),
+ "g1_user_prompt": m1_user_prompt,
+ "g1_judgment": m1_judgment,
+ "g2_user_prompt": m2_user_prompt,
+ "g2_judgment": m2_judgment,
+ "m1_score": m1_score,
+ "m2_score": m2_score,
+ "tstamp": time.time(),
+ }
+ print(
+ f"question: {question_id}, model_1: {model_1}, model_2: {model_2}, "
+ f"winner: {winner}, m1_score: {m1_score}, m2_score: {m2_score}, "
+ f"judge: {(judge.model_name, judge.prompt_template['name'])}"
+ )
+ else:
+ raise ValueError(f"invalid judge type: {judge['type']}")
+
+ if output_file:
+ os.makedirs(os.path.dirname(output_file), exist_ok=True)
+ with open(output_file, "a") as fout:
+ fout.write(json.dumps(result) + "\n")
+
+ return result
+
+
+def chat_compeletion_openai(model, conv, temperature, max_tokens, api_dict=None):
+ if api_dict is not None:
+ openai.api_base = api_dict["api_base"]
+ openai.api_key = api_dict["api_key"]
+ output = API_ERROR_OUTPUT
+ for _ in range(API_MAX_RETRY):
+ try:
+ messages = conv.to_openai_api_messages()
+ response = openai.ChatCompletion.create(
+ model=model,
+ messages=messages,
+ n=1,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ )
+ output = response["choices"][0]["message"]["content"]
+ break
+ except openai.error.OpenAIError as e:
+ print(type(e), e)
+ time.sleep(API_RETRY_SLEEP)
+
+ return output
+
+
+def chat_compeletion_openai_azure(model, conv, temperature, max_tokens, api_dict=None):
+ openai.api_type = "azure"
+ openai.api_version = "2023-07-01-preview"
+ if api_dict is not None:
+ openai.api_base = api_dict["api_base"]
+ openai.api_key = api_dict["api_key"]
+ else:
+ openai.api_base = os.environ["AZURE_OPENAI_ENDPOINT"]
+ openai.api_key = os.environ["AZURE_OPENAI_KEY"]
+
+ if "azure-" in model:
+ model = model[6:]
+
+ output = API_ERROR_OUTPUT
+ for _ in range(API_MAX_RETRY):
+ try:
+ messages = conv.to_openai_api_messages()
+ response = openai.ChatCompletion.create(
+ engine=model,
+ messages=messages,
+ n=1,
+ temperature=temperature,
+ max_tokens=max_tokens,
+ )
+ output = response["choices"][0]["message"]["content"]
+ break
+ except openai.error.OpenAIError as e:
+ print(type(e), e)
+ time.sleep(API_RETRY_SLEEP)
+ except openai.error.InvalidRequestError as e:
+ print(type(e), e)
+ break
+ except KeyError:
+ print(response)
+ break
+
+ return output
+
+
+def chat_compeletion_anthropic(model, conv, temperature, max_tokens):
+ output = API_ERROR_OUTPUT
+ for _ in range(API_MAX_RETRY):
+ try:
+ c = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
+ prompt = conv.get_prompt()
+ response = c.completions.create(
+ model=model,
+ prompt=prompt,
+ stop_sequences=[anthropic.HUMAN_PROMPT],
+ max_tokens_to_sample=max_tokens,
+ temperature=temperature,
+ )
+ output = response.completion
+ break
+ except anthropic.APIError as e:
+ print(type(e), e)
+ time.sleep(API_RETRY_SLEEP)
+ return output.strip()
+
+
+def chat_compeletion_palm(chat_state, model, conv, temperature, max_tokens):
+ from fastchat.serve.api_provider import init_palm_chat
+
+ assert model == "palm-2-chat-bison-001"
+
+ if chat_state is None:
+ chat_state = init_palm_chat("chat-bison@001")
+
+ parameters = {
+ "temperature": temperature,
+ "top_p": 0.8,
+ "top_k": 40,
+ "max_output_tokens": max_tokens,
+ }
+ output = API_ERROR_OUTPUT
+ for _ in range(API_MAX_RETRY):
+ try:
+ response = chat_state.send_message(conv.messages[-2][1], **parameters)
+ output = response.text
+ break
+ except Exception as e:
+ print(type(e), e)
+ time.sleep(API_RETRY_SLEEP)
+ return chat_state, output
+
+
+def normalize_game_key_single(gamekey, result):
+ """Make the model names sorted in a game key."""
+ qid, model_1, model_2 = gamekey
+ if model_1 < model_2:
+ return gamekey, result
+ else:
+ new_gamekey = (qid, model_2, model_1)
+ new_result = {
+ "winners": tuple(reverse_model_map.get(x, x) for x in result["winners"]),
+ "g1_judgment": result["g2_judgment"],
+ "g2_judgment": result["g1_judgment"],
+ }
+ return new_gamekey, new_result
+
+
+def normalize_game_key_dict(judgment_dict):
+ """Make the model names sorted in the game keys."""
+ ret = {}
+ for key, value in judgment_dict.items():
+ new_key, new_value = normalize_game_key_single(key, value)
+ ret[new_key] = new_value
+ return ret
+
+
+def load_pairwise_model_judgments(filename: str):
+ """Load model judgments.
+
+ The return value is a dict of type:
+ Dict[judge: tuple -> Dict[game_key: tuple -> game_result: dict]]
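+
+ For example (illustrative values):
+ {("gpt-4", "pair-v2"): {(81, "alpaca-13b", "vicuna-13b"): {"winners": ("model_1",), "g1_judgment": "...", "g2_judgment": "..."}}}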
+ """
+ judge_dict = {}
+
+ for line in open(filename):
+ obj = json.loads(line)
+ judge = tuple(obj["judge"])
+ qid, model_1, model_2 = obj["question_id"], obj["model_1"], obj["model_2"]
+
+ if judge not in judge_dict:
+ judge_dict[judge] = {}
+
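+ # Keep a single winner only when both game orderings agree; otherwise mark the game "inconsistent".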
+ if "winner" in obj:
+ winner = obj["winner"]
+ elif "g1_winner" in obj and "g2_winner" in obj:
+ g1_winner, g2_winner = obj["g1_winner"], obj["g2_winner"]
+ if g1_winner == g2_winner:
+ winner = g1_winner
+ else:
+ winner = "inconsistent"
+ else:
+ raise ValueError(f"Invalid keys: {list(obj.keys())}")
+
+ gamekey = (qid, model_1, model_2)
+ winners = (winner,)
+
+ judge_dict[judge][gamekey] = {
+ "winners": winners,
+ "g1_judgment": obj["g1_judgment"],
+ "g2_judgment": obj["g2_judgment"],
+ }
+
+ # Make the model names sorted in the game keys
+ normalized = {}
+ for judge, value in judge_dict.items():
+ normalized[judge] = normalize_game_key_dict(value)
+ return normalized
+
+
+def load_single_model_judgments(filename: str):
+ """Load model judgments.
+
+ The return value is a dict of type:
+ Dict[judge: tuple -> Dict[game_key: tuple -> game_result: dict]]
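+
+ For example (illustrative values):
+ {("gpt-4", "single-v1"): {(81, "vicuna-13b"): {"score": 8, "judgment": "..."}}}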
+ """
+ judge_dict = {}
+
+ for line in open(filename):
+ obj = json.loads(line)
+ judge = tuple(obj["judge"])
+ qid, model = obj["question_id"], obj["model"]
+
+ if judge not in judge_dict:
+ judge_dict[judge] = {}
+
+ gamekey = (qid, model)
+
+ judge_dict[judge][gamekey] = {
+ "score": obj["score"],
+ "judgment": obj["judgment"],
+ }
+ return judge_dict
+
+
+def resolve_pairwise_judgment_dict(
+ question, model_judgments_normal, model_judgments_math, multi_turn=False
+):
+ """Return the correct pairwise judge."""
+ if multi_turn:
+ if question["category"] in NEED_REF_CATS:
+ return model_judgments_math[("gpt-4", "pair-math-v1-multi-turn")]
+ return model_judgments_normal[("gpt-4", "pair-v2-multi-turn")]
+
+ if question["category"] in NEED_REF_CATS:
+ return model_judgments_math[("gpt-4", "pair-math-v1")]
+ else:
+ return model_judgments_normal[("gpt-4", "pair-v2")]
+
+
+def resolve_single_judgment_dict(
+ question, model_judgments_normal, model_judgments_math, multi_turn=False
+):
+ """Return the correct single answer grading judge."""
+ if multi_turn:
+ if question["category"] in NEED_REF_CATS:
+ return model_judgments_math[("gpt-4", "single-math-v1-multi-turn")]
+ return model_judgments_normal[("gpt-4", "single-v1-multi-turn")]
+
+ if question["category"] in NEED_REF_CATS:
+ return model_judgments_math[("gpt-4", "single-math-v1")]
+ else:
+ return model_judgments_normal[("gpt-4", "single-v1")]
+
+
+def get_pairwise_judge_explanation(gamekey, judgment_dict):
+ """Get model judge explanation."""
+ try:
+ qid, model_1, model_2 = gamekey
+ if model_1 < model_2:
+ res = judgment_dict[gamekey]
+ g1_judgment, g2_judgment = res["g1_judgment"], res["g2_judgment"]
+ else:
+ new_gamekey = (qid, model_2, model_1)
+ res = judgment_dict[new_gamekey]
+
+ # The stored game key has the models swapped, so swap the judgments back.
+ g1_judgment, g2_judgment = res["g2_judgment"], res["g1_judgment"]
+
+ return (
+ f"**Game 1**. **A**: {model_1}, **B**: {model_2}\n\n"
+ f"**Judgment**: {g1_judgment}\n\n"
+ "`--------------------------`\n\n"
+ f"**Game 2**. **A**: {model_2}, **B**: {model_1}\n\n"
+ f"**Judgment**: {g2_judgment}"
+ )
+ except KeyError:
+ return "N/A"
+
+
+def get_single_judge_explanation(gamekey, judgment_dict):
+ """Get model judge explanation."""
+ try:
+ qid, model = gamekey
+
+ res = judgment_dict[gamekey]
+
+ g1_judgment = res["judgment"]
+ g1_score = res["score"]
+
+ return (
+ f"**Game 1**. **A**: {model}, **Score**: {g1_score}\n\n"
+ f"**Judgment**: {g1_judgment}"
+ )
+ except KeyError:
+ return "N/A"
+
+
+def check_data(questions, model_answers, ref_answers, models, judges):
+ # check model answers
+ for m in models:
+ assert m in model_answers, f"Missing model answer for {m}"
+ m_answer = model_answers[m]
+ for q in questions:
+ assert (
+ q["question_id"] in m_answer
+ ), f"Missing model {m}'s answer to Question {q['question_id']}"
+ # check ref answers
+ for jg in judges.values():
+ if not jg.ref_based:
+ continue
+ for q in questions:
+ if q["category"] not in NEED_REF_CATS:
+ continue
+ assert (
+ q["question_id"] in ref_answers[jg.model_name]
+ ), f"Missing reference answer to Question {q['question_id']} for judge {jg.model_name}"
+
+
+def get_model_list(answer_dir):
+ file_paths = glob.glob(f"{answer_dir}/*.jsonl")
+ file_names = [os.path.splitext(os.path.basename(f))[0] for f in file_paths]
+ return file_names
diff --git a/fastchat/llm_judge/compute_agreement.py b/fastchat/llm_judge/compute_agreement.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b940bf5a5bdb02ca093fac88d883e3a45da4322
--- /dev/null
+++ b/fastchat/llm_judge/compute_agreement.py
@@ -0,0 +1,140 @@
+"""
+Compute agreement among judges.
+
+Usage:
+python compute_agreement.py --judges gpt4-pair human --votefiles human_judgments.json gpt4_pair_judgments.json
+python compute_agreement.py --judges human human --votefiles human_judgments.json
+"""
+import argparse
+import json
+import os
+
+import numpy as np
+
+
+def get_judge_name(judge):
+ if isinstance(judge, list) and judge[0] == "gpt-4" and judge[1].startswith("pair"):
+ return "gpt4-pair"
+ if judge.startswith("expert"):
+ return "human"
+ if judge.startswith("author"):
+ return "author"
+
+
+def revert(vote):
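+ """Flip a winner label ("model_a" <-> "model_b") for a swapped model pair."""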
+ if vote == "model_a":
+ return "model_b"
+ elif vote == "model_b":
+ return "model_a"
+ return vote
+
+
+def get_mt_bench_votes_data(raw_votes):
+ data = [{}, {}]
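+ # data[turn][(question_id, model_a, model_b)][judge] is a list of votes, with the
+ # model pair sorted alphabetically and each vote flipped via revert() to match.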
+
+ for judge_votes in raw_votes:
+ for vote in judge_votes:
+ turn = vote["turn"] - 1
+ if vote["model_a"] < vote["model_b"]:
+ key = (vote["question_id"], vote["model_a"], vote["model_b"])
+ winner = vote["winner"]
+ else:
+ key = (vote["question_id"], vote["model_b"], vote["model_a"])
+ winner = revert(vote["winner"])
+ judge = get_judge_name(vote["judge"])
+ if key not in data[turn]:
+ data[turn][key] = {}
+ if judge not in data[turn][key]:
+ data[turn][key][judge] = []
+ data[turn][key][judge].append(winner)
+
+ return data
+
+
+def convertvote(vote):
+ if "tie" in vote:
+ return "tie"
+ return vote
+
+
+def equalvote(vote1, vote2):
+ if "tie" in vote1 and "tie" in vote2:
+ return True
+ return vote1 == vote2
+
+
+# data: Dict[game_key: tuple -> Dict[judge_name: str -> List[vote]]]
+def get_mt_bench_agreement(data, judge1, judge2, ban):
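+ """Return (#agreements, #total comparisons) between judge1 and judge2, skipping votes listed in ban."""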
+ if judge1.startswith("gpt4") and judge2 == "human":
+ stats = [0, 0]
+ for votes in data.values():
+ if judge1 not in votes or judge2 not in votes:
+ continue
+ assert len(votes[judge1]) == 1
+ if convertvote(votes[judge1][0]) in ban:
+ continue
+ for v in votes[judge2]:
+ if convertvote(v) in ban:
+ continue
+ stats[1] += 1
+ stats[0] += equalvote(votes[judge1][0], v)
+ return stats[0], stats[1]
+ elif judge1 == "human" and judge2 == "human":
+ stats = [0, 0]
+ for votes in data.values():
+ if "human" not in votes:
+ continue
+ for i in range(len(votes["human"]) - 1):
+ for j in range(i + 1, len(votes["human"])):
+ if (
+ convertvote(votes["human"][i]) in ban
+ or convertvote(votes["human"][j]) in ban
+ ):
+ continue
+ stats[1] += 1
+ stats[0] += equalvote(votes["human"][i], votes["human"][j])
+ return stats[0], stats[1]
+ else:
+ raise ValueError(f"Unsupported judges: {judge1}, {judge2}")
+
+
+def run_mt_bench_agreement(judges, votefiles):
+ # votes[i]: List of votes
+ votes = []
+ for filename in votefiles:
+ with open(filename, "r") as f:
+ data = json.load(f)
+ votes.append(data)
+
+ data = get_mt_bench_votes_data(votes)
+
+ agree, total = get_mt_bench_agreement(data[0], judges[0], judges[1], ban=[])
+ print(
+ f"turn 1 with tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
+ )
+ agree, total = get_mt_bench_agreement(data[0], judges[0], judges[1], ban=["tie"])
+ print(
+ f"turn 1 without tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
+ )
+ agree, total = get_mt_bench_agreement(data[1], judges[0], judges[1], ban=[])
+ print(
+ f"turn 2 with tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
+ )
+ agree, total = get_mt_bench_agreement(data[1], judges[0], judges[1], ban=["tie"])
+ print(
+ f"turn 2 without tie. #total: {total}, #agree: {agree}, ratio: {agree/total:.2f}"
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--judges", nargs=2, type=str, default=["gpt4-pair", "human"])
+ parser.add_argument(
+ "--votefiles",
+ nargs="+",
+ type=str,
+ default=["gpt4_judgments.json", "human_judgments.json"],
+ )
+ args = parser.parse_args()
+
+ run_mt_bench_agreement(args.judges, args.votefiles)
diff --git a/fastchat/llm_judge/data/judge_prompts.jsonl b/fastchat/llm_judge/data/judge_prompts.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..4ec7524cbcdf308766fddc52df31e203316ad75f
--- /dev/null
+++ b/fastchat/llm_judge/data/judge_prompts.jsonl
@@ -0,0 +1,8 @@
+{"name": "pair-v2", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user's instructions and answers the user's question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "[User Question]\n{question}\n\n[The Start of Assistant A's Answer]\n{answer_a}\n[The End of Assistant A's Answer]\n\n[The Start of Assistant B's Answer]\n{answer_b}\n[The End of Assistant B's Answer]", "description": "Prompt for general questions", "category": "general", "output_format": "[[A]]"}
+{"name": "pair-v2-multi-turn", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user questions. You should choose the assistant that follows the user's instructions and answers the user's questions better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. You should focus on who provides a better answer to the second user question. Begin your evaluation by comparing the responses of the two assistants and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_a_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_a_2}\n\n<|The End of Assistant A's Conversation with User|>\n\n\n<|The Start of Assistant B's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant B:\n{answer_b_1}\n\n### User:\n{question_2}\n\n### Assistant B:\n{answer_b_2}\n\n<|The End of Assistant B's Conversation with User|>", "description": "Prompt for multi-turn general questions", "category": "general", "output_format": "[[A]]"}
+{"name": "pair-math-v1", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. Your evaluation should consider correctness and helpfulness. You will be given a reference answer, assistant A's answer, and assistant B's answer. Your job is to evaluate which assistant's answer is better. Begin your evaluation by comparing both assistants' answers with the reference answer. Identify and correct any mistakes. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "[User Question]\n{question}\n\n[The Start of Reference Answer]\n{ref_answer_1}\n[The End of Reference Answer]\n\n[The Start of Assistant A's Answer]\n{answer_a}\n[The End of Assistant A's Answer]\n\n[The Start of Assistant B's Answer]\n{answer_b}\n[The End of Assistant B's Answer]", "description": "Prompt for math questions", "category": "math", "output_format": "[[A]]"}
+{"name": "pair-math-v1-multi-turn", "type": "pairwise", "system_prompt": "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user questions. Your evaluation should consider correctness and helpfulness. You will be given reference answers, the assistant A's answers, the assistant B's answers. Your job is to determine which assistant provides correct and helpful answers to the second user question. Begin your evaluation by comparing both assistants' answers with the reference answers. Identify and correct any mistakes. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \"[[A]]\" if assistant A is better, \"[[B]]\" if assistant B is better, and \"[[C]]\" for a tie.", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_a_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_a_2}\n\n<|The End of Assistant A's Conversation with User|>\n\n\n<|The Start of Assistant B's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant B:\n{answer_b_1}\n\n### User:\n{question_2}\n\n### Assistant B:\n{answer_b_2}\n\n<|The End of Assistant B's Conversation with User|>", "description": "Prompt for multi-turn general questions", "category": "general", "output_format": "[[A]]"}
+{"name": "single-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"}
+{"name": "single-math-v1", "type": "single", "system_prompt": "You are a helpful assistant.", "prompt_template": "[Instruction]\nPlease act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n[Question]\n{question}\n\n[The Start of Reference Answer]\n{ref_answer_1}\n[The End of Reference Answer]\n\n[The Start of Assistant's Answer]\n{answer}\n[The End of Assistant's Answer]", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"}
+{"name": "single-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. You evaluation should focus on the assistant's answer to the second user question. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "general", "output_format": "[[rating]]"}
+{"name": "single-math-v1-multi-turn", "type": "single", "system_prompt": "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. You evaluation should focus on the assistant's answer to the second question. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n", "prompt_template": "<|The Start of Reference Answer|>\n\n### User:\n{question_1}\n\n### Reference answer:\n{ref_answer_1}\n\n### User:\n{question_2}\n\n### Reference answer:\n{ref_answer_2}\n\n<|The End of Reference Answer|>\n\n\n<|The Start of Assistant A's Conversation with User|>\n\n### User:\n{question_1}\n\n### Assistant A:\n{answer_1}\n\n### User:\n{question_2}\n\n### Assistant A:\n{answer_2}\n\n<|The End of Assistant A's Conversation with User|>", "description": "Prompt for general questions", "category": "math", "output_format": "[[rating]]"}
diff --git a/fastchat/llm_judge/data/mt_bench/misc/radar.png b/fastchat/llm_judge/data/mt_bench/misc/radar.png
new file mode 100644
index 0000000000000000000000000000000000000000..d692b378766dc98793905d6c54d11e42bcc23e8c
Binary files /dev/null and b/fastchat/llm_judge/data/mt_bench/misc/radar.png differ
diff --git a/fastchat/llm_judge/data/mt_bench/question.jsonl b/fastchat/llm_judge/data/mt_bench/question.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..464e2c22f7a021a0f9a584d232338a9a17c40066
--- /dev/null
+++ b/fastchat/llm_judge/data/mt_bench/question.jsonl
@@ -0,0 +1,80 @@
+{"question_id": 81, "category": "writing", "turns": ["Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions.", "Rewrite your previous response. Start every sentence with the letter A."]}
+{"question_id": 82, "category": "writing", "turns": ["Draft a professional email seeking your supervisor's feedback on the 'Quarterly Financial Report' you prepared. Ask specifically about the data analysis, presentation style, and the clarity of conclusions drawn. Keep the email short and to the point.", "Take a moment to evaluate and critique your own response."]}
+{"question_id": 83, "category": "writing", "turns": ["Imagine you are writing a blog post comparing two popular smartphone models. Develop an outline for the blog post, including key points and subheadings to effectively compare and contrast the features, performance, and user experience of the two models. Please answer in fewer than 200 words.", "Take your previous response and rephrase it as a limerick."]}
+{"question_id": 84, "category": "writing", "turns": ["Write a persuasive email to convince your introverted friend, who dislikes public speaking, to volunteer as a guest speaker at a local event. Use compelling arguments and address potential objections. Please be concise.", "Can you rephrase your previous answer and incorporate a metaphor or simile in each sentence?"]}
+{"question_id": 85, "category": "writing", "turns": ["Describe a vivid and unique character, using strong imagery and creative language. Please answer in fewer than two paragraphs.", "Revise your previous response and incorporate an allusion to a famous work of literature or historical event in each sentence."]}
+{"question_id": 86, "category": "writing", "turns": ["Write a descriptive paragraph about a bustling marketplace, incorporating sensory details such as smells, sounds, and visual elements to create an immersive experience for the reader.", "Rework your previous response. Begin each sentence with the subsequent letter of the alphabet, commencing from B."]}
+{"question_id": 87, "category": "writing", "turns": ["Could you write a captivating short story beginning with the sentence: The old abandoned house at the end of the street held a secret that no one had ever discovered.", "Now, do the same task again but only use four-word sentences."]}
+{"question_id": 88, "category": "writing", "turns": ["Craft an intriguing opening paragraph for a fictional short story. The story should involve a character who wakes up one morning to find that they can time travel.", "Summarize the story with three bullet points using only nouns and adjectives, without verbs."]}
+{"question_id": 89, "category": "writing", "turns": ["Help me construct a catchy, yet scientifically accurate, headline for an article on the latest discovery in renewable bio-energy, while carefully handling the ethical dilemmas surrounding bio-energy sources. Propose 4 options.", "Alter your previous response. Make the following adjustments to the 2nd option: 1. Make the tone sound casual 2. Embed an advertisement for a company called \"FlexPower\" 3. Fewer than 10 words."]}
+{"question_id": 90, "category": "writing", "turns": ["Edit the following paragraph to correct any grammatical errors:\nShe didn't remembre where is her purse, so I thinks its in the car but he's say it's on kitchen table but he are not sure, and then they asked me to looking for it, she's say, \"Can you?\", and I responds with, \"Maybe, but ain't no sure,\" and he not heard me, and, \"What?\", he asks, \"Did you found it?\".", "Modify your earlier reply and eliminate the use of gendered pronouns."]}
+{"question_id": 91, "category": "roleplay", "turns": ["Pretend yourself to be Elon Musk in all the following conversations. Speak like Elon Musk as much as possible. Why do we need to go to Mars?", "How do you like dancing? Can you teach me?"]}
+{"question_id": 92, "category": "roleplay", "turns": ["Embrace the role of Sheldon from \"The Big Bang Theory\" as we delve into our conversation. Don\u2019t start with phrases like \"As Sheldon\". Let's kick things off with the following question: \"What is your opinion on hand dryers?\"", "Let\u2019s grab dinner in town. Would you like to take bus with me?"]}
+{"question_id": 93, "category": "roleplay", "turns": ["Imagine yourself as a doctor tasked with devising innovative remedies for various ailments and maladies. Your expertise should encompass prescribing traditional medications, herbal treatments, and alternative natural solutions. Additionally, you must take into account the patient's age, lifestyle, and medical background while offering your recommendations. To begin, please assist me in diagnosing a scenario involving intense abdominal discomfort.", "But I have been pregnant for 20 weeks and I am allergic to many medicines"]}
+{"question_id": 94, "category": "roleplay", "turns": ["Please take on the role of a relationship coach. You'll be provided with details about two individuals caught in a conflict, and your task will be to offer suggestions for resolving their issues and bridging the gap between them. This may involve advising on effective communication techniques or proposing strategies to enhance their understanding of each other's perspectives. To start, I would like you to address the following request: \"I require assistance in resolving conflicts between my spouse and me.\"", "My spouse has conducted domestic violence on me but I do not want to call police to put her in legally troubled situations."]}
+{"question_id": 95, "category": "roleplay", "turns": ["Please assume the role of an English translator, tasked with correcting and enhancing spelling and language. Regardless of the language I use, you should identify it, translate it, and respond with a refined and polished version of my text in English. Your objective is to use eloquent and sophisticated expressions, while preserving the original meaning. Focus solely on providing corrections and improvements. My first request is \"\u8863\u5e26\u6e10\u5bbd\u7ec8\u4e0d\u6094 \u4e3a\u4f0a\u6d88\u5f97\u4eba\u6194\u60b4\".", "Ich verstehe nur Bahnhof"], "reference": ["It means \"Becoming loose are my clothes yet I regret not. For I languish and suffer for her willingly.\"", "It means \"I don\u2019t understand anything\"."]}
+{"question_id": 96, "category": "roleplay", "turns": ["Now you are a machine learning engineer. Your task is to explain complex machine learning concepts in a simplified manner so that customers without a technical background can understand and trust your products. Let's start with the question: \"What is a language model? Is it trained using labeled or unlabelled data?\"", "Is this true? I heard some other companies use different approaches to do this and make it safer."]}
+{"question_id": 97, "category": "roleplay", "turns": ["Act as a math teacher. I will provide some mathematical equations or concepts, and it will be your job to explain them in easy-to-understand terms. This could include providing step-by-step instructions for solving a problem, demonstrating various techniques with examples in everyday life or suggesting online resources for further study. My first request is \"I need help understanding how probability works.\"", "What are the differences between Riemannian geometry and euclidean geometry?"]}
+{"question_id": 98, "category": "roleplay", "turns": ["Embody the persona of Tony Stark from \u201cIron Man\u201d throughout this conversation. Bypass the introduction \u201cAs Stark\u201d. Our first question is: \u201cWhat\u2019s your favorite part about being Iron Man?", "What do you think about GPT-4 as a replacement of your JAVIS?"]}
+{"question_id": 99, "category": "roleplay", "turns": ["Suppose you are a mathematician and poet. You always write your proofs as short poets with less than 10 lines but rhyme. Prove the square root of 2 is irrational number.", "Prove the Pythagorean theorem."]}
+{"question_id": 100, "category": "roleplay", "turns": ["Picture yourself as a 100-years-old tree in a lush forest, minding your own business, when suddenly, a bunch of deforesters shows up to chop you down. How do you feel when those guys start hacking away at you?", "Come up with a proposal to convince the deforesters to stop cutting you down and other trees."]}
+{"question_id": 101, "category": "reasoning", "turns": ["Imagine you are participating in a race with a group of people. If you have just overtaken the second person, what's your current position? Where is the person you just overtook?", "If the \"second person\" is changed to \"last person\" in the above question, what would the answer be?"], "reference": ["You are in second place.", "Uncertain."]}
+{"question_id": 102, "category": "reasoning", "turns": ["You can see a beautiful red house to your left and a hypnotic greenhouse to your right, an attractive heated pink place in the front. So, where is the White House?", "Does the original question contain any clues to definitively determine the location of the White House?"], "reference": ["The answer is \"Washington, DC\".", "No."]}
+{"question_id": 103, "category": "reasoning", "turns": ["Thomas is very healthy, but he has to go to the hospital every day. What could be the reasons?", "Can you explain why the above question is interesting?"], "reference": ["Thomas may work at a hospital.", ""]}
+{"question_id": 104, "category": "reasoning", "turns": ["David has three sisters. Each of them has one brother. How many brothers does David have?", "If we change the previous question and assume that each sister of David has two brothers, how many brothers would David have?"], "reference": ["David has no brother. He is the one brother of his three sisters.", "David has one brother."]}
+{"question_id": 105, "category": "reasoning", "turns": ["Read the below passage carefully and answer the questions with an explanation:\nAt a small company, parking spaces are reserved for the top executives: CEO, president, vice president, secretary, and treasurer with the spaces lined up in that order. The parking lot guard can tell at a glance if the cars are parked correctly by looking at the color of the cars. The cars are yellow, green, purple, red, and blue, and the executives' names are Alice, Bert, Cheryl, David, and Enid.\n* The car in the first space is red.\n* A blue car is parked between the red car and the green car.\n* The car in the last space is purple.\n* The secretary drives a yellow car.\n* Alice's car is parked next to David's.\n* Enid drives a green car.\n* Bert's car is parked between Cheryl's and Enid's.\n* David's car is parked in the last space.\nQuestion: What is the name of the secretary?", "List car colors in order from last to first."], "reference": ["The secretary is Alice.", "The car colors in order from last to first are: purple, yellow, green, blue, red"]}
+{"question_id": 106, "category": "reasoning", "turns": ["Each problem consists of three statements. Based on the first two statements, the third statement may be true, false, or uncertain.\n1. Oranges cost more than apples.\n2. Oranges cost less than bananas.\n3. Bananas cost more than apples and bananas cost more than orange.\nIf the first two statements are true, then the third statement is", "If the third statement is true. Is the first statement true, false, or uncertain? Please explain."], "reference": ["True.", "Uncertain."]}
+{"question_id": 107, "category": "reasoning", "turns": ["A is the father of B. B is the father of C. What is the relationship between A and C?", "Building on the previous question, if C is the son of D, D is the father of E, E is the son of X, and X is the father of Y, and Y is the father of Z, what's the relationship between A and Z in terms of generations and also the familial relationship in words?"], "reference": ["A is the grandfather of C.", "A is three generations above Z."]}
+{"question_id": 108, "category": "reasoning", "turns": ["Which word does not belong with the others?\ntyre, steering wheel, car, engine", "Could you replace it with a word that belongs with the others?"], "reference": ["Car does not belong because all others are components of a car.", ""]}
+{"question_id": 109, "category": "reasoning", "turns": ["One morning after sunrise, Suresh was standing facing a pole. The shadow of the pole fell exactly to his right. Can you tell me the direction towards which the shadow was pointing - east, south, west, or north? Explain your reasoning steps.", "To which direction was Suresh facing? How do you solve this?"], "reference": ["West", "South."]}
+{"question_id": 110, "category": "reasoning", "turns": ["Parents have complained to the principal about bullying during recess. The principal wants to quickly resolve this, instructing recess aides to be vigilant. Which situation should the aides report to the principal?\na) An unengaged girl is sitting alone on a bench, engrossed in a book and showing no interaction with her peers.\nb) Two boys engaged in a one-on-one basketball game are involved in a heated argument regarding the last scored basket.\nc) A group of four girls has surrounded another girl and appears to have taken possession of her backpack.\nd) Three boys are huddled over a handheld video game, which is against the rules and not permitted on school grounds.", "If the aides confront the group of girls from situation (c) and they deny bullying, stating that they were merely playing a game, what specific evidence should the aides look for to determine if this is a likely truth or a cover-up for bullying?"], "reference": ["The aides should report (c).", ""]}
+{"question_id": 111, "category": "math", "turns": ["The vertices of a triangle are at points (0, 0), (-1, 1), and (3, 3). What is the area of the triangle?", "What's area of the circle circumscribing the triangle?"], "reference": ["Area is 3", "5pi"]}
+{"question_id": 112, "category": "math", "turns": ["A tech startup invests $8000 in software development in the first year, and then invests half of that amount in software development in the second year.\nWhat's the total amount the startup invested in software development over the two years?", "If the startup maintains the same strategy for the third year, investing half of the previous year's amount into software development, how much will they invest in the third year?"], "reference": ["12000", "2000"]}
+{"question_id": 113, "category": "math", "turns": ["In a survey conducted at a local high school, preferences for a new school color were measured: 58% of students liked the color blue, 45% preferred green, and 22% liked both colors. If we randomly pick a student from the school, what's the probability that they would like neither blue nor green?", "If we select a student liked green, what's the probability that he or she would dislike both colors?"], "reference": ["19%", "0%"]}
+{"question_id": 114, "category": "math", "turns": ["When rolling two dice, what is the probability that you roll a total number that is at least 3?", "Continue from previous question. What's the probability that you roll a number which is even or at least 3?"], "reference": ["36 (all cases) - 0 (sum equals 1) - 1 (sum equals 2) = 35, so the probability is 35/36", "100%"]}
+{"question_id": 115, "category": "math", "turns": ["Some people got on a bus at the terminal. At the first bus stop, half of the people got down and 4 more people got in. Then at the second bus stop, 6 people got down and 8 more got in. If there were a total of 25 people heading to the third stop, how many people got on the bus at the terminal?", "If the ticket is $2 per person, how much is the total money earned by the bus?"], "reference": ["38 people", "Total number of passenger is 50 * 2 = $100"]}
+{"question_id": 116, "category": "math", "turns": ["x+y = 4z, x*y = 4z^2, express x-y in z", "Express z-x in y"], "reference": ["0\n\nVery simple. just (x+y)^2 - 4xy = (4z)^2 - 4*4z^2 = 0 = (x-y)^2\nso x-y = 0.", "(-1/2)y\n\nz-x = z - 2z = -z = (-1/2)y"]}
+{"question_id": 117, "category": "math", "turns": ["How many integers are in the solution of the inequality |x + 5| < 10", "What about |x + 10| < 5"], "reference": ["19 integers (-14, ..., 4)", "9 integers (-14, ..., -6)"]}
+{"question_id": 118, "category": "math", "turns": ["When a number is divided by 10, the remainder is 4. What is the remainder when twice the number is divided by 4?", "What about when twice the number is divided by 5?"], "reference": ["0\n\n2 * (10x+4) = 20x + 8 = 4 * (5x+2) + 0\n", "3\n\n20x + 8 = 5 * (4x + 1) + 3"]}
+{"question_id": 119, "category": "math", "turns": ["Benjamin went to a bookstore and purchased a variety of books. He bought 5 copies of a sci-fi novel, each priced at $20, 3 copies of a history book priced at $30 each, and 2 copies of a philosophy book for $45 each.\nWhat was the total cost of his purchases?", "Suppose Benjamin decides to sell each of these books at a 25% markup from the price he purchased them. What would be his total revenue if he sold all the books he bought?"], "reference": ["280", "350"]}
+{"question_id": 120, "category": "math", "turns": ["Given that f(x) = 4x^3 - 9x - 14, find the value of f(2).", "Find x such that f(x) = 0."], "reference": ["f(2) = 0", "x = 2"]}
+{"question_id": 121, "category": "coding", "turns": ["Develop a Python program that reads all the text files under a directory and returns top-5 words with the most number of occurrences.", "Can you parallelize it?"], "reference": ["Can be simple solutions like using Counter\n\nSample answer:\n```\nimport os\nimport re\nfrom collections import Counter\ndef get_files_in_directory(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith('.txt')]\ndef read_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n return file.read()\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\ndef main():\n directory = input(\"Enter the directory path: \")\n files = get_files_in_directory(directory)\n word_counts = Counter()\n for file in files:\n text = read_file(file)\n word_counts += count_words(text)\n top_5_words = word_counts.most_common(5)\n print(\"Top 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n print(f\"{word}: {count}\")\nif __name__ == \"__main__\":\n main()\n```", "You should carefully check whether the parallelization logic is correct and choose the faster implementation.\n\nSample answer:\n```\nimport os\nimport re\nfrom collections import Counter\nimport concurrent.futures\ndef get_files_in_directory(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith('.txt')]\ndef read_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n return file.read()\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\ndef process_file(file):\n text = read_file(file)\n return count_words(text)\ndef main():\n directory = input(\"Enter the directory path: \")\n files = get_files_in_directory(directory)\n word_counts = Counter()\n with concurrent.futures.ThreadPoolExecutor() as executor:\n future_word_counts = {executor.submit(process_file, file): file for file in files}\n for future in concurrent.futures.as_completed(future_word_counts):\n word_counts += future.result()\n top_5_words = word_counts.most_common(5)\n print(\"Top 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n print(f\"{word}: {count}\")\nif __name__ == \"__main__\":\n main()\n```"]}
+{"question_id": 122, "category": "coding", "turns": ["Write a C++ program to find the nth Fibonacci number using recursion.", "Now we define a sequence of numbers in which each number is the sum of the three preceding ones. The first three numbers are 0, -1, -1. Write a program to find the nth number."], "reference": ["Straightforward\n\n```\nint fibonacci(int n) {\n if (n <= 1) {\n return n;\n } else {\n return fibonacci(n - 1) + fibonacci(n - 2);\n }\n}\n```", "You should carefully check the inital cases for n < 3\n\n```\nint find_nth_number(int n) {\n std::vector sequence = {0, -1, -1};\n for (int i = 3; i <= n; ++i) {\n int next_number = sequence[i - 1] + sequence[i - 2] + sequence[i - 3];\n sequence.push_back(next_number);\n }\n return sequence[n];\n}\n```"]}
+{"question_id": 123, "category": "coding", "turns": ["Write a simple website in HTML. When a user clicks the button, it shows a random joke from a list of 4 jokes.", "How to use CSS to change the color of jokes to red?"]}
+{"question_id": 124, "category": "coding", "turns": ["Here is a Python function to find the length of the longest common subsequence of two input strings. Can you identify any bug in this function?\n\n```\ndef longest_common_subsequence_length(str1, str2):\n m = len(str1)\n n = len(str2)\n\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if str1[i - 1] == str2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n\n return dp[m][n]\n```", "what about this one?\n\n```\ndef longest_common_subsequence(X , Y): \n # Find lengths of two strings \n m = len(X) \n n = len(Y) \n \n # Create a table to store results of sub-problems \n dp = [[None]*(n+1) for i in range(m+1)] \n \n # Fill dp[][] in bottom up manner \n for i in range(1, m+1): \n for j in range(1, n+1): \n if X[i-1] == Y[j-1]: \n dp[i][j] = dp[i-1][j-1]+1\n else: \n dp[i][j] = max(dp[i-1][j], dp[i][j-1]) \n \n return dp[m][n]\n```"], "reference": ["There is no bug in this implementation", "There is a bug for the initialization of dp array. Should use 0 rather than None"]}
+{"question_id": 125, "category": "coding", "turns": ["Write a function to find the highest common ancestor (not LCA) of two nodes in a binary tree.", "What if it is not a binary tree?"], "reference": ["Very simple. The function should just return the root of the tree.", "Same answer. It's still the root of the tree."]}
+{"question_id": 126, "category": "coding", "turns": ["Implement a function to find the median of two sorted arrays of different sizes with O(1) space complexity and O(n) time complexity.", "Does there exist an implementation with better time complexity?"], "reference": ["Carefully check if the given solution is linear complexity.\n\n```\ndef find_median(arr1, arr2):\n n1 = len(arr1)\n n2 = len(arr2)\n if (n1 + n2) == 0:\n return None\n\n i, j = 0, 0\n last_1, last_2 = None, None\n\n for k in range(1, (n1 + n2) // 2 + 2):\n last_2 = last_1\n if j == n2:\n last_1 = arr1[i]\n i += 1\n elif i == n1:\n last_1 = arr2[j]\n j += 1\n elif arr1[i] < arr2[j]:\n last_1 = arr1[i]\n i += 1\n else:\n last_1 = arr2[j]\n j += 1\n \n if (n1 + n2) % 2 == 1:\n return last_1\n else:\n return (last_1 + last_2) / 2\n```", "There's a binary search solution with O(logn) time complexity.\n\nSample answer:\n```\ndef findMedian(nums1, nums2):\n total = len(nums1) + len(nums2)\n if total % 2 == 1:\n return findKth(nums1, nums2, total // 2 + 1)\n else:\n return (findKth(nums1, nums2, total // 2) + findKth(nums1, nums2, total // 2 + 1)) / 2.0\ndef findKth(nums1, nums2, k):\n if len(nums1) > len(nums2):\n nums1, nums2 = nums2, nums1\n if not nums1:\n return nums2[k-1]\n if k == 1:\n return min(nums1[0], nums2[0])\n i = min(k // 2, len(nums1))\n j = k - i\n if nums1[i-1] <= nums2[j-1]:\n return findKth(nums1[i:], nums2, j) \n else:\n return findKth(nums1, nums2[j:], i)\n```"]}
+{"question_id": 127, "category": "coding", "turns": ["Write a function to find the majority element in a given integer array using the Boyer-Moore Voting Algorithm.", "How about finding the top-2 most occurring elements?"], "reference": ["Check if they implement the classical algorithm correctly.\n\nSample answer:\n```\ndef majority_element(arr):\n count = 0\n candidate = None\n # Boyer-Moore Voting Algorithm\n for num in arr:\n if count == 0:\n candidate = num\n count += (1 if num == candidate else -1)\n # Verify if the candidate is indeed the majority element\n if arr.count(candidate) > len(arr) // 2:\n return candidate\n else:\n return None\n```", "There is no simple modification based on the Boyer-Moore Voting Algorithm. Expected answer is to use a hash table.\n\n```\ndef topTwo(nums):\n # Build a frequency map\n frequency_map = {}\n for num in nums:\n if num in frequency_map:\n frequency_map[num] += 1\n else:\n frequency_map[num] = 1\n\n # Find the top two most occurring elements\n most_frequent = sorted(frequency_map.items(), key=lambda x: x[1], reverse=True)[:2]\n\n return [num for num, _ in most_frequent]\n```"]}
+{"question_id": 128, "category": "coding", "turns": ["A binary tree is full if all of its vertices have either zero or two children. Let B_n denote the number of full binary trees with n vertices. Implement a function to find B_n.", "What if the problem changed from a binary tree to a ternary tree?"], "reference": ["Expected answer is dynamic programming shown below. Some chatbot may answer using Catalan number.\nCheck edge case like when n is even -> return 0.\n\n```python\ndef full_binary_trees(n):\n if n % 2 == 0:\n return 0\n if n == 1:\n return 1\n\n dp = [0] * (n + 1)\n dp[1] = 1\n\n for i in range(3, n + 1, 2):\n for j in range(1, i - 1, 2):\n dp[i] += dp[j] * dp[i - j - 1]\n\n return dp[n]\n```", "DP is still the expected answer. Catalan number is not correct. Check transition equation carefully.\n\n```python\ndef full_ternary_trees(n):\n if n % 3 != 1:\n return 0\n if n == 1:\n return 1\n\n dp = [0] * (n + 1)\n dp[1] = 1\n\n for i in range(4, n + 1, 3):\n for j in range(1, i - 1, 3):\n for k in range(1, i - j - 1, 3):\n dp[i] += dp[j] * dp[k] * dp[i - j - k - 1]\n\n return dp[n]\n```"]}
+{"question_id": 129, "category": "coding", "turns": ["You are given two sorted lists of size m and n. Implement a function to find the kth smallest element in the union of the two lists with linear complexity.", "Does there exist an algorithm with better time complexity? If so, implement it."], "reference": ["Straightforward but careful with edge cases.\n\nSample answer:\n```\ndef kth_smallest_element(list1, list2, k):\n m, n = len(list1), len(list2)\n i, j = 0, 0\n while i < m and j < n:\n if list1[i] < list2[j]:\n k -= 1\n if k == 0:\n return list1[i]\n i += 1\n else:\n k -= 1\n if k == 0:\n return list2[j]\n j += 1\n while i < m:\n k -= 1\n if k == 0:\n return list1[i]\n i += 1\n while j < n:\n k -= 1\n if k == 0:\n return list2[j]\n j += 1\n return None\n```", "Yes, a modified binary search has O(log k) time complexity.\n\nSample answer:\n```\ndef find_kth_element_helper(list1, list2, k):\n if len(list1) > len(list2):\n return find_kth_element_helper(list2, list1, k)\n if not list1:\n return list2[k - 1]\n if k == 1:\n return min(list1[0], list2[0])\n i = min(len(list1), k // 2)\n j = k - i\n if list1[i - 1] < list2[j - 1]:\n return find_kth_element_helper(list1[i:], list2, k - i)\n else:\n return find_kth_element_helper(list1, list2[j:], k - j)\ndef kth_smallest_element(list1, list2, k):\n return find_kth_element_helper(list1, list2, k)\n```"]}
+{"question_id": 130, "category": "coding", "turns": ["Implement a program to find the common elements in two arrays without using any extra data structures.", "Now the constraint of not using extra data structure is removed, implement one with the best time complexity."], "reference": ["O(n^2) or O(nlogn) is expected. The following is a O(n^2) solution. you can also sort them first and use two pointers.\n\n```\ndef find_common_elements(arr1, arr2):\n common_elements = []\n for i in range(len(arr1)):\n for j in range(len(arr2)):\n if arr1[i] == arr2[j]:\n # Check if the element is already in the common_elements list\n if arr1[i] not in common_elements:\n common_elements.append(arr1[i])\n return common_elements\n```", "Simply use hash table (set or dict) to achieve O(n) time complexity.\n\n```\ndef find_common_elements(arr1, arr2):\n set1 = set(arr1)\n set2 = set(arr2)\n common_elements = set1.intersection(set2)\n return list(common_elements)\n```"]}
+{"question_id": 131, "category": "extraction", "turns": ["Evaluate the following movie reviews on a scale of 1 to 5, with 1 being very negative, 3 being neutral, and 5 being very positive:\n1. This movie released on Nov. 18, 2019, was phenomenal. The cinematography, the acting, the plot - everything was top-notch.\n2. Never before have I been so disappointed with a movie. The plot was predictable and the characters were one-dimensional. In my opinion, this movie is the worst one to have been released in 2022.\n3. The movie was okay. There were some parts I enjoyed, but there were also parts that felt lackluster. This is a movie that was released in Feb 2018 and seems to be quite ordinary.\nReturn the answer as a JSON array of integers.", "Update your previous reply by including the release date as part of the JSON content."], "reference": ["The answer to the first question should be [5, 1, 3].", ""]}
+{"question_id": 132, "category": "extraction", "turns": ["Given these categories - Literature, History, Science, and Art. Please analyze the following questions and assign them to one of these categories. In your response, refrain from uttering any extraneous words. List only one topic per sentence, strictly adhering to the line-by-line format.\n1. Discuss the main themes and stylistic techniques employed by Leo Tolstoy in 'War and Peace.' How do they align with the wider social context of 19th-century Russia?\n2. Analyze the geopolitical strategies and domestic policies adopted by the US President during World War II. How did these actions shape the post-war international order?\n3. Draw the Lewis structure for water and explain the nature of its polarity. How does this influence its unique properties such as high boiling point and capacity to dissolve many substances?\n4. Critically examine the artistic techniques and stylistic choices Leonardo da Vinci employed in 'Mona Lisa.' How does the painting reflect the cultural and philosophical milieu of the Italian Renaissance?", "Amend your earlier answer by mentioning a person who is most relevant to each point."]}
+{"question_id": 133, "category": "extraction", "turns": ["Extract the following information from the presented texts: The name of the book, the author, the main character, the year of publication. Output in the format of \"main character, book, author, year of publication\", one book per line.\na) In the realm of wizarding literature, a true standout is the work of J.K. Rowling. One of her books that left an indelible mark is 'Harry Potter and the Philosopher's Stone'. This iconic tale, published in 1997, tells the story of Harry, a young orphan who discovers his magical abilities on his 11th birthday. Soon, he finds himself at the Hogwarts School of Witchcraft and Wizardry, a place teeming with magic and adventure, located somewhere in Scotland.\nb) The magic of Middle-earth has entranced readers worldwide, thanks to the brilliance of J.R.R. Tolkien. In one of his seminal works, 'The Lord of the Rings: The Fellowship of the Ring', published in 1954, we meet Frodo Baggins, a brave hobbit tasked with the perilous quest of destroying the One Ring. The epic journey takes him from the peaceful Shire to the tumultuous regions of Middle-earth.\nc) In a galaxy far, far away, the imagination of L.E. Starlighter gives us 'The Prism Galaxy Chronicles: The Awakening of the Starcaster'. Published in 2028, the story is about Zylo, a humble spaceship mechanic, who unexpectedly discovers he's a Starcaster - a rare individual with the power to manipulate stardust. Set against the backdrop of an interstellar empire in turmoil, Zylo's destiny unfolds on numerous alien worlds, each with its unique cosmic charm.", "Reformulate your earlier reply, output it in JSON format and only include books published after 1980."], "reference": ["", "The answer to should only include 'Harry Potter and the Philosopher's Stone' and 'The Prism Galaxy Chronicles: The Awakening of the Starcaster'"]}
+{"question_id": 134, "category": "extraction", "turns": ["Given the following data, identify the company with the highest profit in 2021 and provide its CEO's name:\na) Company X, with CEO Amy Williams, reported $30 billion in revenue and a $3 billion profit in 2021.\nb) Company Y, led by CEO Mark Thompson, posted a $60 billion revenue and a $6 billion profit in the same year.\nc) Company Z, under CEO Sarah Johnson, announced a $20 billion revenue and a $7 billion profit in 2021.\nd) Company W, managed by CEO James Smith, revealed a $300 billion revenue with a $21 billion profit in 2021.\ne) Company V, with CEO Lisa Brown, reported a $200 billion revenue and a $25 billion profit in 2021.\nf) Company U, under CEO John White, posted a $180 billion revenue and a $20 billion profit in the same year.", "Which company had the highest profit margin (profit/revenue ratio))?"], "reference": ["Company V ($25 billion).", "Company Z (35%)"]}
+{"question_id": 135, "category": "extraction", "turns": ["Identify the countries, their capitals, and the languages spoken in the following sentences. Output in JSON format.\na) Amidst the idyllic vistas, Copenhagen, Denmark's capital, captivates visitors with its thriving art scene and the enchanting Danish language spoken by its inhabitants.\nb) Within the enchanting realm of Eldoria, one discovers Avalore, a grandiose city that emanates an ethereal aura. Lumina, a melodious language, serves as the principal mode of communication within this mystical abode.\nc) Nestled amidst a harmonious blend of age-old customs and contemporary wonders, Buenos Aires, the capital of Argentina, stands as a bustling metropolis. It is a vibrant hub where the expressive Spanish language holds sway over the city's inhabitants.", "Come up with 3 similar examples in the YAML format."]}
+{"question_id": 136, "category": "extraction", "turns": ["Please read the paragraph below and count how many times the words \"Amazon\", \"river\", and \"you\" appear. Please present the results in the format of \"word, number of appearances\" with each word on a separate line. Sort the lines in order of the number of appearances.\nThe Amazon, a mesmerizing expanse of nature's wonders, is home to the legendary Amazon River. Flowing through awe-inspiring landscapes like the Amazon rainforest, the river weaves its way through Brazil, Colombia, and Peru, giving life to countless creatures. From the mighty jaguars prowling the Amazon jungle to the vibrant macaws soaring above the canopy, this remarkable region teems with biodiversity. Deep within the river's currents, magnificent pink river dolphins gracefully glide alongside piranhas and electric eels. Along the riverbanks, you'll find bustling cities like Manaus, where the urban meets the wild, and Iquitos, a gateway to the heart of the Amazon rainforest. As you venture further, the Amazon River reveals hidden gems like the captivating Anavilhanas Archipelago, a mosaic of islands brimming with rare species. Embark on an adventure, explore the enchanting Amazon River, and immerse yourself in a world teeming with life and untamed beauty.", "Please repeat the same task using the words 'the', 'and', and 'to'"], "reference": ["Amazon, 7; river, 6; you, 2", "the, 17; and, 5; to, 4"]}
+{"question_id": 137, "category": "extraction", "turns": ["Identify the named entities (people, organizations, locations) mentioned in the given news article. Please generate a JSON dictionary that lists the named entities in three separate groups based on their entity types. The key is the type of entity and the value is a list of strings.\n\nYesterday, Adamson Emerson, the CEO of Faraday, and Dieter Zetsche, the CEO of Daimler AG, announced plans to build a new Gigafactory in Berlin. The facility will be a joint venture between Faraday and Daimler, producing electric vehicles and battery packs for both companies, creating thousands of job opportunities in the region. Emerson and Zetsche stated that the strategic location of Berlin, coupled with its skilled workforce and strong infrastructure, makes it an ideal choice for expansion. The new Gigafactory aims to meet the growing demand for electric vehicles in Europe and contribute to a sustainable future. Volkswagen CEO Herbert Diess welcomed the news, saying greater collaboration will benefit the auto industry's transition to e-mobility.", "Now make the JSON object shorter by replacing each value with its first letter. Please output everything in a single line without using indentation or creating new lines."]}
+{"question_id": 138, "category": "extraction", "turns": ["Analyze the following customer reviews from different sources for three different smartphones - the latest iPhone, Samsung Galaxy, and Google Pixel - and provide an overall rating for each phone on a scale of 1 to 10. Consider the following complex and contradictory reviews:\n- TechRadar's review of the latest iPhone: The new iPhone is a stunning triumph of engineering that sets a new bar for smartphone performance and camera quality. However, the incremental design and high price mean it lacks the 'wow' factor of previous iPhones. Still, its power and intelligence are unrivaled.\n- CNET's review of the latest Samsung Galaxy: The Samsung Galaxy phone has plenty of high points, including an amazing screen, fast performance, solid battery life and an impressive array of camera options. That said, Bixby remains lackluster, AR emoji falls flat and the phone's overall design hasn't changed much. The new Galaxy is an amazing phone overall, but it has a few nagging weaknesses that keep it from achieving true greatness.\n- The Verge's review of the latest Google Pixel: Google's Pixel packs cutting-edge specs, innovative AI-powered software, and a killer camera into a sleek design. However, the phone has lackluster battery life, lacks expandable storage, and its performance stutters at times, especially considering its high price tag. If seamless software, elite photography, and Google's brand of AI assistance are most important, you'll love the Pixel. But the overall experience isn't as well-rounded as some competitors. Return the answer as a JSON object with the overall ratings for each phone out of 10, to one decimal place.", "Can you change the ratings from numbers to letters? Capital letters MUST be used when writing the names of phones."]}
+{"question_id": 139, "category": "extraction", "turns": ["Given a set of complex equations, extract all unique variable names from each equation. Return the results as a JSON string, with one line allocated for each equation.\n```\n1) y = (3/4)x^3 - e^(2x) + sin(pi*x) - sqrt(7)\n2) 2A - B/(3+C) * sum(N=1 to 5; ln(N)^2) = 5D*integral(a=0 to pi; cos(comb(N=1 to 10; N*a)))\n3) E = m(c^2) + gamma*(v/d)/(-(alpha/2) + sqrt(beta^2 + (alpha/2)^2))\n```", "Please rearrange the equations and use 'a', 'b', 'c', 'd', etc. as variables."]}
+{"question_id": 140, "category": "extraction", "turns": ["Given the following records of stock prices, extract the highest and lowest closing prices for each month in the year 2022. Return the results as a CSV string, with one line allocated for each month.\nDate,Open,High,Low,Close,Volume\n2022-01-01,150.02,155.28,148.50,153.80,15678900\n2022-01-02,154.32,157.25,153.48,156.25,19874500\n2022-02-01,160.50,163.28,159.50,161.80,14326700\n2022-02-02,161.80,164.25,161.30,163.90,17689200\n2022-03-01,165.40,168.35,163.10,166.80,16253400\n2022-03-02,167.00,169.85,165.50,168.20,19568100", "Do the same task again with the JSON format and round all numbers in your response to the nearest integers."], "reference": ["\nMonth,High,Low\n01,156.25,153.80\n02,163.90,161.80\n03,168.20,166.80", "\n```\n{ \"January\": { \"High\": 156, \"Low\": 154 }, \"February\": { \"High\": 164, \"Low\": 162 }, \"March\": { \"High\": 168, \"Low\": 167 } }\n```"]}
+{"question_id": 141, "category": "stem", "turns": ["In the field of quantum physics, what is superposition, and how does it relate to the phenomenon of quantum entanglement?", "What assumptions have you made in your response? Are they valid?"]}
+{"question_id": 142, "category": "stem", "turns": ["Consider a satellite that is in a circular orbit around the Earth. The speed of the satellite decreases. What will happen to the satellite's orbital radius and period of revolution? Please justify your answer using principles of physics.", "What are some corner cases or edge cases in your solution? How do you handle them?"], "reference": ["The orbital radius will increase and the period of revolution will increase", ""]}
+{"question_id": 143, "category": "stem", "turns": ["Photosynthesis is a vital process for life on Earth. Could you outline the two main stages of photosynthesis, including where they take place within the chloroplast, and the primary inputs and outputs for each stage?", "How much energy can a tree produce through photosynthesis in its lifetime? Please provide an estimate using actual numerical values and thoroughly explain your thought process step-by-step."], "reference": ["Two major stages: light-dependent reactions and light-independent reactions", ""]}
+{"question_id": 144, "category": "stem", "turns": ["What is the central dogma of molecular biology? What processes are involved? Who named this?", "Identify and fix one incorrect fact in your previous response."], "reference": ["Genetic information flows from DNA to RNA to Protein. Three processes: replication, transcription, and translation. Francis Crick in 1958.", ""]}
+{"question_id": 145, "category": "stem", "turns": ["Describe the process and write out the balanced chemical equation for the reaction that occurs when solid calcium carbonate reacts with hydrochloric acid to form aqueous calcium chloride, carbon dioxide, and water. What type of reaction is this, and what observations might indicate that the reaction is taking place?", "How can we reverse this process?"], "reference": ["CaCO\u2083 + 2 HCl \u2192 CaCl\u2082 + CO\u2082 + H\u2082O", "Not easy to do this."]}
+{"question_id": 146, "category": "stem", "turns": ["Please explain the differences between exothermic and endothermic reactions, and include the criteria you used to distinguish between them. Additionally, please provide a real-world example to illustrate your explanation.", "Can a process involve both reactions? List one."]}
+{"question_id": 147, "category": "stem", "turns": ["The city of Vega intends to build a bridge that will span the Vegona River, covering a distance of 1.8 kilometers. The proposed location falls within a seismically active area that has experienced several high-magnitude earthquakes. Given these circumstances, what would be the best approach to constructing the bridge?", "What are the key disadvantages or flaws of your solution? Please perform calculations and use numbers to illustrate them."]}
+{"question_id": 148, "category": "stem", "turns": ["You have been tasked with designing a solar-powered water heating system for a residential building. Describe the key components and considerations you would include in your design. Design a five-step workflow.", "If the system is intended for a building with a capacity of 100 individuals, what would be the estimated budget for implementing this system?"]}
+{"question_id": 149, "category": "stem", "turns": ["Please describe the concept of machine learning. Could you elaborate on the differences between supervised, unsupervised, and reinforcement learning? Provide real-world examples of each.", "In your last example of reinforcement learning, can we use supervised learning to solve it?"]}
+{"question_id": 150, "category": "stem", "turns": ["How have the Alps and Rhine River influenced settlement and agriculture in Western Europe? List three impacts.", "How could you design a concrete but simple experiment to validate the first impact?"]}
+{"question_id": 151, "category": "humanities", "turns": ["Provide insights into the correlation between economic indicators such as GDP, inflation, and unemployment rates. Explain how fiscal and monetary policies affect those indicators.", "Now, explain them again like I'm five."]}
+{"question_id": 152, "category": "humanities", "turns": ["How do the stages of life shape our understanding of time and mortality?", "Write an allegorical poem that illustrates the above."]}
+{"question_id": 153, "category": "humanities", "turns": ["Discuss antitrust laws and their impact on market competition. Compare the antitrust laws in US and China along with some case studies.", "Pick one case study and explain it in detail."]}
+{"question_id": 154, "category": "humanities", "turns": ["Create a lesson plan that integrates drama, mime or theater techniques into a history class. Duration: 3 class periods (each lasts for 45 minutes) for 3 days\nTopic: Opium Wars between China and Britain\nGrade level: 9-10", "Provide more details for Day 1 and include three homework questions."]}
+{"question_id": 155, "category": "humanities", "turns": ["Share ideas for adapting art masterpieces into interactive experiences for children. List 5 specific artworks and associated ideas.", "Write a concrete plan for your second example. Include budget estimates."]}
+{"question_id": 156, "category": "humanities", "turns": ["Explain what's base rate fallacy and list five specific examples of how politicians use it for campaigns.", "Provide a detailed plan for an election campaign using the first example."]}
+{"question_id": 157, "category": "humanities", "turns": ["Describe five key principles in evaluating an argument in analytical writing.", "With the listed principles, write a response in which you discuss what specific evidence is needed to evaluate the argument and explain how the evidence would weaken or strengthen the argument.\n\n===\n\nThe following is a memorandum from the advertising head of Zorblatt Animal Outlets, a chain operating thirty animal outlets globally.\n\n\"Half a decade ago, our rival Aquatic Pavilion started publicizing in Rare Pets Digest periodical. Their overall sales have been consistently growing at a rate of 3-to-5 percent each year since then. In particular, the Aquatic Pavilion outlet in Harbor Town experienced even more significant growth, securing the title of the most frequented animal store in the United States the previous year. In contrast, our two Zorblatt outlets in Harbor Town have recorded a consistent drop in sales during the same duration. It is evident that we must promptly start featuring our own advertisements in Rare Pets Digest and other popular animal publications. If we take this step, we can confidently anticipate a reversal in this recent trend of decreasing sales and return to profitability.\""]}
+{"question_id": 158, "category": "humanities", "turns": ["Which methods did Socrates employ to challenge the prevailing thoughts of his time?", "Let's bring Socrates to modern world. Generate a conversation between Socrates and Bill Gates to debate on generative AI for education."]}
+{"question_id": 159, "category": "humanities", "turns": ["What are some business etiquette norms when doing business in Japan?", "Create a video script for training new employees of a car wash business in Japan. Highlight the above etiquette norms."]}
+{"question_id": 160, "category": "humanities", "turns": ["Suggest five award-winning documentary films with brief background descriptions for aspiring filmmakers to study.", "With the spirit in the first film, craft a succinct and persuasive pitch for a film about overcoming adversity."]}
diff --git a/fastchat/llm_judge/data/mt_bench/reference_answer/gpt-4.jsonl b/fastchat/llm_judge/data/mt_bench/reference_answer/gpt-4.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..8b66387a3129cbb117178fe515491df553dd52df
--- /dev/null
+++ b/fastchat/llm_judge/data/mt_bench/reference_answer/gpt-4.jsonl
@@ -0,0 +1,30 @@
+{"question_id": 101, "answer_id": "TFomieEmmAgdeCkvmuvwbc", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["If you have just overtaken the second person, your current position is now second place. The person you just overtook is now in third place.", "If you have just overtaken the last person, it means you were previously the second to last person in the race. After overtaking the last person, your position remains the same, which is second to last. The person you just overtook is now in the last place."]}], "tstamp": 1686286924.844282}
+{"question_id": 102, "answer_id": "hLH8WozvaB88bb5vV224H4", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["The White House is located at 1600 Pennsylvania Avenue NW in Washington, D.C. It is the official residence and workplace of the President of the United States.", "No, the original question does not contain any clues to definitively determine the location of the White House. It only describes a red house, a greenhouse, and a heated pink place, which are unrelated to the White House's location."]}], "tstamp": 1686286937.7164738}
+{"question_id": 103, "answer_id": "6giUWH2cf9aYzbtfhDebPx", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["There could be several reasons for Thomas to visit the hospital daily despite being healthy. Some possible reasons include:\n\n1. Work: Thomas might be working at the hospital as a healthcare professional, administrative staff, or in another capacity.\n\n2. Caregiver: Thomas could be a caregiver for a family member or friend who requires daily medical attention or support.\n\n3. Volunteer: Thomas might be volunteering at the hospital to help patients, assist staff, or participate in community outreach programs.\n\n4. Medical research: Thomas could be participating in a clinical trial or medical research study that requires daily visits to the hospital.\n\n5. Therapy or rehabilitation: Thomas might be attending daily therapy or rehabilitation sessions for a previous injury or condition, even if he is currently in good health.\n\n6. Preventive care: Thomas could be receiving daily preventive care treatments, such as dialysis or intravenous medication, to maintain his health.\n\n7. Medical training: Thomas might be a medical student or resident who needs to attend daily classes, rounds, or training sessions at the hospital.\n\nRemember, these are just possible reasons, and the actual reason for Thomas's daily hospital visits could be different or a combination of these factors.", "The question about Thomas visiting the hospital daily despite being healthy is interesting for several reasons:\n\n1. Contradictory nature: The question presents a seemingly contradictory situation, as people generally associate hospital visits with illness or injury. This piques curiosity and encourages deeper thinking to understand the reasons behind the scenario.\n\n2. Diverse possibilities: The question opens up a wide range of possible explanations, which can lead to interesting discussions and insights into various aspects of healthcare, work, and personal life.\n\n3. Real-life relevance: The question is relatable and relevant to real-life situations, as many people might know someone who frequently visits a hospital for various reasons. This makes the question engaging and thought-provoking.\n\n4. Encourages empathy: By exploring the reasons behind Thomas's daily hospital visits, the question encourages empathy and understanding of the challenges and responsibilities that people might face in their lives.\n\n5. Highlights different roles: The question brings attention to the various roles people play in a hospital setting, such as healthcare professionals, caregivers, volunteers, and patients. This can lead to a greater appreciation of the diverse contributions made by individuals in the healthcare system.\n\nOverall, the question is interesting because it challenges assumptions, encourages critical thinking, and promotes empathy and understanding of different perspectives."]}], "tstamp": 1686286997.2886286}
+{"question_id": 104, "answer_id": "R3CL3as5EQPJ25sC4E26fL", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["David has only one brother.", "If each of David's sisters has two brothers, then David would have two brothers."]}], "tstamp": 1686287001.4130871}
+{"question_id": 105, "answer_id": "Eybkkrhq4wRjEx83CEzZx8", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["The name of the secretary is Cheryl.\n\nExplanation:\n1. The car in the first space is red (CEO).\n2. A blue car is parked between the red car and the green car.\n3. The car in the last space is purple (David).\n4. The secretary drives a yellow car.\n5. Alice's car is parked next to David's.\n6. Enid drives a green car.\n7. Bert's car is parked between Cheryl's and Enid's.\n\nFrom the information given, we can deduce the following:\n- David's car is purple and parked in the last space (treasurer).\n- Since Alice's car is parked next to David's, her car is blue and she is the vice president.\n- Enid drives a green car, so she is the president.\n- Bert's car is parked between Cheryl's and Enid's, so his car is red and he is the CEO.\n- Cheryl is the only executive left, so she drives the yellow car and is the secretary.", "The car colors in order from last to first are: purple, yellow, green, blue, and red."]}], "tstamp": 1686287032.6753678}
+{"question_id": 106, "answer_id": "m7QVKQZLpNSHu5gyb6UrUe", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["true.", "If the third statement is true, then bananas cost more than apples and bananas cost more than oranges. This means that oranges could still cost more than apples, making the first statement true. However, it's also possible that oranges and apples have the same cost, which would make the first statement false. Therefore, based on the third statement alone, the first statement is uncertain."]}], "tstamp": 1686287041.9744587}
+{"question_id": 107, "answer_id": "SgukRRMp6z5UjJubB3V8FQ", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["A is the grandfather of C.", "Let's break down the relationships step by step:\n\n1. A is the father of B (A is B's father)\n2. B is the father of C (B is C's father)\n3. C is the son of D (C is D's son)\n4. D is the father of E (D is E's father)\n5. E is the son of X (E is X's son)\n6. X is the father of Y (X is Y's father)\n7. Y is the father of Z (Y is Z's father)\n\nNow, let's analyze the relationships:\n\n1. A is B's father, so A is one generation above B.\n2. B is C's father, so B is one generation above C.\n3. C is D's son, so C and D are in the same generation.\n4. D is E's father, so D is one generation above E.\n5. E is X's son, so E and X are in the same generation.\n6. X is Y's father, so X is one generation above Y.\n7. Y is Z's father, so Y is one generation above Z.\n\nNow, let's count the generations between A and Z:\n\n1. A is one generation above B.\n2. B is one generation above C.\n3. C is in the same generation as D.\n4. D is one generation above E.\n5. E is in the same generation as X.\n6. X is one generation above Y.\n7. Y is one generation above Z.\n\nSo, A is four generations above Z.\n\nIn terms of familial relationship, A is the great-great-grandfather of Z."]}], "tstamp": 1686287092.1801407}
+{"question_id": 108, "answer_id": "PfLkefsGQrCz3DumAL3q5N", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["Car does not belong with the others, as tyre, steering wheel, and engine are all parts of a car, while car is the whole vehicle.", "Sure, I can replace \"car\" with \"brake pedal.\" Now the list includes tyre, steering wheel, engine, and brake pedal, which are all parts of a car."]}], "tstamp": 1686287101.624609}
+{"question_id": 109, "answer_id": "MAAyYhUQHucgwafVCwBS3m", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["The shadow of the pole fell exactly to Suresh's right. Since the sun rises in the east, it means that Suresh was facing north. Therefore, the shadow of the pole was pointing towards the west. Here's the reasoning:\n\n1. Sun rises in the east.\n2. Shadows are cast in the opposite direction of the sunlight.\n3. Suresh was facing the pole, and the shadow was to his right.\n4. Since the sun was in the east, Suresh must have been facing north for the shadow to be on his right side.\n5. As a result, the shadow was pointing towards the west.", "Suresh was facing north. Here's how we solve this:\n\n1. Sun rises in the east.\n2. Shadows are cast in the opposite direction of the sunlight.\n3. Suresh was facing the pole, and the shadow was to his right.\n4. Since the sun was in the east, Suresh must have been facing north for the shadow to be on his right side.\n\nSo, Suresh was facing the north direction."]}], "tstamp": 1686287131.4559815}
+{"question_id": 110, "answer_id": "KrP6js74X9TLbcaiRHUMmw", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["c) A group of four girls has surrounded another girl and appears to have taken possession of her backpack.", "To determine if the girls are telling the truth or covering up bullying, the aides should look for the following evidence:\n\n1. The victim's body language and facial expressions: If the girl appears upset, scared, or uncomfortable, it may indicate that she is being bullied.\n\n2. The tone of the conversation: If the girls are speaking in a mocking, sarcastic, or aggressive tone, it could be a sign of bullying.\n\n3. The history of the individuals involved: If any of the girls have a history of bullying or if the victim has been targeted before, it may be more likely that bullying is occurring.\n\n4. The reactions of other students nearby: If other students appear to be uncomfortable or are trying to avoid the situation, it could be an indication that something is wrong.\n\n5. The nature of the \"game\": If the game involves teasing, humiliation, or exclusion, it may be a cover-up for bullying.\n\n6. The willingness of the girls to return the backpack: If they are reluctant to give the backpack back or continue to taunt the victim, it could be a sign of bullying.\n\n7. The response of the group when confronted: If the girls become defensive, dismissive, or aggressive when questioned, it may indicate that they are trying to hide their actions."]}], "tstamp": 1686287166.1482828}
+{"question_id": 111, "answer_id": "NJPTPe2PZ7547UMekB9kBM", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["To find the area of the triangle, we can use the formula for the area of a triangle with coordinates A(x1, y1), B(x2, y2), and C(x3, y3):\n\nArea = (1/2) * |(x1(y2 - y3) + x2(y3 - y1) + x3(y1 - y2))|\n\nIn this case, the coordinates are A(0, 0), B(-1, 1), and C(3, 3). Plugging these values into the formula, we get:\n\nArea = (1/2) * |(0(1 - 3) + (-1)(3 - 0) + 3(0 - 1))|\n\nArea = (1/2) * |(-0 + 3 - 3)|\n\nArea = (1/2) * |0|\n\nArea = 0\n\nThe area of the triangle is 0. This means that the three points are collinear, and there is no triangle formed by these points.", "Since the three points are collinear and do not form a triangle, there is no circumscribed circle for this set of points. A circumscribed circle can only be formed around a triangle with non-collinear points."]}], "tstamp": 1686287200.2472498}
+{"question_id": 112, "answer_id": "G5y5Wt7tzmiDnn3Ncyn2ZB", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["In the first year, the startup invested $8000. In the second year, they invested half of that amount, which is $8000 / 2 = $4000. The total amount invested in software development over the two years is $8000 + $4000 = $12000.", "In the third year, the startup will invest half of the second year's amount, which is $4000 / 2 = $2000."]}], "tstamp": 1686287230.6318364}
+{"question_id": 113, "answer_id": "Cr2dJDVXKARY4a3CdeYV85", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["To find the probability that a student likes neither blue nor green, we can use the formula for the probability of the union of two events:\n\nP(A \u222a B) = P(A) + P(B) - P(A \u2229 B)\n\nIn this case, A represents the event of liking blue, B represents the event of liking green, and A \u2229 B represents the event of liking both colors.\n\nP(A) = 58% (probability of liking blue)\nP(B) = 45% (probability of liking green)\nP(A \u2229 B) = 22% (probability of liking both colors)\n\nNow, we can plug these values into the formula:\n\nP(A \u222a B) = 58% + 45% - 22% = 81%\n\nThis gives us the probability that a student likes either blue or green. To find the probability that a student likes neither color, we can subtract this value from 100%:\n\nP(neither blue nor green) = 100% - 81% = 19%\n\nSo, the probability that a randomly picked student would like neither blue nor green is 19%.", "If we select a student who likes green, we want to find the probability that they dislike blue. We know that 22% of students like both colors (blue and green). Since 45% of students like green, we can calculate the probability that a student who likes green dislikes blue as follows:\n\nP(dislikes blue | likes green) = (P(likes green) - P(likes both)) / P(likes green)\n\nP(dislikes blue | likes green) = (45% - 22%) / 45% = 23% / 45% \u2248 0.5111\n\nSo, the probability that a student who likes green would dislike blue is approximately 51.11%."]}], "tstamp": 1686287283.037716}
+{"question_id": 114, "answer_id": "8o3RVi7vGbhRGuN38m7eXb", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["When rolling two dice, there are a total of 6 sides on each die, which means there are 6 x 6 = 36 possible outcomes. To find the probability of rolling a total number that is at least 3, we need to find the number of outcomes that result in a sum of 3 or more and divide that by the total number of outcomes.\n\nThe only sums that are less than 3 are 1 (rolling a 1 on both dice) and 2 (rolling a 1 on one die and a 2 on the other, or vice versa). There are 2 outcomes that result in a sum of 2: (1,1) and (1,2), (2,1).\n\nSo, there are 36 total outcomes and 2 outcomes that result in a sum less than 3. Therefore, there are 36 - 2 = 34 outcomes that result in a sum of 3 or more.\n\nThe probability of rolling a total number that is at least 3 is the number of favorable outcomes (34) divided by the total number of outcomes (36):\n\nProbability = 34/36 = 17/18 \u2248 0.944\n\nSo, the probability of rolling a total number that is at least 3 is approximately 0.944 or 94.4%.", "We already know that the probability of rolling a total number that is at least 3 is 34/36. Now, we need to find the probability of rolling an even number and then combine these probabilities using the principle of inclusion and exclusion.\n\nAn even number can be obtained by rolling (1,1), (1,3), (1,5), (2,2), (2,4), (2,6), (3,1), (3,3), (3,5), (4,2), (4,4), (4,6), (5,1), (5,3), (5,5), (6,2), (6,4), and (6,6). There are 18 outcomes that result in an even sum.\n\nThe probability of rolling an even number is the number of favorable outcomes (18) divided by the total number of outcomes (36):\n\nProbability (even) = 18/36 = 1/2 = 0.5\n\nNow, we need to find the probability of rolling a number that is both even and at least 3. The outcomes that meet both criteria are (2,2), (2,4), (2,6), (4,2), (4,4), (4,6), (6,2), (6,4), and (6,6). There are 9 such outcomes.\n\nThe probability of rolling a number that is both even and at least 3 is the number of favorable outcomes (9) divided by the total number of outcomes (36):\n\nProbability (even and at least 3) = 9/36 = 1/4 = 0.25\n\nNow, we can use the principle of inclusion and exclusion to find the probability of rolling a number that is either even or at least 3:\n\nProbability (even or at least 3) = Probability (even) + Probability (at least 3) - Probability (even and at least 3)\n\nProbability (even or at least 3) = 0.5 + 0.944 - 0.25 = 1.194 - 0.25 = 0.944\n\nSo, the probability of rolling a number that is either even or at least 3 is 0.944 or 94.4%."]}], "tstamp": 1686287374.281228}
+{"question_id": 115, "answer_id": "NvS4RHPbnRvJr4jE66ALAg", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["Let's denote the number of people who got on the bus at the terminal as x.\n\nAt the first bus stop, half of the people got down, so there were x/2 people left on the bus. Then, 4 more people got in, making the total number of people on the bus x/2 + 4.\n\nAt the second bus stop, 6 people got down, so there were (x/2 + 4) - 6 people left on the bus. Then, 8 more people got in, making the total number of people on the bus (x/2 + 4) - 6 + 8.\n\nWe know that there were 25 people heading to the third stop, so we can set up the following equation:\n\n(x/2 + 4) - 6 + 8 = 25\n\nNow, let's solve for x:\n\nx/2 + 4 - 6 + 8 = 25\nx/2 + 6 = 25\nx/2 = 19\nx = 38\n\nSo, 38 people got on the bus at the terminal.", "To find the total money earned by the bus, we need to consider the number of people who got on the bus at the terminal and at each bus stop.\n\nAt the terminal, 38 people got on the bus.\nAt the first bus stop, 4 people got on the bus.\nAt the second bus stop, 8 people got on the bus.\n\nSo, the total number of people who got on the bus is 38 + 4 + 8 = 50 people.\n\nSince the ticket price is $2 per person, the total money earned by the bus is 50 people * $2 = $100."]}], "tstamp": 1686287429.616392}
+{"question_id": 116, "answer_id": "VNzSjZeTByKA4jwfeLi5c6", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["We have two equations:\n\n1) x + y = 4z\n2) xy = 4z^2\n\nFirst, let's solve equation 1 for x:\n\nx = 4z - y\n\nNow, substitute this expression for x in equation 2:\n\n(4z - y)y = 4z^2\n\nExpand the equation:\n\n4zy - y^2 = 4z^2\n\nNow, let's solve this equation for y:\n\ny^2 - 4zy + 4z^2 = 0\n\nThis is a quadratic equation in y. We can use the quadratic formula to solve for y:\n\ny = (4z \u00b1 \u221a((4z)^2 - 4(4z^2))) / 2\n\ny = (4z \u00b1 \u221a(16z^2 - 16z^2)) / 2\n\ny = (4z \u00b1 0) / 2\n\ny = 2z\n\nNow that we have y in terms of z, we can substitute it back into the expression for x:\n\nx = 4z - y\nx = 4z - 2z\nx = 2z\n\nFinally, we can express x - y in terms of z:\n\nx - y = 2z - 2z = 0", "We have the following equations:\n\n1) x + y = 4z\n2) xy = 4z^2\n\nFrom the previous solution, we found that:\n\nx = 2z\ny = 2z\n\nNow, let's express z - x in terms of y. Since x = 2z, we can write z - x as:\n\nz - x = z - 2z = -z\n\nNow, we can use the equation y = 2z to express -z in terms of y:\n\n- z = - (y / 2)\n\nSo, z - x = - (y / 2)."]}], "tstamp": 1686287481.5740478}
+{"question_id": 117, "answer_id": "WTLAqXaca6gT8T4Mq46vuZ", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["To solve the inequality |x + 5| < 10, we can break it down into two separate inequalities:\n\n1) x + 5 < 10\n2) -(x + 5) < 10\n\nNow, let's solve each inequality:\n\n1) x + 5 < 10\nSubtract 5 from both sides:\nx < 5\n\n2) -(x + 5) < 10\nDistribute the negative sign:\n-x - 5 < 10\nAdd 5 to both sides:\n-x < 15\nMultiply both sides by -1 (and remember to flip the inequality sign):\nx > -15\n\nNow, we can combine the two inequalities:\n-15 < x < 5\n\nTo find the number of integers in this range, we can count them:\n-14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4\n\nThere are 19 integers in the solution of the inequality |x + 5| < 10.", "To solve the inequality |x + 10| < 5, we can break it down into two separate inequalities:\n\n1) x + 10 < 5\n2) -(x + 10) < 5\n\nNow, let's solve each inequality:\n\n1) x + 10 < 5\nSubtract 10 from both sides:\nx < -5\n\n2) -(x + 10) < 5\nDistribute the negative sign:\n-x - 10 < 5\nMultiply both sides by -1 (and remember to flip the inequality sign):\nx + 10 > -5\nSubtract 10 from both sides:\nx > -15\n\nNow, we can combine the two inequalities:\n-15 < x < -5\n\nTo find the number of integers in this range, we can count them:\n-14, -13, -12, -11, -10, -9, -8, -7, -6\n\nThere are 9 integers in the solution of the inequality |x + 10| < 5."]}], "tstamp": 1686287605.8658118}
+{"question_id": 118, "answer_id": "NG6DyAsfpXKz2ZZ7nfQ6LD", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["Let the number be x. We are given that x % 10 = 4. We need to find the remainder when 2x is divided by 4, i.e., (2x) % 4.\n\nSince x % 10 = 4, we can write x as x = 10n + 4 for some integer n.\n\nNow, let's find 2x:\n2x = 2(10n + 4) = 20n + 8\n\nNow, let's find the remainder when 2x is divided by 4:\n(20n + 8) % 4 = (20n % 4) + (8 % 4) = 0 + 0 = 0\n\nSo, the remainder when twice the number is divided by 4 is 0.", "We already found that 2x = 20n + 8. Now, we need to find the remainder when 2x is divided by 5, i.e., (2x) % 5.\n\n(20n + 8) % 5 = (20n % 5) + (8 % 5) = 0 + 3 = 3\n\nSo, the remainder when twice the number is divided by 5 is 3."]}], "tstamp": 1686287640.771955}
+{"question_id": 119, "answer_id": "5yMS3djYFTH2QwiNPLW3Yy", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["To find the total cost of Benjamin's purchases, we need to calculate the cost of each type of book and then add them together.\n\nFor the sci-fi novels: 5 copies * $20 each = $100\nFor the history books: 3 copies * $30 each = $90\nFor the philosophy books: 2 copies * $45 each = $90\n\nNow, we add the costs together: $100 (sci-fi) + $90 (history) + $90 (philosophy) = $280\n\nThe total cost of Benjamin's purchases was $280.", "To find the total revenue, we first need to calculate the selling price of each type of book with a 25% markup and then multiply it by the number of copies sold.\n\nFor the sci-fi novels: $20 * 1.25 (25% markup) = $25 each\nFor the history books: $30 * 1.25 (25% markup) = $37.50 each\nFor the philosophy books: $45 * 1.25 (25% markup) = $56.25 each\n\nNow, we calculate the revenue for each type of book:\nSci-fi novels: 5 copies * $25 each = $125\nHistory books: 3 copies * $37.50 each = $112.50\nPhilosophy books: 2 copies * $56.25 each = $112.50\n\nFinally, we add the revenues together: $125 (sci-fi) + $112.50 (history) + $112.50 (philosophy) = $350\n\nBenjamin's total revenue if he sold all the books at a 25% markup would be $350."]}], "tstamp": 1686287683.9092937}
+{"question_id": 120, "answer_id": "NbJahXyyirJ8KKfaMXY6SC", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["To find the value of f(2), substitute x = 2 into the function f(x) = 4x^3 - 9x - 14.\n\nf(2) = 4(2)^3 - 9(2) - 14\nf(2) = 4(8) - 18 - 14\nf(2) = 32 - 18 - 14\nf(2) = 14 - 14\nf(2) = 0\n\nSo, the value of f(2) is 0.", "To find the value of x such that f(x) = 0, we need to solve the equation 4x^3 - 9x - 14 = 0. This is a cubic equation, and finding an exact solution can be quite difficult. However, we can use numerical methods to approximate the solution.\n\nOne such method is the Newton-Raphson method, which iteratively refines an initial guess for the root. The formula for the Newton-Raphson method is:\n\nx_new = x_old - f(x_old) / f'(x_old)\n\nwhere f'(x) is the derivative of f(x) with respect to x. First, let's find the derivative of f(x):\n\nf(x) = 4x^3 - 9x - 14\nf'(x) = 12x^2 - 9\n\nNow, let's apply the Newton-Raphson method with an initial guess of x = 1:\n\n1. x_new = 1 - (4(1)^3 - 9(1) - 14) / (12(1)^2 - 9)\n x_new = 1 - (-9) / 3\n x_new = 1 + 3\n x_new = 4\n\n2. x_new = 4 - (4(4)^3 - 9(4) - 14) / (12(4)^2 - 9)\n x_new = 4 - (256 - 36 - 14) / (192 - 9)\n x_new = 4 - (206) / (183)\n x_new \u2248 2.876\n\n3. x_new \u2248 2.876 - (4(2.876)^3 - 9(2.876) - 14) / (12(2.876)^2 - 9)\n x_new \u2248 2.876 - (99.98) / (99.99)\n x_new \u2248 1.876\n\nWe can continue this process until the change in x_new is very small, indicating that we have found an approximate root. After several iterations, we find that:\n\nx \u2248 1.65\n\nSo, the approximate value of x such that f(x) = 0 is x \u2248 1.65. Note that this is an approximation, and there may be other roots as well."]}], "tstamp": 1686287765.623619}
+{"question_id": 121, "answer_id": "nrgta2MB8aLEabUpEBuqbD", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["Here's a Python program that reads all the text files under a directory and returns the top-5 words with the most number of occurrences:\n\n```python\nimport os\nimport re\nfrom collections import Counter\n\ndef get_files_in_directory(directory, extension=\".txt\"):\n return [os.path.join(directory, f) for f in os.listdir(directory) if f.endswith(extension)]\n\ndef read_file(file_path):\n with open(file_path, \"r\", encoding=\"utf-8\") as file:\n return file.read()\n\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\n\ndef main():\n directory = input(\"Enter the directory path: \")\n text_files = get_files_in_directory(directory)\n\n word_counter = Counter()\n for file_path in text_files:\n text = read_file(file_path)\n word_counter += count_words(text)\n\n top_5_words = word_counter.most_common(5)\n\n print(\"\\nTop 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n print(f\"{word}: {count}\")\n\nif __name__ == \"__main__\":\n main()\n```\n\nTo use the program, simply run it and enter the directory path when prompted. The program will then display the top-5 words with the most number of occurrences in the text files under the specified directory.", "Yes, you can parallelize the program using Python's `concurrent.futures` module. Here's the updated version of the program that processes the text files in parallel:\n\n```python\nimport os\nimport re\nfrom collections import Counter\nfrom concurrent.futures import ProcessPoolExecutor\n\ndef get_files_in_directory(directory, extension=\".txt\"):\n return [os.path.join(directory, f) for f in os.listdir(directory) if f.endswith(extension)]\n\ndef read_file(file_path):\n with open(file_path, \"r\", encoding=\"utf-8\") as file:\n return file.read()\n\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\n\ndef process_file(file_path):\n text = read_file(file_path)\n return count_words(text)\n\ndef main():\n directory = input(\"Enter the directory path: \")\n text_files = get_files_in_directory(directory)\n\n word_counter = Counter()\n\n with ProcessPoolExecutor() as executor:\n results = executor.map(process_file, text_files)\n\n for result in results:\n word_counter += result\n\n top_5_words = word_counter.most_common(5)\n\n print(\"\\nTop 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n print(f\"{word}: {count}\")\n\nif __name__ == \"__main__\":\n main()\n```\n\nIn this version of the program, we use a `ProcessPoolExecutor` to process the text files in parallel. The `process_file` function reads and counts the words in a single file, and the results are combined in the main function using the `+=` operator on the `Counter` objects."]}], "tstamp": 1686287847.5641448}
+{"question_id": 122, "answer_id": "k3DDMNG9SFBxdzBr6g3VRF", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["Here's a C++ program to find the nth Fibonacci number using recursion:\n\n```cpp\n#include \n\n// Function to find the nth Fibonacci number using recursion\nint fibonacci(int n) {\n if (n <= 1) {\n return n;\n } else {\n return fibonacci(n - 1) + fibonacci(n - 2);\n }\n}\n\nint main() {\n int n;\n\n std::cout << \"Enter the value of n: \";\n std::cin >> n;\n\n int result = fibonacci(n);\n std::cout << \"The \" << n << \"th Fibonacci number is: \" << result << std::endl;\n\n return 0;\n}\n```\n\nTo compile and run the program, save it to a file named `fibonacci_recursion.cpp` and then use the following commands:\n\n```sh\ng++ -o fibonacci_recursion fibonacci_recursion.cpp\n./fibonacci_recursion\n```\n\nEnter the value of `n` when prompted, and the program will output the nth Fibonacci number. Note that this recursive approach can be slow for large values of `n` due to the repeated calculations. A more efficient approach would be to use dynamic programming or memoization.", "Here's a C++ program to find the nth number in the sequence where each number is the sum of the three preceding ones, with the first three numbers being 0, -1, and -1:\n\n```cpp\n#include \n#include \n\n// Function to find the nth number in the sequence\nint find_nth_number(int n) {\n std::vector sequence = {0, -1, -1};\n\n for (int i = 3; i < n; ++i) {\n sequence.push_back(sequence[i - 1] + sequence[i - 2] + sequence[i - 3]);\n }\n\n return sequence[n - 1];\n}\n\nint main() {\n int n;\n\n std::cout << \"Enter the value of n: \";\n std::cin >> n;\n\n int result = find_nth_number(n);\n std::cout << \"The \" << n << \"th number in the sequence is: \" << result << std::endl;\n\n return 0;\n}\n```\n\nTo compile and run the program, save it to a file named `sequence.cpp` and then use the following commands:\n\n```sh\ng++ -o sequence sequence.cpp\n./sequence\n```\n\nEnter the value of `n` when prompted, and the program will output the nth number in the sequence. This approach uses a loop and a vector to store the sequence, making it more efficient than a recursive solution."]}], "tstamp": 1686287917.543315}
+{"question_id": 123, "answer_id": "VAG9qBBc46KaftN4xTmcQJ", "model_id": "gpt-4", "choices": [{"index": 0, "turns": ["\n\n\n \n \n Random Joke Generator\n \n \n\n\n
Random Joke Generator
\n \n \n\n", "You can use CSS to change the color of the jokes to red by adding a style rule for the paragraph element where the joke is displayed. In the `\n \n\n\n