Soptq commited on
Commit
03a6065
·
verified ·
1 Parent(s): a0907c0

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +48 -0
  2. 20250514-en/dataset_info.json +1 -0
  3. 20250514-en/test.parquet +3 -0
  4. 20250514-zh/dataset_info.json +1 -0
  5. 20250514-zh/test.parquet +3 -0
  6. evaluations/README.md +5 -0
  7. evaluations/scripts/chatgpt.sh +28 -0
  8. evaluations/scripts/internvl3.sh +34 -0
  9. evaluations/tasks/sfe/__pycache__/utils.cpython-312.pyc +0 -0
  10. evaluations/tasks/sfe/sfe-en.yaml +52 -0
  11. evaluations/tasks/sfe/sfe-zh.yaml +52 -0
  12. evaluations/tasks/sfe/utils.py +633 -0
  13. raw_data/earth/2023.zip +3 -0
  14. raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_199001-199912.nc +3 -0
  15. raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_200001-200912.nc +3 -0
  16. raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_201001-201412.nc +3 -0
  17. raw_data/earth/CMIP6/thetao_Omon_CAS-ESM2-0_historical_r1i1p1f1_gn_195001-201412.nc +3 -0
  18. raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_198001-198912.nc +3 -0
  19. raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_199001-199912.nc +3 -0
  20. raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_200001-200912.nc +3 -0
  21. raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_201001-201412.nc +3 -0
  22. raw_data/earth/ERA5/t2m.nc +3 -0
  23. raw_data/earth/GODAS/1981.nc +3 -0
  24. raw_data/earth/GODAS/1982.nc +3 -0
  25. raw_data/earth/GODAS/1983.nc +3 -0
  26. raw_data/earth/GODAS/1984.nc +3 -0
  27. raw_data/earth/GODAS/1985.nc +3 -0
  28. raw_data/earth/GODAS/1986.nc +3 -0
  29. raw_data/earth/GODAS/1987.nc +3 -0
  30. raw_data/earth/GODAS/1988.nc +3 -0
  31. raw_data/earth/GODAS/1989.nc +3 -0
  32. raw_data/earth/GODAS/1991.nc +3 -0
  33. raw_data/earth/GODAS/1992.nc +3 -0
  34. raw_data/earth/GODAS/1993.nc +3 -0
  35. raw_data/earth/GODAS/1994.nc +3 -0
  36. raw_data/earth/GODAS/1995.nc +3 -0
  37. raw_data/earth/GODAS/1996.nc +3 -0
  38. raw_data/earth/GODAS/1997.nc +3 -0
  39. raw_data/earth/GODAS/1998.nc +3 -0
  40. raw_data/earth/GODAS/1999.nc +3 -0
  41. raw_data/earth/GODAS/2000.nc +3 -0
  42. raw_data/earth/GODAS/2001.nc +3 -0
  43. raw_data/earth/GODAS/2002.nc +3 -0
  44. raw_data/earth/GODAS/2003.nc +3 -0
  45. raw_data/earth/GODAS/2004.nc +3 -0
  46. raw_data/earth/GODAS/2005.nc +3 -0
  47. raw_data/earth/GODAS/2006.nc +3 -0
  48. raw_data/earth/GODAS/2007.nc +3 -0
  49. raw_data/earth/GODAS/2008.nc +3 -0
  50. raw_data/earth/GODAS/2009.nc +3 -0
.gitattributes CHANGED
@@ -57,3 +57,51 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ raw_data/earth/GODAS/2020.nc filter=lfs diff=lfs merge=lfs -text
61
+ raw_data/earth/GODAS/2014.nc filter=lfs diff=lfs merge=lfs -text
62
+ raw_data/earth/GODAS/1995.nc filter=lfs diff=lfs merge=lfs -text
63
+ raw_data/earth/GODAS/1989.nc filter=lfs diff=lfs merge=lfs -text
64
+ raw_data/earth/GODAS/2009.nc filter=lfs diff=lfs merge=lfs -text
65
+ raw_data/earth/GODAS/2011.nc filter=lfs diff=lfs merge=lfs -text
66
+ raw_data/earth/GODAS/1992.nc filter=lfs diff=lfs merge=lfs -text
67
+ raw_data/earth/GODAS/1986.nc filter=lfs diff=lfs merge=lfs -text
68
+ raw_data/earth/GODAS/2006.nc filter=lfs diff=lfs merge=lfs -text
69
+ raw_data/earth/GODAS/1983.nc filter=lfs diff=lfs merge=lfs -text
70
+ raw_data/earth/GODAS/2003.nc filter=lfs diff=lfs merge=lfs -text
71
+ raw_data/earth/GODAS/2000.nc filter=lfs diff=lfs merge=lfs -text
72
+ raw_data/earth/GODAS/2019.nc filter=lfs diff=lfs merge=lfs -text
73
+ raw_data/earth/GODAS/2016.nc filter=lfs diff=lfs merge=lfs -text
74
+ raw_data/earth/GODAS/1997.nc filter=lfs diff=lfs merge=lfs -text
75
+ raw_data/earth/GODAS/2013.nc filter=lfs diff=lfs merge=lfs -text
76
+ raw_data/earth/GODAS/1994.nc filter=lfs diff=lfs merge=lfs -text
77
+ raw_data/earth/GODAS/1988.nc filter=lfs diff=lfs merge=lfs -text
78
+ raw_data/earth/GODAS/2008.nc filter=lfs diff=lfs merge=lfs -text
79
+ raw_data/earth/GODAS/2010.nc filter=lfs diff=lfs merge=lfs -text
80
+ raw_data/earth/GODAS/1991.nc filter=lfs diff=lfs merge=lfs -text
81
+ raw_data/earth/GODAS/1985.nc filter=lfs diff=lfs merge=lfs -text
82
+ raw_data/earth/GODAS/2005.nc filter=lfs diff=lfs merge=lfs -text
83
+ raw_data/earth/GODAS/1982.nc filter=lfs diff=lfs merge=lfs -text
84
+ raw_data/earth/GODAS/2002.nc filter=lfs diff=lfs merge=lfs -text
85
+ raw_data/earth/GODAS/2018.nc filter=lfs diff=lfs merge=lfs -text
86
+ raw_data/earth/GODAS/1999.nc filter=lfs diff=lfs merge=lfs -text
87
+ raw_data/earth/GODAS/2015.nc filter=lfs diff=lfs merge=lfs -text
88
+ raw_data/earth/GODAS/1996.nc filter=lfs diff=lfs merge=lfs -text
89
+ raw_data/earth/GODAS/2012.nc filter=lfs diff=lfs merge=lfs -text
90
+ raw_data/earth/GODAS/1993.nc filter=lfs diff=lfs merge=lfs -text
91
+ raw_data/earth/GODAS/1987.nc filter=lfs diff=lfs merge=lfs -text
92
+ raw_data/earth/GODAS/2007.nc filter=lfs diff=lfs merge=lfs -text
93
+ raw_data/earth/GODAS/1984.nc filter=lfs diff=lfs merge=lfs -text
94
+ raw_data/earth/GODAS/2004.nc filter=lfs diff=lfs merge=lfs -text
95
+ raw_data/earth/GODAS/1981.nc filter=lfs diff=lfs merge=lfs -text
96
+ raw_data/earth/GODAS/2001.nc filter=lfs diff=lfs merge=lfs -text
97
+ raw_data/earth/GODAS/2017.nc filter=lfs diff=lfs merge=lfs -text
98
+ raw_data/earth/GODAS/1998.nc filter=lfs diff=lfs merge=lfs -text
99
+ raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_200001-200912.nc filter=lfs diff=lfs merge=lfs -text
100
+ raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_201001-201412.nc filter=lfs diff=lfs merge=lfs -text
101
+ raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_199001-199912.nc filter=lfs diff=lfs merge=lfs -text
102
+ raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_198001-198912.nc filter=lfs diff=lfs merge=lfs -text
103
+ raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_201001-201412.nc filter=lfs diff=lfs merge=lfs -text
104
+ raw_data/earth/CMIP6/thetao_Omon_CAS-ESM2-0_historical_r1i1p1f1_gn_195001-201412.nc filter=lfs diff=lfs merge=lfs -text
105
+ raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_199001-199912.nc filter=lfs diff=lfs merge=lfs -text
106
+ raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_200001-200912.nc filter=lfs diff=lfs merge=lfs -text
107
+ raw_data/earth/ERA5/t2m.nc filter=lfs diff=lfs merge=lfs -text
20250514-en/dataset_info.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test"]}
20250514-en/test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4dfe8b14147debfc64c3d1e00f09671032defd99b21e3293e34a2dfeb2c6ae13
3
+ size 133444
20250514-zh/dataset_info.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["test"]}
20250514-zh/test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:464f730e2b71cbc83f8c0713394de38bb2eaeba83d2df8ea871b74b15c5926ec
3
+ size 133261
evaluations/README.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Evaluations of SFE
2
+
3
+ We evaluate the SFE dataset using lmms-eval. The evaluation code is listed in this folder.
4
+
5
+ We only provide the evaluation scripts for GPT series and InternVL series. Evaluation scripts for other models follow the same format and can be easily adapted.
evaluations/scripts/chatgpt.sh ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Evaluation driver for the GPT series via lmms-eval.
# Fill in credentials/paths before running.
export HF_HOME=
export HF_TOKEN=
export OPENAI_API_KEY=
export OPENAI_API_BASE=

# API_TYPE / MODEL_VERSION select the judge model used by utils.get_chat_response.
export API_TYPE="openai"
export MODEL_VERSION="gpt-4o-2024-11-20"

# Uncomment (and set API_TYPE=azure) to route the judge through Azure OpenAI.
# export AZURE_OPENAI_API_KEY=""
# export AZURE_OPENAI_API_BASE=""
# export AZURE_OPENAI_API_VERSION="2023-07-01-preview"

# pip install git+https://github.com/EvolvingLMMs-Lab/lmms-eval.git

# GPT Series
# FILE_NAME is read by utils.sfe_save_results to name the output JSON.
export FILE_NAME="lmms_eval_gpt4o_en.json"
python3 -m lmms_eval \
    --model openai_compatible \
    --model_args model_version=gpt-4o-2024-11-20,azure_openai=False \
    --tasks sfe-en \
    --batch_size 1

export FILE_NAME="lmms_eval_gpt4o_zh.json"
python3 -m lmms_eval \
    --model openai_compatible \
    --model_args model_version=gpt-4o-2024-11-20,azure_openai=False \
    --tasks sfe-zh \
    --batch_size 1
evaluations/scripts/internvl3.sh ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Evaluation driver for InternVL3 served through vLLM via lmms-eval.
# Fill in credentials/paths before running.
export HF_HOME=
export HF_TOKEN=
export OPENAI_API_KEY=
export OPENAI_API_BASE=

# Judge model used by utils.get_chat_response for the llm_score metric.
export API_TYPE="openai"
export MODEL_VERSION="gpt-4o-2024-11-20"

export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7

export VLLM_WORKER_MULTIPROC_METHOD=spawn
# export NCCL_BLOCKING_WAIT=1
# export NCCL_TIMEOUT=18000000
# export NCCL_DEBUG=DEBUG

# pip install git+https://github.com/EvolvingLMMs-Lab/lmms-eval.git
# FILE_NAME is read by utils.sfe_save_results to name the output JSON.
export FILE_NAME="lmms_eval_internvl3-78b_en.json"
python3 -m lmms_eval \
    --model vllm \
    --model_args model_version=<MODEL_PATH>,tensor_parallel_size=8 \
    --tasks sfe-en \
    --batch_size 1 \
    --log_samples \
    --log_samples_suffix vllm

export FILE_NAME="lmms_eval_internvl3-78b_zh.json"
# BUGFIX: this run previously passed `--tasks sfe-en`, so the zh output file
# contained a second copy of the English results instead of the Chinese run.
python3 -m lmms_eval \
    --model vllm \
    --model_args model_version=<MODEL_PATH>,tensor_parallel_size=8 \
    --tasks sfe-zh \
    --batch_size 1 \
    --log_samples \
    --log_samples_suffix vllm
34
+
evaluations/tasks/sfe/__pycache__/utils.cpython-312.pyc ADDED
Binary file (34.4 kB). View file
 
evaluations/tasks/sfe/sfe-en.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# lmms-eval task config for SFE, English split.
# NOTE(review): dataset_path is a placeholder — replace DATASET_PATH_EN with the
# actual dataset path/identifier before running.
dataset_path: DATASET_PATH_EN
task: "sfe-en"
test_split: test
output_type: generate_until
doc_to_visual: !function utils.sfe_doc_to_visual
doc_to_text: !function utils.sfe_doc_to_text
doc_to_target: "answer"
process_results: !function utils.sfe_process_results

generation_kwargs:
  max_new_tokens: 1024

# Each metric is aggregated by a helper in utils.py; per-sample scores of -1
# mark "not applicable" items and are skipped by the aggregators.
metric_list:
  - metric: all_info
    aggregation: !function utils.sfe_save_results
    higher_is_better: true
  - metric: rouge_score
    aggregation: !function utils.sfe_aggregate_rouge_results
    higher_is_better: true
  - metric: bert_score
    aggregation: !function utils.sfe_aggregate_bertscore_results
    higher_is_better: true
  - metric: bleu_score
    aggregation: !function utils.sfe_aggregate_bleuscore_results
    higher_is_better: true
  - metric: meteor_score
    aggregation: !function utils.sfe_aggregate_meteor_score_results
    higher_is_better: true
  - metric: llm_score
    aggregation: !function utils.sfe_aggregate_judge_results
    higher_is_better: true
  # Grounding-only metrics (tasks e011/e012): bbox parse success and IoU.
  - metric: execute_succ_rate
    aggregation: !function utils.sfe_aggregate_execute_succ_rate_results
    higher_is_better: true
  - metric: iou_score
    aggregation: !function utils.sfe_aggregate_iou_score_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc01_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc03_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc05_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc07_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc09_results
    higher_is_better: true
evaluations/tasks/sfe/sfe-zh.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# lmms-eval task config for SFE, Chinese split (mirrors sfe-en.yaml).
# NOTE(review): dataset_path is a placeholder — replace DATASET_PATH_ZH with the
# actual dataset path/identifier before running.
dataset_path: DATASET_PATH_ZH
task: "sfe-zh"
test_split: test
output_type: generate_until
doc_to_visual: !function utils.sfe_doc_to_visual
doc_to_text: !function utils.sfe_doc_to_text
doc_to_target: "answer"
process_results: !function utils.sfe_process_results

generation_kwargs:
  max_new_tokens: 1024

# Each metric is aggregated by a helper in utils.py; per-sample scores of -1
# mark "not applicable" items and are skipped by the aggregators.
metric_list:
  - metric: all_info
    aggregation: !function utils.sfe_save_results
    higher_is_better: true
  - metric: rouge_score
    aggregation: !function utils.sfe_aggregate_rouge_results
    higher_is_better: true
  - metric: bert_score
    aggregation: !function utils.sfe_aggregate_bertscore_results
    higher_is_better: true
  - metric: bleu_score
    aggregation: !function utils.sfe_aggregate_bleuscore_results
    higher_is_better: true
  - metric: meteor_score
    aggregation: !function utils.sfe_aggregate_meteor_score_results
    higher_is_better: true
  - metric: llm_score
    aggregation: !function utils.sfe_aggregate_judge_results
    higher_is_better: true
  # Grounding-only metrics (tasks e011/e012): bbox parse success and IoU.
  - metric: execute_succ_rate
    aggregation: !function utils.sfe_aggregate_execute_succ_rate_results
    higher_is_better: true
  - metric: iou_score
    aggregation: !function utils.sfe_aggregate_iou_score_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc01_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc03_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc05_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc07_results
    higher_is_better: true
  - metric: [email protected]
    aggregation: !function utils.sfe_aggregate_acc09_results
    higher_is_better: true
evaluations/tasks/sfe/utils.py ADDED
@@ -0,0 +1,633 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import json
3
+ import os
4
+ import random
5
+ import re
6
+ import time
7
+ from collections import defaultdict
8
+ from pathlib import Path
9
+ import copy
10
+ import math
11
+
12
+ from PIL import Image
13
+
14
+ import numpy as np
15
+ import requests
16
+ import yaml
17
+ from loguru import logger as eval_logger
18
+ from openai import AzureOpenAI, OpenAI
19
+
20
+ from rouge_score import rouge_scorer
21
+ from bert_score import score
22
+ import pymeteor.pymeteor as pymeteor
23
+
24
+ from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
25
+ from nltk.translate.meteor_score import meteor_score
26
+
27
+ from lmms_eval.tasks._task_utils.file_utils import generate_submission_file
28
+
29
+ import torch
30
+
31
+ NUM_SECONDS_TO_SLEEP = 5
32
+ API_TYPE = os.getenv("API_TYPE", "openai")
33
+ MODEL_VERSION = os.getenv("MODEL_VERSION", "gpt-4o-2024-08-06")
34
+ FILE_NAME = os.getenv("FILE_NAME", "sfe_test.json")
35
+
36
+ JUDGE_RULES = """You are a strict evaluator assessing answer correctness. You must score the model's prediction on a scale from 0 to 9, where 0 represents an entirely incorrect answer and 9 indicates a highly correct answer.
37
+ # Input
38
+ Question:
39
+ ```
40
+ {question}
41
+ ```
42
+ Ground Truth Answer:
43
+ ```
44
+ {answer}
45
+ ```
46
+ Model Prediction:
47
+ ```
48
+ {pred}
49
+ ```
50
+
51
+
52
+ # Evaluation Rules
53
+ - The model prediction may contain the reasoning process, you should spot the final answer from it.
54
+ - For multiple-choice questions: Assign a higher score if the predicted answer matches the ground truth, either by option letters or content. Include partial credit for answers that are close in content.
55
+ - For exact match and open-ended questions:
56
+ * Assign a high score if the prediction matches the answer semantically, considering variations in format.
57
+ * Deduct points for partially correct answers or those with incorrect additional information.
58
+ - Ignore minor differences in formatting, capitalization, or spacing since the model may explain in a different way.
59
+ - Treat numerical answers as correct if they match within reasonable precision
60
+ - For questions requiring units, both value and unit must be correct
61
+
62
+ # Scoring Guide
63
+ Provide a single integer from 0 to 9 to reflect your judgment of the answer's correctness.
64
+
65
+ # Strict Output format example
66
+ 4"""
67
+
68
+
69
# Build the judge-model client once at import time, selected by API_TYPE.
if API_TYPE == "openai":
    API_URL = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
    API_KEY = os.getenv("OPENAI_API_KEY", "YOUR_API_KEY")
    client = OpenAI(base_url=API_URL, api_key=API_KEY)
elif API_TYPE == "azure":
    # NOTE(review): the fallback AZURE_ENDPOINT is a token-issuing URL, not an
    # Azure OpenAI endpoint — confirm AZURE_ENDPOINT is always set in practice.
    # Also note: any other API_TYPE leaves `client` undefined, so
    # get_chat_response would raise NameError.
    API_URL = os.getenv("AZURE_ENDPOINT", "https://api.cognitive.microsoft.com/sts/v1.0/issueToken")
    API_KEY = os.getenv("AZURE_API_KEY", "YOUR_API_KEY")
    client = AzureOpenAI(azure_endpoint=API_URL, api_version="2023-07-01-preview", api_key=API_KEY)
77
+
78
+
79
+ scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True)
80
+
81
+
82
def get_chat_response(content: str, max_tokens: int, retries: int = 5) -> str:
    """Send one judge request to the configured chat model and return its reply.

    Args:
        content: User-message text (the filled-in JUDGE_RULES prompt).
        max_tokens: Completion token budget for the judge reply.
        retries: Number of attempts before giving up.

    Returns:
        The stripped reply text, or "" after all attempts fail.
    """
    messages = [
        {
            "role": "system",
            "content": "You are a helpful and precise assistant for checking the correctness of the answer.",
        },
        {"role": "user", "content": content},
    ]

    payload = {
        "model": MODEL_VERSION,
        "messages": messages,
        "temperature": 0.0,  # deterministic judging
        "max_tokens": max_tokens,
    }

    for attempt in range(retries):
        try:
            response = client.chat.completions.create(**payload)
            return response.choices[0].message.content.strip()
        except Exception as e:
            # BUGFIX: the original only retried requests.exceptions.RequestException,
            # but the OpenAI SDK raises its own exception types (APIError,
            # RateLimitError, ...), so every real failure fell through to a
            # catch-all that returned "" immediately. Retry on any exception.
            eval_logger.warning(f"Request failed on attempt {attempt+1}: {e}")
            if attempt == retries - 1:
                eval_logger.error(f"Failed to get response after {retries} attempts")
                return ""
            time.sleep(NUM_SECONDS_TO_SLEEP)
    return ""
115
+
116
+
117
def parse_float_sequence_within(input_str):
    """Extract bounding boxes from a string of the form "[x1,y1,x2,y2;...]".

    Groups are separated by ';' inside the outermost bracket pair; each group
    must contain exactly four comma-separated floats to count as a box.

    Returns a list of 4-float lists, or None when no valid box is found.
    """
    bracketed = re.search(r"\[(.*)\]", input_str)
    if bracketed is None:
        return None

    boxes = []
    for chunk in bracketed.group(1).split(";"):
        fields = chunk.split(",")
        if len(fields) != 4:
            continue
        try:
            boxes.append([float(field) for field in fields])
        except ValueError:
            # Non-numeric group — skip it, same as the original's broad except.
            continue

    return boxes if boxes else None
141
+
142
+
143
def compute_iou(box1, box2):
    """
    Compute the Intersection over Union (IoU) of two bounding boxes.

    Parameters:
    - box1 (list of float): Bounding box [x_min, y_min, x_max, y_max].
    - box2 (list of float): Bounding box [x_min, y_min, x_max, y_max].

    Returns:
    - float: IoU of box1 and box2; 0.0 when the union has zero area.
    """
    # Determine the coordinates of the intersection rectangle
    x_left = max(box1[0], box2[0])
    y_top = max(box1[1], box2[1])
    x_right = min(box1[2], box2[2])
    y_bottom = min(box1[3], box2[3])

    # Compute the area of intersection (clamped at 0 for disjoint boxes)
    intersection_area = max(0, x_right - x_left) * max(0, y_bottom - y_top)

    # Compute the area of both bounding boxes
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])

    # Compute the area of the union
    union_area = box1_area + box2_area - intersection_area

    # BUGFIX: two degenerate (zero-area) boxes made union_area 0 and the
    # original raised ZeroDivisionError; define IoU as 0.0 in that case.
    if union_area <= 0:
        return 0.0

    return intersection_area / union_area
174
+
175
+
176
def greedy_iou(answers, preds):
    """Greedily match predicted boxes to ground-truth boxes and average the IoU.

    Each prediction, in order, claims the unmatched ground-truth box with the
    highest IoU; matching stops once every ground-truth box is claimed. The
    summed IoU is normalized by the number of ground-truth boxes.

    Parameters:
    - answers: list of ground-truth boxes [x_min, y_min, x_max, y_max].
    - preds: list of predicted boxes in the same format.

    Returns:
    - float: mean IoU over ground-truth boxes; 0.0 when there are none.
    """
    # BUGFIX: the original divided by len(answers) unconditionally and raised
    # ZeroDivisionError for an empty ground-truth list.
    if not answers:
        return 0.0

    score = 0.0
    n_answer = len(answers)
    selected = []
    for pred in preds:
        if len(selected) == n_answer:
            break
        # Score against every still-unmatched answer; already-matched answers
        # get -1 so they cannot be chosen twice.
        _scores = [compute_iou(answer, pred) if i not in selected else -1 for i, answer in enumerate(answers)]
        max_index = _scores.index(max(_scores))
        score += max(_scores)
        selected.append(max_index)

    return score / n_answer
189
+
190
+
191
+
192
def construct_prompt(doc):
    """Assemble the full text prompt for one SFE document.

    The prompt is: an expert-role sentence tailored to doc["field"], a
    question-type instruction, a blank line, the question, and — for MCQ —
    the options, one per line.

    Raises ValueError for an unrecognized doc["question_type"].
    """
    type_instructions = {
        "mcq": "\nThe question is a multiple-choice question. Answer with the option letter from the given choices.",
        "exact_match": "\nThe question is an exact match question. Answer the question using a single word or phrase.",
        "open_ended": "\nThe question is an open-ended question. Answer the question using a phrase.",
    }

    qtype = doc["question_type"]
    if qtype not in type_instructions:
        raise ValueError(f"Unknown question type: {doc['question_type']}")

    header = f"You are an expert in {doc['field']} and need to solve the following question."
    prompt = f"{header}{type_instructions[qtype]}\n\n{doc['question']}"

    # Only multiple-choice questions append their options.
    if qtype == "mcq":
        prompt = prompt + "\n" + "\n".join(doc["options"])

    return prompt
216
+
217
+
218
def sfe_doc_to_text(doc, lmms_eval_specific_kwargs=None):
    """Build the text prompt for one document (lmms-eval doc_to_text hook).

    BUGFIX: the original forwarded three extra positional arguments
    (multiple_choice_prompt, open_ended_prompt, prompt_type) to
    construct_prompt, which accepts only `doc`, so any task config that set
    lmms_eval_specific_kwargs crashed with a TypeError. The kwargs parameter
    is kept for interface compatibility but is intentionally unused.
    """
    return construct_prompt(doc)
224
+
225
+
226
def sfe_doc_to_visual(doc):
    """Load every image referenced by doc["images"] as an RGB PIL image.

    The original also called construct_prompt(doc) here and discarded the
    result — dead work, removed.
    """
    return [Image.open(image).convert("RGB") for image in doc["images"]]
231
+
232
def sfe_doc_to_visual_claude(doc):
    """Load doc["images"] as RGB PIL images, capping the longest side at 8000 px.

    Claude rejects very large images, so any image whose longest side exceeds
    8000 px is downscaled proportionally (LANCZOS) to fit.
    """
    loaded = []
    for path in doc["images"]:
        img = Image.open(path).convert("RGB")
        longest_side = max(img.size)
        if longest_side > 8000:
            ratio = 8000 / longest_side
            new_size = (
                min(int(img.size[0] * ratio), 8000),
                min(int(img.size[1] * ratio), 8000),
            )
            img = img.resize(new_size, Image.LANCZOS)
        loaded.append(img)
    return loaded
242
+
243
+
244
def sfe_doc_to_visual_doubao(doc):
    """Load doc["images"] as RGB PIL images, capping total pixel count at 36 MP.

    NOTE(review): the area ratio is applied to each side rather than its
    square root, so oversized images shrink well below the 36 MP cap —
    confirm this is intended.
    """
    loaded = []
    for path in doc["images"]:
        img = Image.open(path).convert("RGB")
        width, height = img.size
        if width * height > 36000000:
            ratio = 36000000 / (width * height)
            img = img.resize((math.floor(width * ratio), math.floor(height * ratio)), Image.LANCZOS)
        loaded.append(img)
    return loaded
254
+
255
+
256
def sfe_process_results(doc, results):
    """Score one model prediction against the ground truth (lmms-eval hook).

    Grounding tasks (id prefix e011/e012) are scored by bbox parse success and
    greedy IoU; all text metrics are set to the sentinel -1. All other tasks
    get an LLM-judge score, and open-ended questions additionally get ROUGE-L,
    BERTScore, BLEU and METEOR; metrics that do not apply are -1 so the
    aggregators can skip them.

    Returns a dict with one entry per metric key declared in the task YAML;
    the acc@* keys all share the iou_score info dict, as in the original.
    """
    question_type = doc["question_type"]

    parsed_preds = []
    rough_scores = []
    bertscore_scores = []
    bleu_scores = []
    meteor_scores = []
    llm_scores = []
    execute_success_rate = []
    iou_scores = []

    assert len(results) == 1, f"Expected one result, got {len(results)}"
    for pred in results:
        formatted_question = construct_prompt(doc)
        answer = doc["answer"]

        if doc["id"].split("/")[0].lower() in ["e011", "e012"]:
            # Grounding task: compare predicted bboxes against ground truth.
            answer_bboxs = parse_float_sequence_within(answer)
            pred_bboxs = parse_float_sequence_within(pred)

            if pred_bboxs is not None:
                execute_success_rate.append(1)
                iou_scores.append(greedy_iou(answer_bboxs, pred_bboxs))
            else:
                # Prediction contained no parseable bbox.
                execute_success_rate.append(0)
                iou_scores.append(-1)

            # Text metrics do not apply to grounding items.
            rough_scores.append(-1)
            bertscore_scores.append(-1)
            bleu_scores.append(-1)
            meteor_scores.append(-1)
            llm_scores.append(-1)
        else:
            if question_type == "open_ended":
                # Each text metric is best-effort: a failure scores 0 rather
                # than aborting the whole sample.
                try:
                    rouge_score = scorer.score(answer, pred)
                    rough_scores.append(rouge_score["rougeL"].fmeasure)
                except:
                    rough_scores.append(0.)

                try:
                    bertscore = score([answer], [pred], lang="multi", device="cuda" if torch.cuda.is_available() else "cpu")[2].item()
                    bertscore_scores.append(bertscore)
                except:
                    bertscore_scores.append(0.)

                try:
                    chencherry = SmoothingFunction()
                    bleu_score = sentence_bleu([answer.strip().split()], pred.strip().split(), smoothing_function=chencherry.method1)
                    bleu_scores.append(bleu_score)
                except:
                    bleu_scores.append(0.)

                try:
                    # BUGFIX: the original wrote `meteor_score = meteor_score(...)`,
                    # which makes `meteor_score` a local variable, so the call
                    # raised UnboundLocalError, the bare except swallowed it,
                    # and METEOR was silently always 0.
                    m_score = meteor_score([answer.strip().split()], pred.strip().split())
                    meteor_scores.append(m_score)
                except:
                    meteor_scores.append(0.)
            else:
                rough_scores.append(-1)
                bertscore_scores.append(-1)
                bleu_scores.append(-1)
                meteor_scores.append(-1)

            # llm_as_a_judge: score every non-grounding question type.
            llm_judge_prompt = JUDGE_RULES.format(question=formatted_question, answer=answer, pred=pred)
            llm_judge_score = get_chat_response(llm_judge_prompt, max_tokens=20, retries=3)
            llm_scores.append(llm_judge_score)

            execute_success_rate.append(-1)
            iou_scores.append(-1)

        parsed_preds.append(pred)

    # Shared identification fields; each metric dict adds its own score list.
    base_info = {
        "id": doc["id"],
        "field": doc["field"],
        "question_type": doc["question_type"],
        "answer": doc["answer"],
        "parsed_pred": parsed_preds,
    }

    all_info = {
        **base_info,
        "rouge_score": rough_scores,
        "bertscore": bertscore_scores,
        "bleu_score": bleu_scores,
        "meteor_score": meteor_scores,
        "llm_score": llm_scores,
        "execute_success_rate": execute_success_rate,
        "iou_score": iou_scores,
    }

    iou_score_info = {**base_info, "iou_score": iou_scores}

    return {
        "all_info": all_info,
        "rouge_score": {**base_info, "rouge_score": rough_scores},
        "bert_score": {**base_info, "bertscore": bertscore_scores},
        "bleu_score": {**base_info, "bleu_score": bleu_scores},
        "meteor_score": {**base_info, "meteor_score": meteor_scores},
        "llm_score": {**base_info, "llm_score": llm_scores},
        "execute_succ_rate": {**base_info, "execute_success_rate": execute_success_rate},
        "iou_score": iou_score_info,
        "[email protected]": iou_score_info,
        "[email protected]": iou_score_info,
        "[email protected]": iou_score_info,
        "[email protected]": iou_score_info,
        "[email protected]": iou_score_info,
    }
427
+
428
+
429
def sfe_save_results(results, args):
    """Dump all per-sample "all_info" dicts to a JSON file named by $FILE_NAME.

    Used as the aggregation hook for the all_info metric; the returned value
    is a dummy score.

    NOTE(review): the output directory is a hard-coded cluster-specific
    absolute path — consider making it configurable.
    """
    path = os.path.join("/fs-computility/ai4sData/earth-shared/SFE/lmms-eval/examples/sfe/results", FILE_NAME)
    # BUGFIX: create the directory first; open(..., "w") raised
    # FileNotFoundError on machines where it does not already exist.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w") as f:
        json.dump(results, f)
    eval_logger.info(f"Results saved to {path}.")

    return 0.0
436
+
437
+
438
def sfe_aggregate_rouge_results(results, args):
    """Mean rougeL score over valid samples; -1 when none are valid.

    Samples carry -1 when ROUGE does not apply (non open-ended questions);
    those are excluded from both the sum and the count.
    """
    running = 0.0
    counted = 0
    for item in results:
        try:
            value = float(item["rouge_score"][0])
        except:
            eval_logger.warning(f"Failed to convert rouge score to float for {item['id']}: {item['rouge_score'][0]}")
            continue
        if value < 0:
            continue
        running += value
        counted += 1
    return running / counted if counted > 0 else -1
452
+
453
+
454
def sfe_aggregate_bertscore_results(results, args):
    """Mean BERTScore over valid samples; -1 when none are valid.

    Sentinel values of -1 (metric not applicable) are skipped.
    """
    running = 0.0
    counted = 0
    for item in results:
        try:
            value = float(item["bertscore"][0])
        except:
            eval_logger.warning(f"Failed to convert bert score to float for {item['id']}: {item['bertscore'][0]}")
            continue
        if value < 0:
            continue
        running += value
        counted += 1
    return running / counted if counted > 0 else -1
468
+
469
+
470
def sfe_aggregate_bleuscore_results(results, args):
    """Mean BLEU score over valid samples; -1 when none are valid.

    Sentinel values of -1 (metric not applicable) are skipped.
    """
    running = 0.0
    counted = 0
    for item in results:
        try:
            value = float(item["bleu_score"][0])
        except:
            eval_logger.warning(f"Failed to convert bleu score to float for {item['id']}: {item['bleu_score'][0]}")
            continue
        if value < 0:
            continue
        running += value
        counted += 1
    return running / counted if counted > 0 else -1
484
+
485
+
486
def sfe_aggregate_meteor_score_results(results, args):
    """Mean METEOR score over valid samples; -1 when none are valid.

    Sentinel values of -1 (metric not applicable) are skipped.
    """
    running = 0.0
    counted = 0
    for item in results:
        try:
            value = float(item["meteor_score"][0])
        except:
            eval_logger.warning(f"Failed to convert meteor score to float for {item['id']}: {item['meteor_score'][0]}")
            continue
        if value < 0:
            continue
        running += value
        counted += 1
    return running / counted if counted > 0 else -1
500
+
501
+
502
def sfe_aggregate_judge_results(results, args):
    """Mean LLM-judge score (0-9) over all counted samples; -1 when empty.

    The judge reply is free text: the first run of digits found in it is taken
    as the score, and a reply with no digits counts as 0.
    """
    running = 0.0
    counted = 0
    for item in results:
        try:
            reply = item["llm_score"][0]
            found = re.search(r"(\d+)", reply)
            running += float(found.group(1)) if found else 0
            counted += 1
        except:
            # e.g. grounding items store the int sentinel -1, which re.search rejects.
            eval_logger.warning(f"Failed to convert llm score to int for {item['id']}: {item['llm_score']}")
    return running / counted if counted > 0 else -1
522
+
523
+
524
def sfe_aggregate_execute_succ_rate_results(results, args):
    """Average the execution success rates over all per-sample results.

    Args:
        results: iterable of per-sample dicts; each should provide
            ``result["execute_success_rate"][0]`` convertible to float.
        args: unused; kept to satisfy the aggregator callback signature.

    Returns:
        float: mean of the valid (non-negative) rates, or -1 if there are none.
    """
    total_score = 0.0
    total_cnt = 0
    for result in results:
        try:
            score = float(result["execute_success_rate"][0])
        except (KeyError, IndexError, TypeError, ValueError):
            # Skip malformed entries instead of aborting the aggregation.
            # .get() avoids a secondary KeyError inside this handler; the old
            # bare `except` also trapped KeyboardInterrupt/SystemExit.
            eval_logger.warning(
                f"Failed to convert execute success score to float for {result.get('id')}: {result.get('execute_success_rate')}"
            )
            continue
        if score < 0:
            # Negative values flag samples that could not be scored.
            continue
        total_score += score
        total_cnt += 1
    return total_score / total_cnt if total_cnt > 0 else -1
538
+
539
+
540
def sfe_aggregate_iou_score_results(results, args):
    """Average the IoU scores over all per-sample results.

    Args:
        results: iterable of per-sample dicts; each should provide
            ``result["iou_score"][0]`` as a value convertible to float.
        args: unused; kept to satisfy the aggregator callback signature.

    Returns:
        float: mean of the valid (non-negative) IoU scores, or -1 if none.
    """
    total_score = 0.0
    total_cnt = 0
    for result in results:
        try:
            score = float(result["iou_score"][0])
        except (KeyError, IndexError, TypeError, ValueError):
            # Skip malformed entries instead of aborting the aggregation.
            # .get() avoids a secondary KeyError inside this handler; the old
            # bare `except` also trapped KeyboardInterrupt/SystemExit.
            eval_logger.warning(
                f"Failed to convert execute iou score to float for {result.get('id')}: {result.get('iou_score')}"
            )
            continue
        if score < 0:
            # Negative values flag samples that could not be scored.
            continue
        total_score += score
        total_cnt += 1
    return total_score / total_cnt if total_cnt > 0 else -1
554
+
555
+
556
def sfe_aggregate_acc01_results(results, args, threshold=0.1):
    """Accuracy@0.1: fraction of samples whose IoU exceeds `threshold`.

    Every sample with a parsable IoU is counted in the denominator; values at
    or below the threshold (including negative sentinels) score 0, matching
    the original behavior where the ``score < 0`` guard was unreachable.

    Args:
        results: iterable of per-sample dicts carrying ``result["iou_score"][0]``.
        args: unused; kept to satisfy the aggregator callback signature.
        threshold: IoU cutoff; defaults to 0.1 for backward compatibility.

    Returns:
        float: hit rate in [0, 1], or -1 when no sample could be parsed.
    """
    hits = 0.0
    total_cnt = 0
    for result in results:
        try:
            iou = float(result["iou_score"][0])
        except (KeyError, IndexError, TypeError, ValueError):
            # Skip malformed entries instead of aborting the aggregation.
            # .get() avoids a secondary KeyError inside this handler; the old
            # bare `except` also trapped KeyboardInterrupt/SystemExit.
            eval_logger.warning(
                f"Failed to convert execute iou score to float for {result.get('id')}: {result.get('iou_score')}"
            )
            continue
        if iou > threshold:
            hits += 1.0
        total_cnt += 1
    return hits / total_cnt if total_cnt > 0 else -1
570
+
571
+
572
def sfe_aggregate_acc03_results(results, args, threshold=0.3):
    """Accuracy@0.3: fraction of samples whose IoU exceeds `threshold`.

    Every sample with a parsable IoU is counted in the denominator; values at
    or below the threshold (including negative sentinels) score 0, matching
    the original behavior where the ``score < 0`` guard was unreachable.

    Args:
        results: iterable of per-sample dicts carrying ``result["iou_score"][0]``.
        args: unused; kept to satisfy the aggregator callback signature.
        threshold: IoU cutoff; defaults to 0.3 for backward compatibility.

    Returns:
        float: hit rate in [0, 1], or -1 when no sample could be parsed.
    """
    hits = 0.0
    total_cnt = 0
    for result in results:
        try:
            iou = float(result["iou_score"][0])
        except (KeyError, IndexError, TypeError, ValueError):
            # Skip malformed entries instead of aborting the aggregation.
            # .get() avoids a secondary KeyError inside this handler; the old
            # bare `except` also trapped KeyboardInterrupt/SystemExit.
            eval_logger.warning(
                f"Failed to convert execute iou score to float for {result.get('id')}: {result.get('iou_score')}"
            )
            continue
        if iou > threshold:
            hits += 1.0
        total_cnt += 1
    return hits / total_cnt if total_cnt > 0 else -1
586
+
587
+
588
def sfe_aggregate_acc05_results(results, args, threshold=0.5):
    """Accuracy@0.5: fraction of samples whose IoU exceeds `threshold`.

    Every sample with a parsable IoU is counted in the denominator; values at
    or below the threshold (including negative sentinels) score 0, matching
    the original behavior where the ``score < 0`` guard was unreachable.

    Args:
        results: iterable of per-sample dicts carrying ``result["iou_score"][0]``.
        args: unused; kept to satisfy the aggregator callback signature.
        threshold: IoU cutoff; defaults to 0.5 for backward compatibility.

    Returns:
        float: hit rate in [0, 1], or -1 when no sample could be parsed.
    """
    hits = 0.0
    total_cnt = 0
    for result in results:
        try:
            iou = float(result["iou_score"][0])
        except (KeyError, IndexError, TypeError, ValueError):
            # Skip malformed entries instead of aborting the aggregation.
            # .get() avoids a secondary KeyError inside this handler; the old
            # bare `except` also trapped KeyboardInterrupt/SystemExit.
            eval_logger.warning(
                f"Failed to convert execute iou score to float for {result.get('id')}: {result.get('iou_score')}"
            )
            continue
        if iou > threshold:
            hits += 1.0
        total_cnt += 1
    return hits / total_cnt if total_cnt > 0 else -1
602
+
603
+
604
def sfe_aggregate_acc07_results(results, args, threshold=0.7):
    """Accuracy@0.7: fraction of samples whose IoU exceeds `threshold`.

    Every sample with a parsable IoU is counted in the denominator; values at
    or below the threshold (including negative sentinels) score 0, matching
    the original behavior where the ``score < 0`` guard was unreachable.

    Args:
        results: iterable of per-sample dicts carrying ``result["iou_score"][0]``.
        args: unused; kept to satisfy the aggregator callback signature.
        threshold: IoU cutoff; defaults to 0.7 for backward compatibility.

    Returns:
        float: hit rate in [0, 1], or -1 when no sample could be parsed.
    """
    hits = 0.0
    total_cnt = 0
    for result in results:
        try:
            iou = float(result["iou_score"][0])
        except (KeyError, IndexError, TypeError, ValueError):
            # Skip malformed entries instead of aborting the aggregation.
            # .get() avoids a secondary KeyError inside this handler; the old
            # bare `except` also trapped KeyboardInterrupt/SystemExit.
            eval_logger.warning(
                f"Failed to convert execute iou score to float for {result.get('id')}: {result.get('iou_score')}"
            )
            continue
        if iou > threshold:
            hits += 1.0
        total_cnt += 1
    return hits / total_cnt if total_cnt > 0 else -1
618
+
619
+
620
def sfe_aggregate_acc09_results(results, args, threshold=0.9):
    """Accuracy@0.9: fraction of samples whose IoU exceeds `threshold`.

    Every sample with a parsable IoU is counted in the denominator; values at
    or below the threshold (including negative sentinels) score 0, matching
    the original behavior where the ``score < 0`` guard was unreachable.

    Args:
        results: iterable of per-sample dicts carrying ``result["iou_score"][0]``.
        args: unused; kept to satisfy the aggregator callback signature.
        threshold: IoU cutoff; defaults to 0.9 for backward compatibility.

    Returns:
        float: hit rate in [0, 1], or -1 when no sample could be parsed.
    """
    hits = 0.0
    total_cnt = 0
    for result in results:
        try:
            iou = float(result["iou_score"][0])
        except (KeyError, IndexError, TypeError, ValueError):
            # Skip malformed entries instead of aborting the aggregation.
            # .get() avoids a secondary KeyError inside this handler; the old
            # bare `except` also trapped KeyboardInterrupt/SystemExit.
            eval_logger.warning(
                f"Failed to convert execute iou score to float for {result.get('id')}: {result.get('iou_score')}"
            )
            continue
        if iou > threshold:
            hits += 1.0
        total_cnt += 1
    return hits / total_cnt if total_cnt > 0 else -1
raw_data/earth/2023.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da3b82eb86bfc0ff989b38a3e7f8f9d4ba7d3c8dad3dff96fd399c9b9d7753c2
3
+ size 1161742655
raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_199001-199912.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1d01c9851856f8a65449e87bcd0e25773a3b161a6af4eeb28fb2c044a523a09
3
+ size 1604329330
raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_200001-200912.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4694a0791ab3e255b0390d8bf0b3236d58d3082d852fe6a91c79b7fb68d499db
3
+ size 1604329330
raw_data/earth/CMIP6/thetao_Omon_BCC-CSM2-MR_historical_r1i1p1f1_gn_201001-201412.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa10f22eb5d653dce585a4a57d0a259c2da1eab98f8931ee008cb0e25310abef
3
+ size 802523698
raw_data/earth/CMIP6/thetao_Omon_CAS-ESM2-0_historical_r1i1p1f1_gn_195001-201412.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d4310a6d3337589d634eb667ac4be689a57e1dc2dfedeb9639e0c9153891f6f
3
+ size 3565510084
raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_198001-198912.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29859e7ee8f46167ef1ecdc54e82ecff2d638017d268505b6dac1095d1591431
3
+ size 417212876
raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_199001-199912.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e275d9d6fe4e960d6a4692411505eecca283fbbffd109d0166f83e086d144be8
3
+ size 417098942
raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_200001-200912.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b17fd207e71cd074798d51e920172aeb47750828d7e1872b60a8372416fc497a
3
+ size 416998220
raw_data/earth/CMIP6/thetao_Omon_INM-CM5-0_historical_r1i1p1f1_gr1_201001-201412.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:705750a33ad6a8b41d900965eabb969c3bb87b2c801898cd1afab36f2594fee8
3
+ size 208609222
raw_data/earth/ERA5/t2m.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00b046b99687570ffef663b9d1698a0c6be01e7f1db892fa7169f5811b1614ac
3
+ size 737148452
raw_data/earth/GODAS/1981.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49af25ce8d770587d8a2fc7192afc63bf03365c618f0a34dc752bf9ae951755e
3
+ size 102687576
raw_data/earth/GODAS/1982.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33fbf51025da8f8ea5a4ff42aa37a98369019b63f7619c780c3b61583959f1f5
3
+ size 102780289
raw_data/earth/GODAS/1983.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55e450033772ec52be41aa99b2a4147a30085362246fd79731626d73aee639cd
3
+ size 102910789
raw_data/earth/GODAS/1984.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3396922e9a6f8ecfb0cfe062b69aa2282f44483c0d240318863381582e03908
3
+ size 102896204
raw_data/earth/GODAS/1985.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:205ef913636f36c8eced3182ff57e511cbb243c1bedbc1b05f33f21f0645458d
3
+ size 102866883
raw_data/earth/GODAS/1986.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5beabdd270533ea2460bb7e1470c2e6050bfbe8193ef8aa6822c0d797f5b698
3
+ size 102795899
raw_data/earth/GODAS/1987.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c47a1904780b51e49f6b45e3ad29e9f4941f8387876959712d588b66de2639be
3
+ size 102849598
raw_data/earth/GODAS/1988.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a688d048f64357561b5110d5dcf25f31c1e2d384789ea90296ae39a62116ecf1
3
+ size 102996251
raw_data/earth/GODAS/1989.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:499545656999cb2cae6fdfc64e4e1d82b14ead198aa8c478529c595ef6c4030e
3
+ size 102864982
raw_data/earth/GODAS/1991.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39b8e5f1424cc5dfda592b58d61b2cfbfad2c3488de1587c65b093fcaabd7482
3
+ size 102771019
raw_data/earth/GODAS/1992.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69a8908eaa6557afe62564a327aa3c462dccda7b5f9b351d39bf2309f1af9ede
3
+ size 102903857
raw_data/earth/GODAS/1993.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a07a9ae02e1663576284a30e9d4a724c696f499bf48d4b4d8116ed14c7198b3
3
+ size 102883076
raw_data/earth/GODAS/1994.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:808b02b91cd154b612b85dd65d68c8c00e91e1adca4b7d3e624c95b80c17999c
3
+ size 102993657
raw_data/earth/GODAS/1995.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:634ab6d422f2c3d5fb78e8ae4592076197141a50445850842010e8a5e16e44fe
3
+ size 102985387
raw_data/earth/GODAS/1996.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21c2724e8de3cf78259ca5c9e76a7b317d84ca92d00ab27d509033b735fa6362
3
+ size 102965432
raw_data/earth/GODAS/1997.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df5104904e46f242f1cd22f26fa657fb305c2bf11361a59b5c861a25adb96231
3
+ size 103133576
raw_data/earth/GODAS/1998.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:000b8fbe8a512a71147a596354616e606163f7bac66e86ff18a8e9ca0ed2b50d
3
+ size 103079070
raw_data/earth/GODAS/1999.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d31d014e662f0c0aa55db7216a024b0034fb049f238c37a44377c938420f2e6
3
+ size 103073931
raw_data/earth/GODAS/2000.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1845119fa0b57adba69a23915a157239ddafc24e888859e6129c2f340efbef87
3
+ size 102944041
raw_data/earth/GODAS/2001.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10153f0cbfd71f0af3f72f3d38d4e026ebbd2b4818598e326c1fdaebfbabafdf
3
+ size 102896420
raw_data/earth/GODAS/2002.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a3bd51c2de78d84e16162908fa8dda6e2a72516fb4aad3693adb20ca61b30dc
3
+ size 102885929
raw_data/earth/GODAS/2003.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19fd50b11830677736fdfac4e43cc03773c933ad85638d5cd83c0ac679e6171b
3
+ size 103027732
raw_data/earth/GODAS/2004.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e1c970b07aba8b843fd18d84f7a388e1b181ab58daefe7e7c3efc0bfc0b8ec5f
3
+ size 103095014
raw_data/earth/GODAS/2005.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:841d5ff39138b6dabb80f9d3a7cb01a2972b863d6902eca63ba773b5e19347fe
3
+ size 102697903
raw_data/earth/GODAS/2006.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46de2ccdd9b3447d7dfb3d4eaa157a743ac7b6fdde08be36966eeff5ead7f150
3
+ size 102655691
raw_data/earth/GODAS/2007.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d82703183350b3308cdc24bf963d3c83bfa7442152a08ff66d60d64af6c58d2a
3
+ size 102606679
raw_data/earth/GODAS/2008.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22158731dcec77ced70da8044b348cc58602814cee8869fb1ab08bdc0732c480
3
+ size 102808932
raw_data/earth/GODAS/2009.nc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e29b68223659c9c6499dd0288c6bfd01b03e15185256342d9cc3f6c340ce445d
3
+ size 102846147