danielrosehill committed
Commit f70942e · 1 Parent(s): 33a2d5c
.vscode/settings.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:564b1e82a3739463441daa8c2e3d06f005c613b0c9613e91ca4cdcf4cebe6a6e
- size 151
 
.vscode/tasks.json DELETED
File without changes
main-sections-links.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "data_sections": [
+     {
+       "category": "Data Files Root",
+       "name": "Main Data Directory",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data",
+       "emoji": "📁"
+     },
+     {
+       "category": "Aggregated Value Factors",
+       "name": "Aggregated Data (CSV, JSON, Parquet)",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/aggregated",
+       "emoji": "📊"
+     },
+     {
+       "category": "Value Factors by Type/Methodology",
+       "name": "Impact Type Data (JSON)",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type",
+       "emoji": "🔄"
+     },
+     {
+       "category": "Value Factors by Geography",
+       "name": "Continental Data Root",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-region/continental",
+       "emoji": "🌍"
+     },
+     {
+       "category": "CSV Format Data",
+       "name": "CSV Data Files",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/csv",
+       "emoji": "📄"
+     },
+     {
+       "category": "Data By Impact Type",
+       "name": "Air Pollution Data",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type/air-pollution",
+       "emoji": "🌫️"
+     },
+     {
+       "category": "Data By Impact Type",
+       "name": "GHG Impacts Data (1 Impact Only)",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type/ghg",
+       "emoji": "🏭"
+     },
+     {
+       "category": "Data By Impact Type",
+       "name": "Land Conversion Data",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type/land-conversion",
+       "emoji": "🌱"
+     },
+     {
+       "category": "Data By Impact Type",
+       "name": "Land Use Data",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type/land-use",
+       "emoji": "🌿"
+     },
+     {
+       "category": "Data By Impact Type",
+       "name": "Waste Data",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type/waste",
+       "emoji": "🗑️"
+     },
+     {
+       "category": "Data By Impact Type",
+       "name": "Water Consumption Data",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type/water-consumption",
+       "emoji": "💧"
+     },
+     {
+       "category": "Data By Impact Type",
+       "name": "Water Pollution Data (JSON Chunked Due To Size)",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-impact-type/water-pollution",
+       "emoji": "💦"
+     },
+     {
+       "category": "Data By Region",
+       "name": "Africa",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-region/continental/Africa",
+       "emoji": "🌍"
+     },
+     {
+       "category": "Data By Region",
+       "name": "Asia",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-region/continental/Asia",
+       "emoji": "🌏"
+     },
+     {
+       "category": "Data By Region",
+       "name": "Europe",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-region/continental/Europe",
+       "emoji": "🌍"
+     },
+     {
+       "category": "Data By Region",
+       "name": "North America",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-region/continental/North%20America",
+       "emoji": "🌎"
+     },
+     {
+       "category": "Data By Region",
+       "name": "Oceania",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-region/continental/Oceania",
+       "emoji": "🌏"
+     },
+     {
+       "category": "Data By Region",
+       "name": "South America",
+       "url": "https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/tree/main/data/by-region/continental/South%20America",
+       "emoji": "🌎"
+     }
+   ]
+ }
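
For orientation, a minimal sketch of how the new `main-sections-links.json` index could be consumed, assuming it is read from the repository root:

```python
import json

# Load the section index added in this commit and list each browsable link.
with open("main-sections-links.json", "r", encoding="utf-8") as f:
    sections = json.load(f)["data_sections"]

for section in sections:
    print(f'{section["emoji"]} {section["category"]} / {section["name"]}: {section["url"]}')
```
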
scripts/convert_csv_to_json.py DELETED
@@ -1,214 +0,0 @@
- #!/usr/bin/env python3
- """
- Script to convert CSV files to JSON for the IFVI Value Factors dataset.
- This script fetches CSV data from GitHub and creates JSON files for the missing impact types.
- """
-
- import os
- import json
- import pandas as pd
- import requests
- from pathlib import Path
- from io import StringIO
-
- # Get the repository root directory
- REPO_ROOT = Path(__file__).parent.parent.absolute()
-
- # GitHub repository URL for the CSV files
- GITHUB_CSV_BASE_URL = "https://raw.githubusercontent.com/danielrosehill/Global-Value-Factors-Explorer/main/Data/GVFD-Deriv/data/csv/by-methodology"
-
- def create_directory_if_not_exists(directory_path):
-     """
-     Create a directory if it doesn't exist.
-
-     Args:
-         directory_path (Path): Path to the directory to create
-     """
-     if not directory_path.exists():
-         os.makedirs(directory_path)
-         print(f"Created directory: {directory_path}")
-
- def fetch_csv_from_github(impact_type):
-     """
-     Fetch a CSV file from GitHub.
-
-     Args:
-         impact_type (str): Name of the impact type (e.g., 'land-conversion')
-
-     Returns:
-         pandas.DataFrame or None: DataFrame containing the CSV data, or None if the fetch failed
-     """
-     csv_url = f"{GITHUB_CSV_BASE_URL}/{impact_type}.csv"
-     print(f"Fetching CSV from: {csv_url}")
-
-     try:
-         response = requests.get(csv_url)
-         response.raise_for_status()  # Raise an exception for HTTP errors
-
-         # Parse the CSV content
-         csv_content = StringIO(response.text)
-         df = pd.read_csv(csv_content)
-
-         print(f"Successfully fetched CSV for {impact_type}")
-         return df
-
-     except Exception as e:
-         print(f"Error fetching CSV for {impact_type}: {str(e)}")
-         return None
-
- def convert_csv_to_json(impact_type, output_filename=None, split_file=False, num_parts=4):
-     """
-     Convert a CSV file to JSON for a specific impact type.
-
-     Args:
-         impact_type (str): Name of the impact type (e.g., 'land-conversion')
-         output_filename (str, optional): Name of the output JSON file.
-             Defaults to impact_type + '_by_impact.json'.
-         split_file (bool, optional): Whether to split the file into multiple parts.
-             Defaults to False.
-         num_parts (int, optional): Number of parts to split the file into.
-             Defaults to 4.
-
-     Returns:
-         bool: True if conversion was successful, False otherwise
-     """
-     # Fetch the CSV data from GitHub
-     df = fetch_csv_from_github(impact_type)
-
-     if df is None:
-         print(f"Failed to fetch CSV data for {impact_type}")
-         return False
-
-     # Create the output directory
-     output_dir = REPO_ROOT / "data" / "by-impact-type" / impact_type
-     create_directory_if_not_exists(output_dir)
-
-     # Set the output filename if not provided
-     if output_filename is None:
-         output_filename = f"{impact_type}_by_impact.json"
-
-     try:
-         # Convert the DataFrame to a nested dictionary structure
-         columns = df.columns.tolist()
-
-         # Create a hierarchical structure
-         metadata = {
-             "impact_type": impact_type,
-             "description": f"Value factors for {impact_type}",
-             "source": f"Derived from {impact_type}.csv",
-             "columns": columns
-         }
-
-         if not split_file:
-             # Regular processing for non-split files
-             data = {
-                 "metadata": metadata,
-                 "data": {}
-             }
-
-             # Convert DataFrame to dictionary.
-             # If the DataFrame has 'Country' or 'Region' columns, organize by those.
-             if 'Country' in df.columns:
-                 for country in df['Country'].unique():
-                     country_data = df[df['Country'] == country].to_dict(orient='records')
-                     data['data'][country] = country_data
-             elif 'Region' in df.columns:
-                 for region in df['Region'].unique():
-                     region_data = df[df['Region'] == region].to_dict(orient='records')
-                     data['data'][region] = region_data
-             else:
-                 # If no country or region columns, just convert all rows
-                 data['data']['all'] = df.to_dict(orient='records')
-
-             # Write the JSON file
-             output_path = output_dir / output_filename
-             with open(output_path, 'w') as f:
-                 json.dump(data, f, indent=2)
-
-             print(f"Created JSON file: {output_path}")
-
-         else:
-             # Split processing for large files: calculate the size of each part
-             part_size = len(df) // num_parts
-             output_paths = []
-
-             # Process each part
-             for i in range(num_parts):
-                 start_idx = i * part_size
-                 end_idx = (i + 1) * part_size if i < num_parts - 1 else len(df)
-
-                 df_part = df.iloc[start_idx:end_idx]
-
-                 # Create data structure for this part
-                 data_part = {
-                     "metadata": {**metadata, "part": i + 1, "total_parts": num_parts},
-                     "data": {}
-                 }
-
-                 # Convert DataFrame to dictionary for this part
-                 if 'Country' in df_part.columns:
-                     for country in df_part['Country'].unique():
-                         country_data = df_part[df_part['Country'] == country].to_dict(orient='records')
-                         data_part['data'][country] = country_data
-                 elif 'Region' in df_part.columns:
-                     for region in df_part['Region'].unique():
-                         region_data = df_part[df_part['Region'] == region].to_dict(orient='records')
-                         data_part['data'][region] = region_data
-                 else:
-                     data_part['data']['all'] = df_part.to_dict(orient='records')
-
-                 # Write the JSON file for this part
-                 base_name = output_filename.replace('.json', '')
-                 output_path = output_dir / f"{base_name}_part{i+1}.json"
-                 output_paths.append(output_path)
-
-                 with open(output_path, 'w') as f:
-                     json.dump(data_part, f, indent=2)
-
-             print(f"Created {num_parts} JSON files: {', '.join(str(p) for p in output_paths)}")
-
-         return True
-
-     except Exception as e:
-         print(f"Error converting {impact_type} to JSON: {str(e)}")
-         return False
-
- def main():
-     """
-     Main function to convert all missing impact types to JSON.
-     """
-     # List of impact types to convert
-     impact_types = [
-         "land-conversion",
-         "land-use",
-         "water-pollution"
-     ]
-
-     # Output filenames for each impact type
-     output_filenames = {
-         "land-conversion": "land_conversion_by_impact.json",
-         "land-use": "land_use_by_impact.json",
-         "water-pollution": "water_pollution_by_impact.json"
-     }
-
-     # Convert each impact type
-     for impact_type in impact_types:
-         output_filename = output_filenames.get(impact_type)
-
-         # Split the water-pollution file into four parts
-         split_file = (impact_type == "water-pollution")
-         num_parts = 4 if split_file else 2  # num_parts is ignored when split_file is False
-
-         success = convert_csv_to_json(impact_type, output_filename, split_file, num_parts)
-
-         if success:
-             print(f"Successfully converted {impact_type} to JSON")
-         else:
-             print(f"Failed to convert {impact_type} to JSON")
-
-     print("\nDone! You can now add and commit the files.")
-
- if __name__ == "__main__":
-     main()
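
Since the script above chunks the water-pollution output into several part files, a downstream reader has to reassemble them. Below is a minimal sketch under the script's naming scheme (the directory and part-file names are inferred from the deleted code, not guaranteed elsewhere in the repository):

```python
import json
from pathlib import Path

# Recombine the chunked water-pollution JSON parts written by convert_csv_to_json.
parts_dir = Path("data/by-impact-type/water-pollution")
combined = {}
for part_path in sorted(parts_dir.glob("water_pollution_by_impact_part*.json")):
    part = json.loads(part_path.read_text(encoding="utf-8"))
    # A country/region group may straddle a chunk boundary, so extend rather than overwrite.
    for key, records in part["data"].items():
        combined.setdefault(key, []).extend(records)

print(f"Recombined {len(combined)} country/region groups")
```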
 
scripts/extract_country_links.py DELETED
@@ -1,86 +0,0 @@
- #!/usr/bin/env python3
- """
- Script to extract all country links from the README.md file and create a comprehensive
- country-data-links.json file.
- """
-
- import re
- import json
- import os
-
- # Paths to the README.md source file and the JSON output file
- readme_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "README.md")
- output_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "country-data-links.json")
-
- # Regular expression to match country entries in the README.
- # Format: | 🇩🇿 Algeria | [JSON](https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/blob/main/data/by-region/continental/Africa/Algeria.json) | ...
- country_pattern = r'\|\s+(?:(\S+)\s+)?([^|]+)\s+\|\s+\[JSON\]\(([^)]+)\)\s+\|'
-
- # Dictionary to store country data by continent
- country_data = {
-     "Africa": [],
-     "Asia": [],
-     "Europe": [],
-     "North America": [],
-     "South America": [],
-     "Oceania": []
- }
-
- current_continent = None
-
- # Read the README.md file
- with open(readme_path, 'r', encoding='utf-8') as f:
-     readme_content = f.read()
-
- # Split the content by lines
- lines = readme_content.split('\n')
-
- # Process each line
- for line in lines:
-     # Check if this is a continent header
-     if "#### 🌍 Africa" in line or "#### 🌏 Asia" in line or "#### 🌍 Europe" in line or \
-        "#### 🌎 North America" in line or "#### 🌏 Oceania" in line or "#### 🌎 South America" in line:
-         if "Africa" in line:
-             current_continent = "Africa"
-         elif "Asia" in line:
-             current_continent = "Asia"
-         elif "Europe" in line:
-             current_continent = "Europe"
-         elif "North America" in line:
-             current_continent = "North America"
-         elif "Oceania" in line:
-             current_continent = "Oceania"
-         elif "South America" in line:
-             current_continent = "South America"
-
-     # If we're in a continent section, look for country entries
-     if current_continent and "|" in line:
-         # Find all country entries in the line
-         matches = re.findall(country_pattern, line)
-         for match in matches:
-             emoji, country, url = match
-
-             # Clean up the country name
-             country = country.strip()
-
-             # Add to the appropriate continent list
-             if current_continent in country_data and country and url:
-                 country_data[current_continent].append({
-                     "country": country,
-                     "emoji": emoji.strip() if emoji else "",
-                     "url": url
-                 })
-
- # Create the final JSON structure
- final_json = {
-     "country_data": country_data
- }
-
- # Write to the output file
- with open(output_path, 'w', encoding='utf-8') as f:
-     json.dump(final_json, f, indent=2)
-
- print(f"Successfully extracted country links to {output_path}")
- print(f"Total countries extracted: {sum(len(countries) for countries in country_data.values())}")
- for continent, countries in country_data.items():
-     print(f"  {continent}: {len(countries)} countries")
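
As a quick sanity check of `country_pattern`, the regex can be exercised against the sample row quoted in the script's comment (a standalone hypothetical snippet, not part of this commit):

```python
import re

pattern = r'\|\s+(?:(\S+)\s+)?([^|]+)\s+\|\s+\[JSON\]\(([^)]+)\)\s+\|'
row = "| 🇩🇿 Algeria | [JSON](https://huggingface.co/datasets/danielrosehill/ifvi_valuefactors_deriv/blob/main/data/by-region/continental/Africa/Algeria.json) |"
emoji, country, url = re.findall(pattern, row)[0]
print(emoji, country, url)  # 🇩🇿 Algeria https://huggingface.co/datasets/...
```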