|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T01:11:46.022556Z" |
|
}, |
|
"title": "A Holistic Assessment of the Carbon Footprint of Noor, a Very Large Arabic Language Model", |
|
"authors": [ |
|
{ |
|
"first": "Imad", |
|
"middle": [], |
|
"last": "Lakim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TII", |
|
"location": { |
|
"settlement": "Abu Dhabi" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ebtesam", |
|
"middle": [], |
|
"last": "Almazrouei", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TII", |
|
"location": { |
|
"settlement": "Abu Dhabi" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Ibrahim", |
|
"middle": [ |
|
"Abu" |
|
], |
|
"last": "Alhaol", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TII", |
|
"location": { |
|
"settlement": "Abu Dhabi" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Merouane", |
|
"middle": [], |
|
"last": "Debbah", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "TII", |
|
"location": { |
|
"settlement": "Abu Dhabi" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Launay", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "As ever larger language models grow more ubiquitous, it is crucial to consider their environmental impact. Characterised by extreme size and resource use, recent generations of models have been criticised for their voracious appetite for compute, and thus significant carbon footprint. Although reporting of carbon impact has grown more common in machine learning papers, this reporting is usually limited to compute resources used strictly for training. In this work, we propose a holistic assessment of the footprint of an extremescale language model, Noor. Noor is an ongoing project aiming to develop the largest multi-task Arabic language models-with up to 13B parameters-leveraging zero-shot generalisation to enable a wide range of downstream tasks via natural language instructions. We assess the total carbon bill of the entire project: starting with data collection and storage costs, including research and development budgets, pretraining costs, future serving estimates, and other exogenous costs necessary for this international cooperation. Notably, we find that inference costs and exogenous factors can have a significant impact on total budget. Finally, we discuss pathways to reduce the carbon footprint of extreme-scale models.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "As ever larger language models grow more ubiquitous, it is crucial to consider their environmental impact. Characterised by extreme size and resource use, recent generations of models have been criticised for their voracious appetite for compute, and thus significant carbon footprint. Although reporting of carbon impact has grown more common in machine learning papers, this reporting is usually limited to compute resources used strictly for training. In this work, we propose a holistic assessment of the footprint of an extremescale language model, Noor. Noor is an ongoing project aiming to develop the largest multi-task Arabic language models-with up to 13B parameters-leveraging zero-shot generalisation to enable a wide range of downstream tasks via natural language instructions. We assess the total carbon bill of the entire project: starting with data collection and storage costs, including research and development budgets, pretraining costs, future serving estimates, and other exogenous costs necessary for this international cooperation. Notably, we find that inference costs and exogenous factors can have a significant impact on total budget. Finally, we discuss pathways to reduce the carbon footprint of extreme-scale models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Recent progress in natural language processing (NLP) has been driven by the emergence of socalled foundation models (Bommasani et al., 2021) . This paradigm shift is characterised by a homogenisation of modelling methods-crystallising around the Transformer architecture (Vaswani et al., 2017) and by emergent capabilities (e.g. zero-shot generalisation) predominantly arising from sheer scale alone . NLP models are now experiencing a 3-4 months doubling time in size, as outlined by Figure 1 . Most recent large language models such as MT-NLG 530B (Smith et al., 2022 ), Gopher 280B (Rae et al., 2021 , or Jurassic-1 178B (Lieber et al., 2021) , all report training budgets in the thousands of PF-days 1 range. Because AI accelerators performance per watt has plateaued compared to deep learning budgets (Reuther et al., 2021; Sevilla et al., 2022) , practitioners have had to scale-out training over an increasingly large number of accelerators (Narayanan et al., 2021) . Accordingly, the energy cost of training state-of-theart models has grown significantly: increase in compute is no longer fuelled by improvements in hardware efficiency, but in hardware scale.", |
|
"cite_spans": [ |
|
{ |
|
"start": 116, |
|
"end": 140, |
|
"text": "(Bommasani et al., 2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 293, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 550, |
|
"end": 569, |
|
"text": "(Smith et al., 2022", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 570, |
|
"end": 602, |
|
"text": "), Gopher 280B (Rae et al., 2021", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 624, |
|
"end": 645, |
|
"text": "(Lieber et al., 2021)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 806, |
|
"end": 828, |
|
"text": "(Reuther et al., 2021;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 829, |
|
"end": 850, |
|
"text": "Sevilla et al., 2022)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 948, |
|
"end": 972, |
|
"text": "(Narayanan et al., 2021)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 485, |
|
"end": 493, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Although this increase in size and compute budget is backed by empirical scaling laws drawing a clear link between compute spent and model performance , the societal benefits of larger models have been questioned (Toma\u0161ev et al., 2020; Bender et al., 2021) . Specifically to environmental concerns, in a time of climate crisis when carbon emissions must be drastically cut (Masson-Delmotte et al., 2018) , one may question whether these large compute budgets are justified. A crucial step towards answering this question is an in-depth evaluation of the footprint of large models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 235, |
|
"text": "(Toma\u0161ev et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 236, |
|
"end": 256, |
|
"text": "Bender et al., 2021)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 373, |
|
"end": 403, |
|
"text": "(Masson-Delmotte et al., 2018)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Existing assessments of the environmental impacts of large models are usually focused on hyperparameter tuning and pretraining costs (Strubell et al., 2019; Patterson et al., 2021) . This trend is reflected by the growing number of tools available to help practitioners quantify the impact of machine learning computations (Bannour et al., 2021) . If some studies have also endeavoured to quantify select aspects of the machine learning pipeline (e.g. conference attendance (Skiles et al., 2021) , hardware lifecycle (Gupta et al., 2021) , etc.), end-to-end evaluations of machine learning projects life cycle emissions remain rare (Wu et al., 2022) . : Over the last four years, the size of stateof-the-art language models has doubled every 3-4 months. Note that this trend has been slowing down, due to scale-out limitations.", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 156, |
|
"text": "(Strubell et al., 2019;", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 180, |
|
"text": "Patterson et al., 2021)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 323, |
|
"end": 345, |
|
"text": "(Bannour et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 474, |
|
"end": 495, |
|
"text": "(Skiles et al., 2021)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 517, |
|
"end": 537, |
|
"text": "(Gupta et al., 2021)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 632, |
|
"end": 649, |
|
"text": "(Wu et al., 2022)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To fill this gap, we produce an end-to-end assessment of the carbon footprint of Noor, a project seeking to train a very large Arabic language model. Our contributions are the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Holistic assessment. We evaluate the total carbon bill of the entire project: starting with data collection, curation, and storage, including research and development and hyper-parameters tuning budgets, pretraining costs, future serving estimates, and other exogenous impacts sparked by this international cooperation (e.g. flights, personnel, etc.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Beyond pretraining. We identify pretraining compute as driving more than half of the emissions of the project. However, all combined, other R&D, storage, and personnel counts still amount for 35% of the carbon footprint. We also identify downstream use in the wild as potentially significant. This leads us to recommend for the end-to-end footprint to be systematically assessed on a per-project basis. Notably, in scenarios with a low-impact training electric mix, costs beyond pretraining may become the main sources of emissions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Pathways to lower footprints. Finally, we discuss ways to reduce the environmental footprints of projects involving large models, and put in perspective the footprint of similar projects.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In light of ever increasing computational budgets (Sevilla et al., 2022) and of the need to cut on emissions to abate global warming (Masson-Delmotte et al., 2018) , the environmental impact of deep learning has drawn significant interest. Strubell et al., 2019 notably highlighted the potential high environmental costs of deep learning. However, its headline figures were produced in the specific context of neural architecture search, a relatively rare practice for extreme-scale models nowadays. Lacoste et al., 2019; Lottick et al., 2019; Schwartz et al., 2020 subsequently called for AI research to be more aware of its environmental cost. An increasing number of tools, such as codecarbon (Schmidt et al., 2021) , have been developed to help with tracking the impact of deep learning experiments (Bannour et al., 2021) . All of these lines of research share similar recommendations: the carbon footprint of deep learning is a direct consequence of the electricity mix and efficiency of the data center, suggesting that picking an appropriate provider is the most straightforward way to reduce environmental impact.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 72, |
|
"text": "(Sevilla et al., 2022)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 163, |
|
"text": "(Masson-Delmotte et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 240, |
|
"end": 261, |
|
"text": "Strubell et al., 2019", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 500, |
|
"end": 521, |
|
"text": "Lacoste et al., 2019;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 543, |
|
"text": "Lottick et al., 2019;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 544, |
|
"end": 565, |
|
"text": "Schwartz et al., 2020", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 696, |
|
"end": 718, |
|
"text": "(Schmidt et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 825, |
|
"text": "(Bannour et al., 2021)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Specifically to extreme-scale models, Patterson et al., 2021 estimated the energy consumption of five large NLP models, including GPT-3. They identified that a judicious choice of neural architecture, datacenter and accelerator can help reduce considerably carbon budgets. Thompson et al., 2020 identified a clear relationship between large models performance and their carbon impact, building upon work on neural scaling laws . Taddeo et al., 2021 estimated the cost of training GPT-3 in different data centers across the worldwide, highlighting again the high dependency on the local energy mix and specific infrastructure.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Two recent studies have provided insights into the end-to-end carbon footprint of deployed models in the industry. Wu et al., 2022 studied the impact of the increasingly large recommender systems leveraged at Meta, while Patterson et al., 2022 provided an assessment of the costs (including inference) of large models at Google. They expect the carbon footprint of training to plateau in coming years, and then to shrink-owing to more efficient high performance computing platforms. They also assert that current studies are overestimating the real environmental costs of large models, in light of the wide availability of \"clean\" compute platforms.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 243, |
|
"text": "Patterson et al., 2022", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In the field of astrophysics, Aujoux et al., 2021 did an extensive study to estimate the carbon footprint of the Giant Array for Neutrino Detection (GRAND) project, a multi-decade worldwide experiment. Inspired by their holistic methodology, we seek to establish the first end-to-end assessment of an extreme-scale NLP project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The current state-of-the-art generative language model in Modern Standard Arabic is AraGPT (Antoun et al., 2021), a 1.5B parameters model. The Noor project seeks to expand upon this model, introducing a 1.5B, 2.7B, 6.7B, and 13B Arabic models, trained a custom curated dataset of 150B tokens, inspired by The Pile (Gao et al., 2020) . These larger scales are expected to make the model able to tackle novel tasks through zero-shot generalization, as exhibited by GPT-3 (Brown et al., 2020) or GPT-J (Wang and Komatsuzaki, 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 314, |
|
"end": 332, |
|
"text": "(Gao et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 463, |
|
"end": 489, |
|
"text": "GPT-3 (Brown et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 499, |
|
"end": 527, |
|
"text": "(Wang and Komatsuzaki, 2021)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Noor Project", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Noor is an on-going international cooperation between the Technology Innovation Institute in the United Arab Emirates and LightOn in France. The Noor project can be split in four parts:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Noor Project", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Data curation. A custom curated dataset of 150B tokens has been assembled for Noor. This dataset has been scrapped from diversified sources, and also includes data from Common Crawl. We filter this data with an LM-based quality-scoring system inspired by CCNet (Wenzek et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 284, |
|
"text": "(Wenzek et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Noor Project", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 R&D experiments. To validate tokenization, dataset, architecture, and establish scaling laws, we trained a number of R&D models (100M-1.5B parameters on 10-30B tokens).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Noor Project", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Main training. We train a suite of four models of 1.5B, 2.7B, 6.7B, and 13B parameters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Noor Project", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "\u2022 Model use. Prospectively, we include some estimations of the future inference cost of these models as they are put in use.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Noor Project", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Before beginning our assessment, we propose to identify some of the key influencing factors on the potential carbon footprint of large models, focusing first on factors directly related to the models themselves and not to the project producing them.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Model size. The number of floating operations per forward pass is directly proportional to the size of the network. A common approximation for the total compute budget C required for training a Transformer model with N parameters on D tokens is C = 6N D . As the optimal dataset size only grows sublinearly with model size for autoregressive modelling , compute budget will scale more or less linearly with model size. The larger the number of operations, the more energy is needed to train the model. For inference, the cost for each token is reduced to a third compared to training, and environmental impact will be driven by the total number of words/tokens processed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
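
{

"text": "To make the C = 6ND rule of thumb concrete, the following minimal Python sketch computes the training compute budget and the approximate per-token inference cost (2N FLOPs, roughly a third of the 6N FLOPs spent per training token) for the Noor model sizes. The function and variable names are ours and purely illustrative:

def training_flops(n_params, n_tokens):
    # C = 6 * N * D approximation for a decoder-only Transformer
    return 6.0 * n_params * n_tokens

def inference_flops_per_token(n_params):
    # A forward pass costs roughly 2N FLOPs per generated token
    return 2.0 * n_params

D = 150e9  # Noor training set size, in tokens
for n in (1.5e9, 2.7e9, 6.7e9, 13e9):
    print(f'{n / 1e9:.1f}B params: {training_flops(n, D):.2e} training FLOPs, '
          f'{inference_flops_per_token(n):.2e} FLOPs per generated token')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Factors influencing the carbon footprint of large models",

"sec_num": "4"

},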
|
{ |
|
"text": "Hardware characteristics. The throughput (in FLOPs) that can be tackled by the hardware will drive the total time required to perform the task. More efficient hardware will have more throughput per Watt. We note however that most available chips suitable for large model training (e.g., NVIDIA GPUs, Google TPUs, etc.) exhibit similar efficiency characteristics (Reuther et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 362, |
|
"end": 384, |
|
"text": "(Reuther et al., 2021)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Modelling decisions. We identified above two key factors: number of tokens processed (for training or inference), and hardware throughput. We note that both of these are also strongly impacted by modelling decisions. A more fertile tokenizer will use less tokens for the same text, leading to faster processing. Similarly, small changes in model architecture (e.g., choosing hidden sizes in accordance with wave/tile quantization) and in implementation (e.g., 3D parallelism) can drastically increase throughput, and reduce total training time.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Data center efficiency. The energy consumed does not serve only to power up the servers, but also to cool down the data center itself and to respond to other electrical needs. The Power Usage Effectiveness (PUE) is used to assess the overall efficiency of a data center. It measures the quotient of the total energy requirement and the final energy used by the servers. The PUE will be influenced by the data center architecture. Worldwide average is around 1.8, but Google for instance reports an average PUE of 1.11. Waste heat in data centers can also be reused for collective water heating, driving down the PUE, as in the Jean Zay HPC.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Electricity mix. The breakdown of the energy sources powering a data center is a crucial factor, and depends primarily on the region. The electricity mix determines the carbon emissions per kWh of electricity. Today, the world average of carbon emission by kwh of electricity generated is 475 gCO2e/kWh, and an increasing number of data centers from cloud providers are using 100% renewable or nuclear energy to power their hardware. Taking Google Cloud as an example again, their", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
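
{

"text": "As a minimal sketch of how these two factors combine, the snippet below converts a given IT load into emissions using the figures quoted above (a PUE of 1.8 and 475 gCO2e/kWh for the world average, versus a PUE of 1.11 and 27 gCO2e/kWh for a facility such as Google's Montreal one); the function name and the 1 MWh example load are our own illustrative choices:

def emissions_kg(it_energy_kwh, pue, intensity_g_per_kwh):
    # Facility energy = IT energy * PUE; emissions = facility energy * grid intensity
    return it_energy_kwh * pue * intensity_g_per_kwh / 1000.0

it_load_kwh = 1000.0  # 1 MWh drawn by the servers themselves
print(f'World-average facility: {emissions_kg(it_load_kwh, 1.8, 475):.0f} kgCO2e')
print(f'Low-PUE, low-carbon facility: {emissions_kg(it_load_kwh, 1.11, 27):.0f} kgCO2e')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Factors influencing the carbon footprint of large models",

"sec_num": "4"

},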
|
{ |
|
"text": "Montreal facility reports 27gCO2e/kWH, twenty times lower than the world average. Beyond factors related to the models themselves, we seek in this study to take into account a number of other costs: storage, preprocessing, and transfer costs for the dataset, personnel costs such as travel and individual laptops, etc. We note however one limitation from our study: we do not take into account the lifecycle of the hardware used. Unfortunately, numbers are scarcely available, and not made public by the main manufacturers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "5 Carbon footprint of the Noor project", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Factors influencing the carbon footprint of large models", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We begin by accounting for the electricity consumption of all aspects of the project. The impact of this consumption will be highly dependent on the carbon intensity of the electricity mix used. Nonelectric sources (e.g., international flights) will be added to the carbon budget in a second phase.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Electricity consumption", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "The energy consumption of data depends on both the energy required for powering the disks to store the data, and the energy consumed when moving the data from one server to another. We average storage costs over the 6 months of the project.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "Storage. Although disk wattage is generally reported on per-disk level, Posani et al., 2019 estimates the power per TB of data using aggregated technical specifications. The paper reports that the average peak consumption of cloud storage is around 11.3W/TB. It means an energy consumption of 99 kWh/TB a year. This estimation considers a PUE of 1.6 and a redundancy factor of 2 since managed services will also have a back-up.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
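
{

"text": "A minimal sketch of the per-TB conversion used above, assuming only that the 11.3 W/TB figure is sustained year-round (PUE and redundancy are already folded into it); the function name is ours:

def storage_kwh_per_tb_year(watt_per_tb=11.3):
    # 11.3 W/TB sustained over a full year, expressed in kWh
    return watt_per_tb * 24 * 365 / 1000.0

print(f'{storage_kwh_per_tb_year():.0f} kWh per TB per year')  # ~99 kWh",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data storage and transfers",

"sec_num": "5.1.1"

},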
|
{ |
|
"text": "The breakdown of our data storage is as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Curated data. Including both raw and processed data, we have accumulated around 2TB of curated data. This is stored for the 6 months of the project, resulting in 99kWh used.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Bulk data. We use Common Crawl (CC) for acquiring large amounts of web data. Each CC dump is on average around 10TB, and we discard it immediately after processing it. On average, it takes 24 hours to fully process a dump: we used 21 dumps from CC, meaning we stored 210TB of data for 24hours, equivalent to 57 kWh of energy consumption. After processing the dumps, we got on average 1.2TB of data per dump, thus 25TB in total. Considering that this data will be stored for 6 months, we end up with 1.3 MWh of energy consumption for the bulk data. Note that we keep the processed data in all languages (not just Modern Standard Arabic).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Models. The weights of the Noor models (1.3B, 2.7B, 6.7B and 13B) are respectively 2.6GB, 5.4G, 13.4GB, and 26GB in halfprecision. This corresponds to training checkpoints (including the full-precision optimizer) of 20.8GB, 43.2GB, 107.2GB, and 208GB. We save such checkpoints every 10B tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "In total, we end-up with 5.7TB of model weights and intermediary checkpoints for future analysis and interpretability work, consuming 0.3MWh in total.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "Transfers. Posani et al., 2019 provided an estimate of 23.9 kJ per GB (6.38 kWh per TB) transferred, using the formula of Baliga et al., 2011 and the same hypothesis as Aslan et al., 2017 (800km average distance between core nodes). The 210TB of CC data are downloaded on the preprocessing servers once; the 25TB of processed data are moved once to our archival machines, and another time to the HPC used for training; the curated data is downloaded once, moved to the archival machines, and then moved to the HPC; the 5.7TB of models are moved once from our HPC, and then to our inference servers for final models or to workstations for intermediary checkpoints. Consequently, we estimate the transfer energy bill at 1.8 MWh.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
|
{ |
|
"text": "Total. Thus, the total energy consumption of data is estimated to be about 3.5 MWh, dominated by the multilingual Common Crawl data. We note that as ideal dataset size increases sublinearly with model size (Kaplan et al., 2020), we expect checkpoints and model transfers to eventually dominate the costs of storage and transfer for larger models. Note that we neglect costs linked to a potential public release of the models, as it is difficult to predict traffic. As a rough estimation, 10,000 downloads of the 13B model would represent 260TB of traffic, and 1.66MWh consumed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data storage and transfers", |
|
"sec_num": "5.1.1" |
|
}, |
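
{

"text": "The bookkeeping above can be reproduced with the short sketch below, using the 99 kWh/TB/year storage figure and the 6.38 kWh/TB transfer figure; the storage durations and the per-item transfer counts follow our reading of the paragraphs above, and small rounding differences with the reported totals remain:

STORAGE_KWH_PER_TB_YEAR = 99.0
TRANSFER_KWH_PER_TB = 6.38

def storage_kwh(tb, days):
    return tb * STORAGE_KWH_PER_TB_YEAR * days / 365.0

storage = (
    storage_kwh(2, 182)      # curated data, ~6 months       (~99 kWh)
    + storage_kwh(210, 1)    # 21 raw CC dumps, 24h each     (~57 kWh)
    + storage_kwh(25, 182)   # processed CC data, ~6 months  (~1.2 MWh)
    + storage_kwh(5.7, 182)  # weights and checkpoints       (~0.3 MWh)
)

transfer_tb = 210 + 2 * 25 + 3 * 2 + 2 * 5.7  # downloads and moves listed above
transfer = transfer_tb * TRANSFER_KWH_PER_TB

print(f'storage  ~{storage / 1000:.2f} MWh')
print(f'transfer ~{transfer / 1000:.2f} MWh')
print(f'total    ~{(storage + transfer) / 1000:.2f} MWh')  # ~3.5 MWh",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data storage and transfers",

"sec_num": "5.1.1"

},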
|
{ |
|
"text": "We take all text data through a pipeline inspired by CCNet (Wenzek et al., 2019) for preprocessing. This pipeline takes care of deduplication, language identification, and finally quality filtering with a ures , we estimate the average power consumption of each node at 350W; hence, the power of the cluster is 5.6kW. We processed 21 dumps of Com-monCrawl, plus our curated data, for a total of 381 wall-clock hours. Accordingly, assuming a PUE of 1.1 as reported by Google, the total energy consumed by data preprocessing is 2.35MWh.", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 80, |
|
"text": "(Wenzek et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 205, |
|
"end": 209, |
|
"text": "ures", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data processing", |
|
"sec_num": "5.1.2" |
|
}, |
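
{

"text": "The preprocessing energy figure follows from a one-line calculation; the minimal sketch below reproduces it, with the 16-node count inferred from the quoted per-node (350W) and cluster (5.6kW) powers:

cluster_power_kw = 16 * 0.350  # 16 nodes at ~350 W each -> 5.6 kW
wall_clock_hours = 381
pue = 1.1

energy_mwh = cluster_power_kw * wall_clock_hours * pue / 1000.0
print(f'preprocessing ~{energy_mwh:.2f} MWh')  # ~2.35 MWh",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Data processing",

"sec_num": "5.1.2"

},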
|
{ |
|
"text": "Note that for CommonCrawl data, this results in data processed for every language supported (176 for identification, 48 for quality filtering). Accordingly, this cost could be amortised over future projects. For high-resource languages, this also results in very large amounts of data: processing more dumps would not be necessary, even to train a 1 trillion parameters model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data processing", |
|
"sec_num": "5.1.2" |
|
}, |
|
{ |
|
"text": "We carried experiments to validate tokenization methods, dataset composition, tune hyperparameters, and establish scaling laws. This early research and development work was performed on MeluXina, a high-performance super-computer located in Luxembourg. We used a total of 16,800 A100hours in this phase. Each node used in MeluXina has 4 A100 SXM 40GB with a TDP of 400W, and two AMD EPYC 7763 CPUs with a TDP of 280W. They report a PUE of 1.35. Thus, we estimate the consumption of this R&D phase to be of 10.7MWh.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research and development", |
|
"sec_num": "5.1.3" |
|
}, |
|
{ |
|
"text": "We expect the budget of this phase to roughly scale with model size. Indeed, debugging potential issues (e.g., numerical instabilities , etc.) for the final larger model will cost significantly more.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Research and development", |
|
"sec_num": "5.1.3" |
|
}, |
|
{ |
|
"text": "Using the C = 6N D approximation, it is possible to calculate in advance the training budget required for a specific model. We observe an ef-fective throughput with our Megatron+DeepSpeed codebase of around 100 TFLOPs 2 across models, in line with the state-of-the-art. We train four main models (1.5B, 2.7B, 6.7B, 13B) on 150B tokens.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main training", |
|
"sec_num": "5.1.4" |
|
}, |
|
{ |
|
"text": "We train the smaller model on MeluXina, but the other three on our own HPC cluster. Each node contains 8 A100 80GB and 2 AMD EPYC 7763 CPUs. The PUE of our data center is 1.5, 20% more efficient than the world average. Table 1 outlines the costs of the main training. The total electric energy consumed to train the Noor suite of models is thus 41.6 MWh, 55% of it spent on the largest 13B model.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 219, |
|
"end": 226, |
|
"text": "Table 1", |
|
"ref_id": "TABREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main training", |
|
"sec_num": "5.1.4" |
|
}, |
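
{

"text": "As an illustrative cross-check, the sketch below converts C = 6ND and the ~100 TFLOPS effective per-GPU throughput quoted above into A100-hours, and then into energy for the 13B run. The ~470 W per-GPU draw (GPU plus a share of CPUs and node overhead) is an assumption of ours, not a figure from the paper, and the PUE of 1.5 is the one reported above:

EFFECTIVE_TFLOPS_PER_GPU = 100.0  # effective model FLOPs, as reported above
D = 150e9                         # training tokens

def a100_hours(n_params):
    flops = 6.0 * n_params * D    # C = 6ND
    return flops / (EFFECTIVE_TFLOPS_PER_GPU * 1e12) / 3600.0

for n in (1.5e9, 2.7e9, 6.7e9, 13e9):
    print(f'{n / 1e9:.1f}B: ~{a100_hours(n):,.0f} A100-hours')

# Illustrative energy for the 13B run under the assumed ~470 W per GPU and PUE 1.5
energy_mwh = a100_hours(13e9) * 0.470 * 1.5 / 1000.0
print(f'13B training ~{energy_mwh:.1f} MWh')  # close to 55% of the 41.6 MWh total",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Main training",

"sec_num": "5.1.4"

},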
|
{ |
|
"text": "As the models of Noor have yet to be deployed, this is only a prospective estimate. Inference costs in general are difficult to estimate in advance, even more so for open source models which will be deployed to platforms with varying characteristics. We provide an estimate of the energy consumption during inference per generated token.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "5.1.5" |
|
}, |
|
{ |
|
"text": "We thereafter denote as processed tokens the tokens in the original prompt sent to the model, and as generated tokens the tokens generated by the model using the prompt. To simplify calculations, we make the following assumptions from our experience with another large-scale API: (1) an A100 is used, which is sufficient for Noor-13B, but could be reduced to a more efficient T4 for Noor-1.5B/2.7B;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "5.1.5" |
|
}, |
|
{ |
|
"text": "(2) inference time per generated token is constant, whichever the number of processed tokens (per our benchmarks, thanks to caching, this is true up to 512 processed tokens roughly); (3) batch size is assumed to be 1, as batching is more challenging and less consistent for inference workloads.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "5.1.5" |
|
}, |
|
{ |
|
"text": "Under these hypothesises, an A100 can generate up to 72,000 tokens per hour.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "5.1.5" |
|
}, |
|
{ |
|
"text": "Accordingly, we estimate that 26 Joules are required per token generated (400W Figure 2 : Breakdown of the electricity consumption (total 59.14 MWh) of the Noor project. Data preprocessing is included in R&D, amounting for 20% of it. We also note that R&D and dataset costs could be amortised through other projects or larger models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 79, |
|
"end": 87, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "5.1.5" |
|
}, |
|
{ |
|
"text": "for the GPU, 70W for the CPU, and 1.1 PUE on Google Cloud imply 517Wh of energy consumption for 72,000 tokens. Converted to Joule, it results in 26 Joules per token.) Accordingly, 3 billion tokens would have to be generated for inference costs to catch up with training costs. At some point during its beta, GPT-3 was reported to generate 4.5 billion words per day (Pilipiszyn, 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 365, |
|
"end": 383, |
|
"text": "(Pilipiszyn, 2021)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Inference", |
|
"sec_num": "5.1.5" |
|
}, |
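
{

"text": "A minimal sketch of the per-token estimate and of the break-even point against the ~22.9 MWh spent training the 13B model (55% of the 41.6 MWh reported above); variable names are ours:

gpu_w, cpu_w, pue = 400.0, 70.0, 1.1
tokens_per_hour = 72_000

wh_per_hour = (gpu_w + cpu_w) * pue                        # ~517 Wh per hour
joules_per_token = wh_per_hour * 3600.0 / tokens_per_hour  # ~26 J
print(f'~{joules_per_token:.0f} J per generated token')

training_joules = 0.55 * 41.6e3 * 3.6e6  # 13B training energy, kWh -> J
print(f'break-even after ~{training_joules / joules_per_token:.1e} generated tokens')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Inference",

"sec_num": "5.1.5"

},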
|
{ |
|
"text": "Beyond costs related to data, R&D, training, and inference, one may wonder if direct electricity use from scientists involved in the project is significant. Assuming that the average laptop consumes 70W, plus 30W for an external screen, six research scientists dedicating 100% of their time during 6 months for this project, 8 hours per day, will use up 0.604MWh. We could also include costs of e-mail exchanges and video-conferences specifically, but these were found to be negligible in Aujoux et al., 2021 . We round up the marginal costs to 1MWh, and note that this is but a rough estimate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 489, |
|
"end": 508, |
|
"text": "Aujoux et al., 2021", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional costs", |
|
"sec_num": "5.1.6" |
|
}, |
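
{

"text": "A minimal sketch of this estimate; the ~126 working days used to represent 6 months is our assumption:

power_w = 70 + 30      # laptop + external screen
scientists = 6
hours_per_day = 8
working_days = 126     # roughly 6 months of working days (assumption)

energy_mwh = power_w * scientists * hours_per_day * working_days / 1e6
print(f'~{energy_mwh:.3f} MWh')  # ~0.6 MWh, rounded up to 1 MWh above",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Additional costs",

"sec_num": "5.1.6"

},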
|
{ |
|
"text": "We showed that the total electricity consumption of the Noor project is not only about training the final models, as outlined in Figure 2 . Nearly a third of the energy consumed (30%) went to tasks outside of main models pretraining.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 137, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "5.1.7" |
|
}, |
|
{ |
|
"text": "Because of larger uncertainties, we keep the serv-ing/inference assessment out of the previous budget. However, especially in the context of openly available models, the inference budget can rapidly catch up with the total budget outlined in 2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Summary", |
|
"sec_num": "5.1.7" |
|
}, |
|
{ |
|
"text": "Now, from the electricity consumption, and using information on the local carbon intensity, we will derive the full footprint of the Noor project. We will also add energy use coming from non-electric sources (e.g., flights). As the carbon intensity of the electricity mix varies significantly across regions, we outlined below the locations of interest:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Storage. We used Amazon S3 in Bahrain;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 R&D. We used a GCP CPU cluster located in Netherlands, and MeluXina in Luxembourg;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Main training. The smaller 1.3B model was trained on MeluXina, and the remaining models were trained on our dedicated HPC platform in the United Arab Emirates (UAE);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "\u2022 Other. Six full-time scientists were involved, half in France and half in the UAE. Table 2 shows the resulting carbon footprint for each of the development stages of Noor project. This highlights the importance of location for carbon footprint: notably, all calculations on performed on the relatively low-carbon MeluXina HPC end-up having very limited costs, even compared to small items like storage in Bahrain.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 92, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In addition to these development costs, we consider the carbon footprint of three round-trip flights of four scientists between Paris and Abu Dhabi. These trips were taken to run training workshops, brainstorming sessions, and discussions related to the project. We use the carbon emissions simulator of the International Civil Aviation Organization. One round-trip emits 527 kgCO2e per person, totalling 6.4 tons of emissions over all trips.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
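
{

"text": "The flight total follows directly from the per-person figure; a minimal sketch (the small difference with the 6.4 t quoted above is rounding):

kg_per_round_trip = 527  # Paris-Abu Dhabi, per person, ICAO calculator
people, round_trips = 4, 3

total_t = kg_per_round_trip * people * round_trips / 1000.0
print(f'~{total_t:.1f} tCO2e from flights')  # ~6.3 t",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Carbon footprint",

"sec_num": "5.2"

},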
|
{ |
|
"text": "Finally, Figure 3 displays the total distribution of the carbon footprint of the project. As shown in the figure, factors like flights may be usually neglected, but have a significant contribution in the total carbon footprint. Specifically, as conference returns in-person, this is a systematic impact that exists on most papers. In the case of Noor, the few flights operated account for 18% of the total carbon emission of the whole project. Interestingly, we note that with increasingly clean electricity and efficient data centers, the exogenous costs linked to flights and personnel are bound to increase in proportional impact.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 17, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Inference. Forecasting the carbon footprint of inference is harder for open models: as they may be downloaded and deployed by anyone, it is impossible to predict the carbon intensity of the electricity they will use. We study two scenarios: an intermediate one, based on the world average emission per kWh (475 gCO2e/kWh) and a best-case one, based on the low-impact French mix (56 gCO2e/kWh). These two scenarios correspond to around 300,000 tokens generated per kgCO2e, or to 2,500,000 tokens generated per kgCO2e in the best-case. Going back to the 4.5 billion words per day of GPT-3, this amounts to 30 tons of CO2e per day and 3.5 tons. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Carbon footprint", |
|
"sec_num": "5.2" |
|
}, |
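
{

"text": "A minimal sketch converting the ~26 J per generated token estimate into tokens per kgCO2e under the two grid-intensity scenarios; the function name is ours:

JOULES_PER_TOKEN = 26.0
JOULES_PER_KWH = 3.6e6

def tokens_per_kgco2e(intensity_g_per_kwh):
    tokens_per_kwh = JOULES_PER_KWH / JOULES_PER_TOKEN
    return tokens_per_kwh / (intensity_g_per_kwh / 1000.0)

print(f'world average (475 g/kWh): ~{tokens_per_kgco2e(475):.2e} tokens per kgCO2e')
print(f'French mix (56 g/kWh): ~{tokens_per_kgco2e(56):.2e} tokens per kgCO2e')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Carbon footprint",

"sec_num": "5.2"

},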
|
{ |
|
"text": "From our experience with Noor, we highlight some recommendations for future projects to minimise their carbon footprint.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Best practices and recommendations", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "A first angle of attack is to make the machine learning techniques used more efficient.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modelling & engineering", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "\u2022 Efficient architectures. Mixture-of-experts (MoE) models split the large fully-connected layers of a Transformer into distinct experts (Fedus et al., 2021) . Although larger, MoE Transformers can bring significant energy savings during training and inference (Du et al., 2021) , as the experts are only sparsely activated. Recent work demonstrate that they may even scale favorably compared to dense models (Clark et al., 2022) . More broadly, even small changes (e.g. better embeddings, activation functions) may have a non-negligible impact on the overall carbon footprint.", |
|
"cite_spans": [ |
|
{ |
|
"start": 137, |
|
"end": 157, |
|
"text": "(Fedus et al., 2021)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 278, |
|
"text": "(Du et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 429, |
|
"text": "(Clark et al., 2022)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modelling & engineering", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "\u2022 Efficient inference. As we have shown, inference costs can rapidly catch up with training costs: it is also interesting to make the model leaner for inference. Quantization (Yang et al., 2019) reduces numerical precision at inference time and accelerates inference, but it has seen limited adoption with large models. Distillation (i.e., training a smaller model from the outputs of a larger one) is a promising direction, already demonstrated for Transformers applied to vision (Touvron et al., 2021) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 175, |
|
"end": 194, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 481, |
|
"end": 503, |
|
"text": "(Touvron et al., 2021)", |
|
"ref_id": "BIBREF38" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modelling & engineering", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "\u2022 Efficient implementations. Crucially, distributed training implementations must be as efficient as possible, to amortise the large idle consumption of the hardware -MeluXina reports for instance idle power of around 150W per GPU when accounting for CPU cores, infrastructure, etc. This includes taking into account fine-grained effects depending on architectures, such as wave and tile quantization, to achieve the best throughput possible.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Modelling & engineering", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "A second angle of attack is to focus on the hardware used to train these models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hardware", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "\u2022 Data center choice. A data center with a PUE of 1.1 will decrease energy consumption by 39% compared to the world average of 1.8. Low PUE platforms should be preferred.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hardware", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "\u2022 Local carbon intensity. As highlighted by Table 2 , the carbon intensity of the electricity mix significantly impacts the final footprint. Locating training in an area with a clean mix is an easy step to take that can drastically cut the footprint of a project. This is especially easy to do on online cloud platforms, which have many areas of availability.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 44, |
|
"end": 51, |
|
"text": "Table 2", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Hardware", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "\u2022 Efficient inference. Carefully selecting a proper AI accelerator for managed inference workloads can limit the footprint of model use. For instance, for smaller models (<3B), it may be possible to use T4s rather than A100s, which are 20% more energy efficient per FLOP than A100s. Finally, specialised accelerators are also starting to become available (Reuther et al., 2020) . We note that this may however require specific developments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 355, |
|
"end": 377, |
|
"text": "(Reuther et al., 2020)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Hardware", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Finally, it is important to not underestimate costs beyond machine learning workloads.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other practices", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "\u2022 Minimising exogenous impact. Although we found the final footprint to be dominated by the main training runs, we still note the significant impact of the international flights taken during this cooperation (20% of the final footprint). Minimising such high-intensity cost center is important.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other practices", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "\u2022 Costs reporting and offset. The full cost of model development is rarely, if ever, reported in the literature. We highly recommend the AI community to start reporting the full energy consumption and the CO2e of their projects. This reporting can also be used as the basis for offsetting carbon emissions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Other practices", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We undertook an end-to-end assessment of the carbon footprint associated with the development of an extreme-scale language model. We took into account data collection and storage, research and development, pretraining, and included estimates for future serving and inference. We also added personnel costs, such as international flights to run training workshops and brainstorming sessions. In total, we estimate the development of the suite of the four Noor models to have emitted 36.5 tons of CO2, 65% of which for training the models, 18% for the international flights, 12% for data storage, and 4% for small-scale research and development experiments. To put this in perspective, the average carbon footprint per individual in the US is around 20 tons, so our project generated a little over two years of individual US emissions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We find that the main driver of this carbon footprint is the carbon intensity of the mix used for model training. Appropriately selecting the location of calculations can significantly reduce the environmental impact of a project. For instance, in this project, running all computations in France would have reduced the total footprint to 14.9 tCO2e, 42% of which from the international flights. As the impact of the computations themselves become smaller, it is important for practitionners to more carefully weigh in exogenous contributions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "All-in-all, with careful considerations around data center choice, it is possible to run extremescale NLP projects with a low carbon impact.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Finally, we also identified that large-scale inference could also rapidly outtake pretraining costs in terms of carbon impact. Inference, if not centrally managed, is harder to control: with a publicly available model, it will happen on hardware decided by the end user. We thus think its equally important for practitioners to alert users regarding best efficient inference practices, and regarding best practices to limit the environmental cost of computations (e.g. choosing an efficient data center, running inference in a country with a low-impact mix, etc.)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussion and conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "A PF-day is 1 PFLOPs (10 A100) sustained for a day.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "These are effective FLOPs for training the model, not hardware FLOPs. Hardware FLOPs are closer to 150 TFLOPs.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Aragpt2: Pre-trained transformer for arabic language generation", |
|
"authors": [ |
|
{ |
|
"first": "Wissam", |
|
"middle": [], |
|
"last": "Antoun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fady", |
|
"middle": [], |
|
"last": "Baly", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hazem", |
|
"middle": [], |
|
"last": "Hajj", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wissam Antoun, Fady Baly, and Hazem Hajj. 2021. Aragpt2: Pre-trained transformer for arabic lan- guage generation.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Electricity intensity of internet data transmission: Untangling the estimates: Electricity intensity of data transmission", |
|
"authors": [ |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Aslan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kieren", |
|
"middle": [], |
|
"last": "Mayers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Koomey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "France", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Journal of Industrial Ecology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1111/jiec.12630" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joshua Aslan, Kieren Mayers, Jonathan Koomey, and Chris France. 2017. Electricity intensity of internet data transmission: Untangling the estimates: Elec- tricity intensity of data transmission. Journal of In- dustrial Ecology, 22.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Estimating the carbon footprint of the grand project, a multi-decade astrophysics experiment", |
|
"authors": [ |
|
{ |
|
"first": "Clarisse", |
|
"middle": [], |
|
"last": "Aujoux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kumiko", |
|
"middle": [], |
|
"last": "Kotera", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Odile", |
|
"middle": [], |
|
"last": "Blanchard", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Astroparticle Physics", |
|
"volume": "131", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.astropartphys.2021.102587" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Clarisse Aujoux, Kumiko Kotera, and Odile Blanchard. 2021. Estimating the carbon footprint of the grand project, a multi-decade astrophysics experiment. As- troparticle Physics, 131:102587.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Green cloud computing: Balancing energy in processing, storage, and transport. Proceedings of the IEEE", |
|
"authors": [ |
|
{ |
|
"first": "Jayant", |
|
"middle": [], |
|
"last": "Baliga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Ayre", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kerry", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodney", |
|
"middle": [], |
|
"last": "Tucker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "", |
|
"volume": "99", |
|
"issue": "", |
|
"pages": "149--167", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/JPROC.2010.2060451" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jayant Baliga, Robert Ayre, Kerry Hinton, and Rodney Tucker. 2011. Green cloud computing: Balancing energy in processing, storage, and transport. Pro- ceedings of the IEEE, 99:149 -167.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Evaluating the carbon footprint of nlp methods: a survey and analysis of existing tools", |
|
"authors": [ |
|
{ |
|
"first": "Nesrine", |
|
"middle": [], |
|
"last": "Bannour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sahar", |
|
"middle": [], |
|
"last": "Ghannay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aur\u00e9lie", |
|
"middle": [], |
|
"last": "N\u00e9v\u00e9ol", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anne-Laure", |
|
"middle": [], |
|
"last": "Ligozat", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "EMNLP, Workshop SustaiNLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nesrine Bannour, Sahar Ghannay, Aur\u00e9lie N\u00e9v\u00e9ol, and Anne-Laure Ligozat. 2021. Evaluating the carbon footprint of nlp methods: a survey and analysis of existing tools. In EMNLP, Workshop SustaiNLP.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "On the dangers of stochastic parrots: Can language models be too big?", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Emily", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Timnit", |
|
"middle": [], |
|
"last": "Bender", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angelina", |
|
"middle": [], |
|
"last": "Gebru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shmargaret", |
|
"middle": [], |
|
"last": "Mcmillan-Major", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Shmitchell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "610--623", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emily M Bender, Timnit Gebru, Angelina McMillan- Major, and Shmargaret Shmitchell. 2021. On the dangers of stochastic parrots: Can language models be too big? In Proceedings of the 2021 ACM Confer- ence on Fairness, Accountability, and Transparency, pages 610-623.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "On the opportunities and risks of foundation models", |
|
"authors": [ |
|
{ |
|
"first": "Rishi", |
|
"middle": [], |
|
"last": "Bommasani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Drew", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ehsan", |
|
"middle": [], |
|
"last": "Hudson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russ", |
|
"middle": [], |
|
"last": "Adeli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Simran", |
|
"middle": [], |
|
"last": "Altman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Arora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sydney Von Arx", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeannette", |
|
"middle": [], |
|
"last": "Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bohg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brunskill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2108.07258" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rishi Bommasani, Drew A Hudson, Ehsan Adeli, Russ Altman, Simran Arora, Sydney von Arx, Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. 2021. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Unified scaling laws for routed language models", |
|
"authors": [ |
|
{ |
|
"first": "Aidan", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "De Las Casas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aurelia", |
|
"middle": [], |
|
"last": "Guy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arthur", |
|
"middle": [], |
|
"last": "Mensch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michela", |
|
"middle": [], |
|
"last": "Paganini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Hoffmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bogdan", |
|
"middle": [], |
|
"last": "Damoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Blake", |
|
"middle": [], |
|
"last": "Hechtman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Borgeaud", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2202.01169" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aidan Clark, Diego de las Casas, Aurelia Guy, Arthur Mensch, Michela Paganini, Jordan Hoffmann, Bog- dan Damoc, Blake Hechtman, Trevor Cai, Se- bastian Borgeaud, et al. 2022. Unified scaling laws for routed language models. arXiv preprint arXiv:2202.01169.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity", |
|
"authors": [ |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Fedus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barret", |
|
"middle": [], |
|
"last": "Zoph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2101.03961" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "William Fedus, Barret Zoph, and Noam Shazeer. 2021. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. arXiv preprint arXiv:2101.03961.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "The Pile: An 800gb dataset of diverse text for language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Leo", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stella", |
|
"middle": [], |
|
"last": "Biderman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sid", |
|
"middle": [], |
|
"last": "Black", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laurence", |
|
"middle": [], |
|
"last": "Golding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Travis", |
|
"middle": [], |
|
"last": "Hoppe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Charles", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Phang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Horace", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anish", |
|
"middle": [], |
|
"last": "Thite", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noa", |
|
"middle": [], |
|
"last": "Nabeshima", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shawn", |
|
"middle": [], |
|
"last": "Presser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Connor", |
|
"middle": [], |
|
"last": "Leahy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2101.00027" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leo Gao, Stella Biderman, Sid Black, Laurence Gold- ing, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, Shawn Presser, and Connor Leahy. 2020. The Pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Chasing carbon: The elusive environmental footprint of computing", |
|
"authors": [ |
|
{ |
|
"first": "Udit", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Young", |
|
"middle": [ |
|
"Geun" |
|
], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvia", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Tse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Hsien-Hsin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gu-Yeon", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carole-Jean", |
|
"middle": [], |
|
"last": "Brooks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "2021 IEEE International Symposium on High-Performance Computer Architecture (HPCA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "854--867", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Udit Gupta, Young Geun Kim, Sylvia Lee, Jor- dan Tse, Hsien-Hsin S Lee, Gu-Yeon Wei, David Brooks, and Carole-Jean Wu. 2021. Chasing car- bon: The elusive environmental footprint of com- puting. In 2021 IEEE International Symposium on High-Performance Computer Architecture (HPCA), pages 854-867. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Scaling laws for autoregressive generative modeling", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Henighan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jared", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mor", |
|
"middle": [], |
|
"last": "Katz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Hesse", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Jackson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heewoo", |
|
"middle": [], |
|
"last": "Jun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [], |
|
"last": "Tom", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Scott", |
|
"middle": [], |
|
"last": "Dhariwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gray", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2010.14701" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Henighan, Jared Kaplan, Mor Katz, Mark Chen, Christopher Hesse, Jacob Jackson, Heewoo Jun, Tom B Brown, Prafulla Dhariwal, Scott Gray, et al. 2020. Scaling laws for autoregressive generative modeling. arXiv preprint arXiv:2010.14701.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "What changes can large-scale language models bring? intensive study on hyperclova: Billions-scale korean generative pretrained transformers", |
|
"authors": [ |
|
{ |
|
"first": "Boseop", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hyoungseok", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sang-Woo", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gichang", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyun", |
|
"middle": [], |
|
"last": "Kwak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunghyun", |
|
"middle": [], |
|
"last": "Dong Hyeon Jeon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungju", |
|
"middle": [], |
|
"last": "Park", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seonhoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongpil", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Seo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2109.04650" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Boseop Kim, HyoungSeok Kim, Sang-Woo Lee, Gichang Lee, Donghyun Kwak, Dong Hyeon Jeon, Sunghyun Park, Sungju Kim, Seonhoon Kim, Dong- pil Seo, et al. 2021. What changes can large-scale language models bring? intensive study on hyper- clova: Billions-scale korean generative pretrained transformers. arXiv preprint arXiv:2109.04650.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Quantifying the carbon emissions of machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Lacoste", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandra", |
|
"middle": [], |
|
"last": "Luccioni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Dandres", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1910.09700" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexandre Lacoste, Alexandra Luccioni, Victor Schmidt, and Thomas Dandres. 2019. Quantifying the carbon emissions of machine learning. arXiv preprint arXiv:1910.09700.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Jurassic-1: Technical details and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Opher", |
|
"middle": [], |
|
"last": "Lieber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Or", |
|
"middle": [], |
|
"last": "Sharir", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barak", |
|
"middle": [], |
|
"last": "Lenz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Shoham", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Opher Lieber, Or Sharir, Barak Lenz, and Yoav Shoham. 2021. Jurassic-1: Technical details and evaluation. White Paper. AI21 Labs.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Energy usage reports: Environmental awareness as part of algorithmic accountability", |
|
"authors": [ |
|
{ |
|
"first": "Kadan", |
|
"middle": [], |
|
"last": "Lottick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Silvia", |
|
"middle": [], |
|
"last": "Susai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Sorelle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Friedler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wilson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Workshop on Tackling Climate Change with Machine Learning at NeurIPS", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kadan Lottick, Silvia Susai, Sorelle A. Friedler, and Jonathan P. Wilson. 2019. Energy usage reports: Environmental awareness as part of algorithmic ac- countability. Workshop on Tackling Climate Change with Machine Learning at NeurIPS 2019.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Global warming of 1.5 c. An IPCC Special Report on the impacts of global warming of", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pidcock", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "R Pidcock, et al. 2018. Global warming of 1.5 c. An IPCC Special Report on the impacts of global warm- ing of, 1(5).", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Efficient large-scale language model training on gpu clusters using megatron-lm", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Narayanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Shoeybi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jared", |
|
"middle": [], |
|
"last": "Casper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Legresley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mostofa", |
|
"middle": [], |
|
"last": "Patwary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vijay", |
|
"middle": [], |
|
"last": "Korthikanti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dmitri", |
|
"middle": [], |
|
"last": "Vainbrand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prethvi", |
|
"middle": [], |
|
"last": "Kashinkunti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Bernauer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bryan", |
|
"middle": [], |
|
"last": "Catanzaro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--15", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deepak Narayanan, Mohammad Shoeybi, Jared Casper, Patrick LeGresley, Mostofa Patwary, Vijay Korthikanti, Dmitri Vainbrand, Prethvi Kashinkunti, Julie Bernauer, Bryan Catanzaro, et al. 2021. Efficient large-scale language model training on gpu clusters using megatron-lm. In Proceedings of the International Conference for High Perfor- mance Computing, Networking, Storage and Anal- ysis, pages 1-15.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "The Carbon Footprint of Machine Learning Training Will Plateau", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Patterson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Gonzalez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urs", |
|
"middle": [], |
|
"last": "H\u00f6lzle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung", |
|
"middle": [], |
|
"last": "Quoc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lluis-Miquel", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Munguia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Rothchild", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maud", |
|
"middle": [], |
|
"last": "So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Texier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.36227/techrxiv.19139645.v2" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Patterson, Joseph Gonzalez, Urs H\u00f6lzle, Quoc Hung Le, Chen Liang, Lluis-Miquel Munguia, Daniel Rothchild, David So, Maud Texier, and Jef- frey Dean. 2022. The Carbon Footprint of Machine Learning Training Will Plateau, Then Shrink.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Carbon emissions and large neural network training", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Patterson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joseph", |
|
"middle": [], |
|
"last": "Gonzalez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lluis-Miquel", |
|
"middle": [], |
|
"last": "Munguia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Rothchild", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maud", |
|
"middle": [], |
|
"last": "Texier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeff", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Patterson, Joseph Gonzalez, Quoc Le, Chen Liang, Lluis-Miquel Munguia, Daniel Rothchild, David So, Maud Texier, and Jeff Dean. 2021. Car- bon emissions and large neural network training.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Gpt-3 powers the next generation of apps", |
|
"authors": [ |
|
{ |
|
"first": "Ashley", |
|
"middle": [], |
|
"last": "Pilipiszyn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashley Pilipiszyn. 2021. Gpt-3 powers the next gener- ation of apps.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "The carbon footprint of distributed cloud storage", |
|
"authors": [ |
|
{ |
|
"first": "Lorenzo", |
|
"middle": [], |
|
"last": "Posani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessio", |
|
"middle": [], |
|
"last": "Paccoia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Moschettini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lorenzo Posani, Alessio Paccoia, and Marco Moschet- tini. 2019. The carbon footprint of distributed cloud storage.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Scaling language models: Methods, analysis & insights from training gopher", |
|
"authors": [ |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Jack W Rae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Borgeaud", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katie", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jordan", |
|
"middle": [], |
|
"last": "Millican", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francis", |
|
"middle": [], |
|
"last": "Hoffmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Aslanides", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roman", |
|
"middle": [], |
|
"last": "Henderson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Susannah", |
|
"middle": [], |
|
"last": "Ring", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Young", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2112.11446" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susan- nah Young, et al. 2021. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Survey of machine learning accelerators", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Reuther", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Michaleas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vijay", |
|
"middle": [], |
|
"last": "Gadepally", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddharth", |
|
"middle": [], |
|
"last": "Samsi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Kepner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE high performance extreme computing conference (HPEC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Reuther, Peter Michaleas, Michael Jones, Vi- jay Gadepally, Siddharth Samsi, and Jeremy Kep- ner. 2020. Survey of machine learning accelerators. In 2020 IEEE high performance extreme computing conference (HPEC), pages 1-12. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Ai accelerator survey and trends", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Reuther", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Michaleas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vijay", |
|
"middle": [], |
|
"last": "Gadepally", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siddharth", |
|
"middle": [], |
|
"last": "Samsi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Kepner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "2021 IEEE High Performance Extreme Computing Conference (HPEC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Reuther, Peter Michaleas, Michael Jones, Vi- jay Gadepally, Siddharth Samsi, and Jeremy Kepner. 2021. Ai accelerator survey and trends. In 2021 IEEE High Performance Extreme Computing Con- ference (HPEC), pages 1-9. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Sorelle Friedler, and Sasha Luccioni. 2021. CodeCarbon: Estimate and Track Carbon Emissions from Machine Learning Computing", |
|
"authors": [ |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kamal", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aditya", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Boris", |
|
"middle": [], |
|
"last": "Feld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liam", |
|
"middle": [], |
|
"last": "Conell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolas", |
|
"middle": [], |
|
"last": "Laskaris", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Blank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Wilson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.5281/zenodo.4658424" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Victor Schmidt, Kamal Goyal, Aditya Joshi, Boris Feld, Liam Conell, Nikolas Laskaris, Doug Blank, Jonathan Wilson, Sorelle Friedler, and Sasha Luc- cioni. 2021. CodeCarbon: Estimate and Track Car- bon Emissions from Machine Learning Computing.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Tamay Besiroglu, Marius Hobbhahn, and Pablo Villalobos. 2022. Compute trends across three eras of machine learning", |
|
"authors": [ |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Sevilla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lennart", |
|
"middle": [], |
|
"last": "Heim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anson", |
|
"middle": [], |
|
"last": "Ho", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2202.05924" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jaime Sevilla, Lennart Heim, Anson Ho, Tamay Be- siroglu, Marius Hobbhahn, and Pablo Villalobos. 2022. Compute trends across three eras of machine learning. arXiv preprint arXiv:2202.05924.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Conference demographics and footprint changed by virtual platforms", |
|
"authors": [ |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Skiles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Euijin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Orad", |
|
"middle": [], |
|
"last": "Reshef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diego", |
|
"middle": [ |
|
"Robalino" |
|
], |
|
"last": "Mu\u00f1oz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Diana", |
|
"middle": [], |
|
"last": "Cintron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mary", |
|
"middle": [ |
|
"Laura" |
|
], |
|
"last": "Lind", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patricia", |
|
"middle": [ |
|
"Perez" |
|
], |
|
"last": "Calleja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robert", |
|
"middle": [], |
|
"last": "Nerenberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrea", |
|
"middle": [], |
|
"last": "Armani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Nature Sustainability", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--8", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matthew Skiles, Euijin Yang, Orad Reshef, Diego Robalino Mu\u00f1oz, Diana Cintron, Mary Laura Lind, Alexander Rush, Patricia Perez Calleja, Robert Nerenberg, Andrea Armani, et al. 2021. Conference demographics and footprint changed by virtual platforms. Nature Sustainability, pages 1-8.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model", |
|
"authors": [ |
|
{ |
|
"first": "Shaden", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mostofa", |
|
"middle": [], |
|
"last": "Patwary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brandon", |
|
"middle": [], |
|
"last": "Norick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Legresley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samyam", |
|
"middle": [], |
|
"last": "Rajbhandari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jared", |
|
"middle": [], |
|
"last": "Casper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shrimai", |
|
"middle": [], |
|
"last": "Prabhumoye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "George", |
|
"middle": [], |
|
"last": "Zerveas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vijay", |
|
"middle": [], |
|
"last": "Korthikanti", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2201.11990" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, et al. 2022. Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model. arXiv preprint arXiv:2201.11990.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Energy and policy considerations for deep learning in nlp", |
|
"authors": [ |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Strubell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananya", |
|
"middle": [], |
|
"last": "Ganesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Emma Strubell, Ananya Ganesh, and Andrew McCal- lum. 2019. Energy and policy considerations for deep learning in nlp.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Artificial intelligence and the climate emergency: Opportunities, challenges, and recommendations", |
|
"authors": [ |
|
{ |
|
"first": "Mariarosaria", |
|
"middle": [], |
|
"last": "Taddeo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Tsamados", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Josh", |
|
"middle": [], |
|
"last": "Cowls", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luciano", |
|
"middle": [], |
|
"last": "Floridi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "One Earth", |
|
"volume": "4", |
|
"issue": "", |
|
"pages": "776--779", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.oneear.2021.05.018" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mariarosaria Taddeo, Andreas Tsamados, Josh Cowls, and Luciano Floridi. 2021. Artificial intelligence and the climate emergency: Opportunities, chal- lenges, and recommendations. One Earth, 4:776- 779.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "The computational limits of deep learning", |
|
"authors": [ |
|
{ |
|
"first": "C", |
|
"middle": [], |
|
"last": "Neil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristjan", |
|
"middle": [], |
|
"last": "Thompson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Keeheon", |
|
"middle": [], |
|
"last": "Greenewald", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manso", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Neil C. Thompson, Kristjan Greenewald, Keeheon Lee, and Gabriel F. Manso. 2020. The computational lim- its of deep learning.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Fanny Cachat van der Haert, Frank Mugisha, et al. 2020. Ai for social good: unlocking the opportunity for positive impact", |
|
"authors": [ |
|
{ |
|
"first": "Nenad", |
|
"middle": [], |
|
"last": "Toma\u0161ev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Cornebise", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Hutter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shakir", |
|
"middle": [], |
|
"last": "Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Angela", |
|
"middle": [], |
|
"last": "Picciariello", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Nature Communications", |
|
"volume": "11", |
|
"issue": "1", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nenad Toma\u0161ev, Julien Cornebise, Frank Hutter, Shakir Mohamed, Angela Picciariello, Bec Con- nelly, Danielle Belgrave, Daphne Ezer, Fanny Cachat van der Haert, Frank Mugisha, et al. 2020. Ai for social good: unlocking the opportunity for positive impact. Nature Communications, 11(1):1- 6.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Training data-efficient image transformers & distillation through attention", |
|
"authors": [ |
|
{ |
|
"first": "Hugo", |
|
"middle": [], |
|
"last": "Touvron", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Cord", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthijs", |
|
"middle": [], |
|
"last": "Douze", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Massa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Sablayrolles", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Herv\u00e9", |
|
"middle": [], |
|
"last": "J\u00e9gou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10347--10357", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hugo Touvron, Matthieu Cord, Matthijs Douze, Fran- cisco Massa, Alexandre Sablayrolles, and Herv\u00e9 J\u00e9gou. 2021. Training data-efficient image trans- formers & distillation through attention. In Inter- national Conference on Machine Learning, pages 10347-10357. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Attention is all you need. Advances in neural information processing systems", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information process- ing systems, 30.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model", |
|
"authors": [ |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aran", |
|
"middle": [], |
|
"last": "Komatsuzaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ben Wang and Aran Komatsuzaki. 2021. GPT-J- 6B: A 6 Billion Parameter Autoregressive Language Model.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Ccnet: Extracting high quality monolingual datasets from web crawl data", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Wenzek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marie-Anne", |
|
"middle": [], |
|
"last": "Lachaux", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexis", |
|
"middle": [], |
|
"last": "Conneau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vishrav", |
|
"middle": [], |
|
"last": "Chaudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Guzm\u00e1n", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armand", |
|
"middle": [], |
|
"last": "Joulin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edouard", |
|
"middle": [], |
|
"last": "Grave", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Wenzek, Marie-Anne Lachaux, Alexis Con- neau, Vishrav Chaudhary, Francisco Guzm\u00e1n, Ar- mand Joulin, and Edouard Grave. 2019. Ccnet: Ex- tracting high quality monolingual datasets from web crawl data.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Quantization networks", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xu", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xinmei", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Houqiang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianqiang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiansheng", |
|
"middle": [], |
|
"last": "Hua", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Yang, Xu Shen, Jun Xing, Xinmei Tian, Houqiang Li, Bing Deng, Jianqiang Huang, and Xi- ansheng Hua. 2019. Quantization networks.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Figure 1: Over the last four years, the size of stateof-the-art language models has doubled every 3-4 months. Note that this trend has been slowing down, due to scale-out limitations.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "Breakdown of the carbon footprint (total 36.5t tC02e) of the Noor project. This breakdown is highly dependent on the localisation of the workloads and the local carbon intensity of the electricity mix.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF0": { |
|
"text": "Training compute budget and energy used for training the Noor models. Assuming a pretraining dataset of 150B tokens and a throughput of 100 TFLOPs per A100.", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"3\">Model Budget [PF-days] Budget [A100-hours]</td><td>HPC</td><td>Consumption [MWh]</td></tr><tr><td>1.3B</td><td>13.5</td><td>3300</td><td>MeluXina</td><td>2.1</td></tr><tr><td>2.7B</td><td>28.1</td><td>6800</td><td>Noor-HPC</td><td>4.8</td></tr><tr><td>6.7B</td><td>69.8</td><td>17000</td><td>Noor-HPC</td><td>11.8</td></tr><tr><td>13B</td><td>135</td><td>33000</td><td>Noor-HPC</td><td>22.9</td></tr><tr><td colspan=\"3\">reference language model trained on Wikipedia.</td><td/><td/></tr><tr><td colspan=\"3\">Processing with our pipeline occurs on a CPU clus-</td><td/><td/></tr><tr><td colspan=\"2\">ter with 768 cores, split over 16 nodes.</td><td/><td/><td/></tr><tr><td colspan=\"3\">Using average high-performance CPUs TDP fig-</td><td/><td/></tr></table>", |
|
"type_str": "table" |
|
}, |
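The budget figures in the training-compute table above can be cross-checked with a short calculation. The sketch below is not taken from the paper: it assumes the common approximation of roughly 6 FLOPs per parameter per training token, combined with the caption's 150B-token dataset and 100 TFLOP/s sustained throughput per A100; the per-GPU power draw used for the energy column is likewise an assumed value, not a reported one.

```python
# Minimal sketch reproducing the training-budget table from first principles.
# The 6*N*D FLOP estimate and the 0.7 kW per-GPU draw are assumptions, not
# figures reported in the table itself.

TOKENS = 150e9            # pretraining tokens (from the table caption)
THROUGHPUT = 100e12       # sustained FLOP/s per A100 (from the table caption)
ASSUMED_KW_PER_GPU = 0.7  # assumed facility-level draw per A100-hour (incl. host, PUE)

def training_budget(params: float) -> tuple[float, float, float]:
    flops = 6 * params * TOKENS                 # total training FLOPs (~6 per param per token)
    pf_days = flops / (1e15 * 86400)            # petaFLOP/s-days
    a100_hours = flops / THROUGHPUT / 3600      # GPU-hours at the assumed throughput
    energy_mwh = a100_hours * ASSUMED_KW_PER_GPU / 1000
    return pf_days, a100_hours, energy_mwh

for name, n in [("1.3B", 1.3e9), ("2.7B", 2.7e9), ("6.7B", 6.7e9), ("13B", 13e9)]:
    pf, hours, mwh = training_budget(n)
    print(f"{name}: {pf:6.1f} PF-days, {hours:8.0f} A100-hours, ~{mwh:4.1f} MWh")
```

The printed values land within rounding distance of the table's 13.5-135 PF-days and 3300-33000 A100-hour entries, which is consistent with the budgets having been derived from the same rule of thumb; the MWh column depends directly on the assumed per-GPU power draw.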
|
"TABREF1": { |
|
"text": "Carbon footprint of each phase of the Noor project. Use MWh Footprint [tCO2e]", |
|
"num": null, |
|
"html": null, |
|
"content": "<table><tr><td colspan=\"3\">Phase Mix [gCO2e/kWh] Storage Amazon S3 Provider Location Bahrain 1188 GCP Netherlands 410 R&D MeluXina Luxembourg 60</td><td>3.5 2.35 10.7</td><td>4.2 0.96 0.65</td></tr><tr><td colspan=\"2\">Training MeluXina Luxembourg Noor-HPC UAE</td><td>60 600</td><td>2.1 39.5</td><td>0.13 23.7</td></tr><tr><td>Others</td><td>France UAE</td><td>56 600</td><td>0.33 0.66</td><td>0.02 0.4</td></tr></table>", |
|
"type_str": "table" |
|
} |
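The per-phase footprints in the table above follow directly from multiplying each phase's electricity use by the local grid carbon intensity. The following is a minimal sketch of that conversion; the row values are transcribed from the table, not recomputed from any external source.

```python
# Footprint [tCO2e] = energy use [MWh] * grid carbon intensity [gCO2e/kWh] / 1000.
# Rows transcribed from the per-phase table; reported figures shown for comparison.

PHASES = [
    # (phase, provider/site, location, gCO2e per kWh, MWh used, reported tCO2e)
    ("Storage",  "Amazon S3", "Bahrain",     1188, 3.50,  4.2),
    ("Storage",  "GCP",       "Netherlands",  410, 2.35,  0.96),
    ("R&D",      "MeluXina",  "Luxembourg",    60, 10.7,  0.65),
    ("Training", "MeluXina",  "Luxembourg",    60, 2.10,  0.13),
    ("Training", "Noor-HPC",  "UAE",          600, 39.5, 23.7),
    ("Others",   "-",         "France",        56, 0.33,  0.02),
    ("Others",   "-",         "UAE",          600, 0.66,  0.4),
]

total = 0.0
for phase, site, loc, intensity, mwh, reported in PHASES:
    # 1 MWh = 1000 kWh and 1e6 g = 1 t, so tCO2e = MWh * (g/kWh) / 1000.
    tco2e = mwh * intensity / 1000
    total += tco2e
    print(f"{phase:8s} {site:9s} {loc:11s} {tco2e:6.2f} t (reported {reported})")
print(f"Subtotal across these phases: {total:.1f} tCO2e")
```

The subtotal comes to roughly 30 tCO2e, below the 36.5 tCO2e project total quoted in the breakdown figure; the remainder presumably corresponds to items not broken out in this table, such as serving and exogenous costs discussed elsewhere in the paper.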
|
} |
|
} |
|
} |