paleDriver7 committed
Commit 76502d8 · verified · 1 Parent(s): f43a2d1

Delete backend/llama3

backend/llama3/.github/ISSUE_TEMPLATE/bug_report.md DELETED
@@ -1,34 +0,0 @@
- ---
- name: Bug report
- about: Create a report to help us reproduce and fix the issue
- ---
-
- **Before submitting a bug, please make sure the issue hasn't already been addressed by searching through the [FAQs](https://ai.meta.com/llama/faq/) and [existing/past issues](https://github.com/facebookresearch/llama/issues).**
-
- ## Describe the bug
- <Please provide a clear and concise description of what the bug is. If relevant, please include a _minimal_ (least lines of code necessary) _reproducible_ (running this will give us the same result as you get) code snippet. Make sure to include the relevant imports.>
-
- ### Minimal reproducible example
- <Remember to wrap the code in ```` ```triple-quotes blocks``` ````>
-
- ```python
- # sample code to reproduce the bug
- ```
-
- ### Output
- <Remember to wrap the output in ```` ```triple-quotes blocks``` ````>
-
- ```
- <paste stacktrace and other outputs here>
- ```
-
- ## Runtime Environment
- - Model: [e.g. `meta-llama-3-8b-instruct`]
- - Using via huggingface?: [yes/no]
- - OS: [e.g. Linux/Ubuntu, Windows]
- - GPU VRAM:
- - Number of GPUs:
- - GPU Make: [e.g. Nvidia, AMD, Intel]
-
- **Additional context**
- Add any other context about the problem or environment here.

backend/llama3/.gitignore DELETED
@@ -1,160 +0,0 @@
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[cod]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- share/python-wheels/
- *.egg-info/
- .installed.cfg
- *.egg
- MANIFEST
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .nox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- *.py,cover
- .hypothesis/
- .pytest_cache/
- cover/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
- db.sqlite3-journal
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- .pybuilder/
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # IPython
- profile_default/
- ipython_config.py
-
- # pyenv
- # For a library or package, you might want to ignore these files since the code is
- # intended to run in multiple environments; otherwise, check them in:
- # .python-version
-
- # pipenv
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
- # install all needed dependencies.
- #Pipfile.lock
-
- # poetry
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
- #poetry.lock
-
- # pdm
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
- #pdm.lock
- # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
- # in version control.
- # https://pdm.fming.dev/#use-with-ide
- .pdm.toml
-
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
- __pypackages__/
-
- # Celery stuff
- celerybeat-schedule
- celerybeat.pid
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
- .dmypy.json
- dmypy.json
-
- # Pyre type checker
- .pyre/
-
- # pytype static type analyzer
- .pytype/
-
- # Cython debug symbols
- cython_debug/
-
- # PyCharm
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
- # and can be added to the global gitignore or merged into this file. For a more nuclear
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- #.idea/

backend/llama3/CODE_OF_CONDUCT.md DELETED
@@ -1,80 +0,0 @@
- # Code of Conduct
-
- ## Our Pledge
-
- In the interest of fostering an open and welcoming environment, we as
- contributors and maintainers pledge to make participation in our project and
- our community a harassment-free experience for everyone, regardless of age, body
- size, disability, ethnicity, sex characteristics, gender identity and expression,
- level of experience, education, socio-economic status, nationality, personal
- appearance, race, religion, or sexual identity and orientation.
-
- ## Our Standards
-
- Examples of behavior that contributes to creating a positive environment
- include:
-
- * Using welcoming and inclusive language
- * Being respectful of differing viewpoints and experiences
- * Gracefully accepting constructive criticism
- * Focusing on what is best for the community
- * Showing empathy towards other community members
-
- Examples of unacceptable behavior by participants include:
-
- * The use of sexualized language or imagery and unwelcome sexual attention or
- advances
- * Trolling, insulting/derogatory comments, and personal or political attacks
- * Public or private harassment
- * Publishing others' private information, such as a physical or electronic
- address, without explicit permission
- * Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
- ## Our Responsibilities
-
- Project maintainers are responsible for clarifying the standards of acceptable
- behavior and are expected to take appropriate and fair corrective action in
- response to any instances of unacceptable behavior.
-
- Project maintainers have the right and responsibility to remove, edit, or
- reject comments, commits, code, wiki edits, issues, and other contributions
- that are not aligned to this Code of Conduct, or to ban temporarily or
- permanently any contributor for other behaviors that they deem inappropriate,
- threatening, offensive, or harmful.
-
- ## Scope
-
- This Code of Conduct applies within all project spaces, and it also applies when
- an individual is representing the project or its community in public spaces.
- Examples of representing a project or community include using an official
- project e-mail address, posting via an official social media account, or acting
- as an appointed representative at an online or offline event. Representation of
- a project may be further defined and clarified by project maintainers.
-
- This Code of Conduct also applies outside the project spaces when there is a
- reasonable belief that an individual's behavior may have a negative impact on
- the project or its community.
-
- ## Enforcement
-
- Instances of abusive, harassing, or otherwise unacceptable behavior may be
- reported by contacting the project team at <[email protected]>. All
- complaints will be reviewed and investigated and will result in a response that
- is deemed necessary and appropriate to the circumstances. The project team is
- obligated to maintain confidentiality with regard to the reporter of an incident.
- Further details of specific enforcement policies may be posted separately.
-
- Project maintainers who do not follow or enforce the Code of Conduct in good
- faith may face temporary or permanent repercussions as determined by other
- members of the project's leadership.
-
- ## Attribution
-
- This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
- available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
- [homepage]: https://www.contributor-covenant.org
-
- For answers to common questions about this code of conduct, see
- https://www.contributor-covenant.org/faq

backend/llama3/CONTRIBUTING.md DELETED
@@ -1,39 +0,0 @@
- # Contributing to Llama 3
- We want to make contributing to this project as easy and transparent as
- possible.
-
- ## Our Development Process
- ... (in particular how this is synced with internal changes to the project)
-
- ## Pull Requests
- We actively welcome your pull requests.
-
- 1. Fork the repo and create your branch from `main`.
- 2. If you've added code that should be tested, add tests.
- 3. If you've changed APIs, update the documentation.
- 4. Ensure the test suite passes.
- 5. Make sure your code lints.
- 6. If you haven't already, complete the Contributor License Agreement ("CLA").
-
- ## Contributor License Agreement ("CLA")
- In order to accept your pull request, we need you to submit a CLA. You only need
- to do this once to work on any of Meta's open source projects.
-
- Complete your CLA here: <https://code.facebook.com/cla>
-
- ## Issues
- We use GitHub issues to track public bugs. Please ensure your description is
- clear and has sufficient instructions to be able to reproduce the issue.
-
- Meta has a [bounty program](http://facebook.com/whitehat/info) for the safe
- disclosure of security bugs. In those cases, please go through the process
- outlined on that page and do not file a public issue.
-
- ## Coding Style
- * 2 spaces for indentation rather than tabs
- * 80 character line length
- * ...
-
- ## License
- By contributing to Llama 3, you agree that your contributions will be licensed
- under the LICENSE file in the root directory of this source tree.

backend/llama3/LICENSE DELETED
@@ -1,84 +0,0 @@
- META LLAMA 3 COMMUNITY LICENSE AGREEMENT
-
- Meta Llama 3 Version Release Date: April 18, 2024
- “Agreement” means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.
-
- “Documentation” means the specifications, manuals and documentation accompanying Meta Llama 3 distributed by Meta at https://llama.meta.com/get-started/.
-
- “Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
-
- “Meta Llama 3” means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at https://llama.meta.com/llama-downloads.
-
- “Llama Materials” means, collectively, Meta’s proprietary Meta Llama 3 and Documentation (and any portion thereof) made available under this Agreement.
-
- “Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).
-
- By clicking “I Accept” below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.
-
- 1. License Rights and Redistribution.
-
- a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.
- b. Redistribution and Use.
- i. If you distribute or make available the Llama Materials (or any derivative works thereof), or a product or service that uses any of them, including another AI model, you shall (A) provide a copy of this Agreement with any such Llama Materials; and (B) prominently display “Built with Meta Llama 3” on a related website, user interface, blogpost, about page, or product documentation. If you use the Llama Materials to create, train, fine tune, or otherwise improve an AI model, which is distributed or made available, you shall also include “Llama 3” at the beginning of any such AI model name.
- ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.
- iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a “Notice” text file distributed as a part of such copies: “Meta Llama 3 is licensed under the Meta Llama 3 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.”
- iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://llama.meta.com/llama3/use-policy), which is hereby incorporated by reference into this Agreement.
- v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Meta Llama 3 or derivative works thereof).
-
- 2. Additional Commercial Terms. If, on the Meta Llama 3 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee’s affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.
-
- 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
-
- 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
-
- 5. Intellectual Property.
- a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you a license to use “Llama 3” (the “Mark”) solely as required to comply with the last sentence of Section 1.b.i. You will comply with Meta’s brand guidelines (currently accessible at https://about.meta.com/brand/resources/meta/company-brand/ ). All goodwill arising out of your use of the Mark will inure to the benefit of Meta.
- b. Subject to Meta’s ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.
- c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.
-
- 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.
-
- 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
-
- Meta Llama 3 Acceptable Use Policy
- Meta is committed to promoting safe and fair use of its tools and features, including Meta Llama 3. If you access or use Meta Llama 3, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at https://llama.meta.com/llama3/use-policy
- Prohibited Uses
- We want everyone to use Meta Llama 3 safely and responsibly. You agree you will not use, or allow others to use, Meta Llama 3 to:
- 1. Violate the law or others’ rights, including to:
- a. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
- i. Violence or terrorism
- ii. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
- iii. Human trafficking, exploitation, and sexual violence
- iv. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
- v. Sexual solicitation
- vi. Any other criminal activity
- b. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
- c. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
- d. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
- e. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
- f. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama Materials
- g. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
-
- 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Meta Llama 3 related to the following:
- a. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
- b. Guns and illegal weapons (including weapon development)
- c. Illegal drugs and regulated/controlled substances
- d. Operation of critical infrastructure, transportation technologies, or heavy machinery
- e. Self-harm or harm to others, including suicide, cutting, and eating disorders
- f. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
-
- 3. Intentionally deceive or mislead others, including use of Meta Llama 3 related to the following:
- a. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
- b. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
- c. Generating, promoting, or further distributing spam
- d. Impersonating another individual without consent, authorization, or legal right
- e. Representing that the use of Meta Llama 3 or outputs are human-generated
- f. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
- g. Fail to appropriately disclose to end users any known dangers of your AI system
-
- Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
- * Reporting issues with the model: https://github.com/meta-llama/llama3
- * Reporting risky content generated by the model: developers.facebook.com/llama_output_feedback
- * Reporting bugs and security concerns: facebook.com/whitehat/info
- * Reporting violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: [email protected]

backend/llama3/Llama3_Repo.jpeg DELETED
Binary file (456 kB)
 
backend/llama3/MODEL_CARD.md DELETED
@@ -1,512 +0,0 @@
- ## Model Details
-
- Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction-tuned generative text models in 8B and 70B sizes. The Llama 3 instruction-tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety.
-
- **Model developers** Meta
-
- **Llama 3 family of models** Llama 3 comes in two sizes — 8B and 70B parameters — in pre-trained and instruction tuned variants.
-
- **Input** Models input text only.
-
- **Output** Models generate text and code only.
-
- **Model Architecture** Llama 3 is an auto-regressive language model that uses an optimized transformer architecture. Llama 3 uses a tokenizer with a vocabulary of 128K tokens, and was trained on sequences of 8,192 tokens. Grouped-Query Attention (GQA) is used for all models to improve inference efficiency. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
-
- |  | Training Data | Params | Context length | GQA | Token count | Knowledge cutoff |
- |---|---|---|---|---|---|---|
- | Llama 3 | A new mix of publicly available online data. | 8B | 8k | Yes | 15T+ | March, 2023 |
- | Llama 3 | A new mix of publicly available online data. | 70B | 8k | Yes | 15T+ | December, 2023 |
-
- Note: Token counts refer to pretraining data only.
-
- **Model Release Date** April 18, 2024.
-
- **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
-
- **License** A custom commercial license is available at: [https://llama.meta.com/llama3/license](https://llama.meta.com/llama3/license)
-
- **Where to send questions or comments about the model** Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3 in applications, please go [here](https://github.com/meta-llama/llama-recipes).
-
- ## Intended Use
-
- **Intended Use Cases** Llama 3 is intended for commercial and research use in English. Instruction tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.
-
- **Out-of-scope** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the [Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/) and [Llama 3 Community License](https://llama.meta.com/llama3/license/). Use in languages other than English**.
-
- **Note: Developers may fine-tune Llama 3 models for languages beyond English provided they comply with the [Llama 3 Community License](https://llama.meta.com/llama3/license/) and the [Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/).
-
- ## Hardware and Software
-
- **Training Factors** We used custom training libraries, Meta's Research SuperCluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.
-
- **Carbon Footprint** Pretraining utilized a cumulative 7.7M GPU hours of computation on hardware of type H100-80GB (TDP of 700W). Estimated total emissions were 2290 tCO2eq, 100% of which were offset by Meta's sustainability program.
-
- |  | Time (GPU hours) | Power Consumption (W) | Carbon Emitted (tCO2eq) |
- |---|---|---|---|
- | Llama 3 8B | 1.3M | 700 | 390 |
- | Llama 3 70B | 6.4M | 700 | 1900 |
- | Total | 7.7M |  | 2290 |
-
- **CO2 emissions during pre-training.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used, adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
-
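Reading the table back: at 700 W per GPU, 1.3M GPU-hours is about 910 MWh, so the 390 tCO2eq figure implies roughly 0.43 tCO2eq per MWh, and the 70B numbers imply about the same factor. A minimal sketch of that arithmetic, using only the figures in the table above:

```python
# Sanity-check the carbon table: GPU-hours x per-GPU power -> energy,
# then the implied emission factor. All inputs come from the table above.
GPU_POWER_KW = 0.7  # H100-80GB TDP of 700 W

for name, gpu_hours, tco2eq in [
    ("Llama 3 8B", 1.3e6, 390),
    ("Llama 3 70B", 6.4e6, 1900),
]:
    mwh = gpu_hours * GPU_POWER_KW / 1000  # total energy in MWh
    print(f"{name}: {mwh:,.0f} MWh, ~{tco2eq / mwh:.2f} tCO2eq/MWh implied")

# Llama 3 8B: 910 MWh, ~0.43 tCO2eq/MWh implied
# Llama 3 70B: 4,480 MWh, ~0.42 tCO2eq/MWh implied
```
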
- ## Training Data
-
- **Overview** Llama 3 was pretrained on over 15 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over 10M human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.
-
- **Data Freshness** The pretraining data has a cutoff of March 2023 for the 8B and December 2023 for the 70B models, respectively.
-
- ## Benchmarks
-
- In this section, we report the results for Llama 3 models on standard automatic benchmarks. For all the evaluations, we use our internal evaluations library. For details on the methodology, see [here](https://github.com/meta-llama/llama3/blob/main/eval_details.md).
-
- ### Base pretrained models
-
- | Category | Benchmark | Llama 3 8B | Llama2 7B | Llama2 13B | Llama 3 70B | Llama2 70B |
- |---|---|---|---|---|---|---|
- | General | MMLU (5-shot) | 66.6 | 45.7 | 53.8 | 79.5 | 69.7 |
- | General | AGIEval English (3-5 shot) | 45.9 | 28.8 | 38.7 | 63.0 | 54.8 |
- | General | CommonSenseQA (7-shot) | 72.6 | 57.6 | 67.6 | 83.8 | 78.7 |
- | General | Winogrande (5-shot) | 76.1 | 73.3 | 75.4 | 83.1 | 81.8 |
- | General | BIG-Bench Hard (3-shot, CoT) | 61.1 | 38.1 | 47.0 | 81.3 | 65.7 |
- | General | ARC-Challenge (25-shot) | 78.6 | 53.7 | 67.6 | 93.0 | 85.3 |
- | Knowledge reasoning | TriviaQA-Wiki (5-shot) | 78.5 | 72.1 | 79.6 | 89.7 | 87.5 |
- | Reading comprehension | SQuAD (1-shot) | 76.4 | 72.2 | 72.1 | 85.6 | 82.6 |
- | Reading comprehension | QuAC (1-shot, F1) | 44.4 | 39.6 | 44.9 | 51.1 | 49.4 |
- | Reading comprehension | BoolQ (0-shot) | 75.7 | 65.5 | 66.9 | 79.0 | 73.1 |
- | Reading comprehension | DROP (3-shot, F1) | 58.4 | 37.9 | 49.8 | 79.7 | 70.2 |
-
- ### Instruction tuned models
-
- | Benchmark | Llama 3 8B | Llama 2 7B | Llama 2 13B | Llama 3 70B | Llama 2 70B |
- |---|---|---|---|---|---|
- | MMLU (5-shot) | 68.4 | 34.1 | 47.8 | 82.0 | 52.9 |
- | GPQA (0-shot) | 34.2 | 21.7 | 22.3 | 39.5 | 21.0 |
- | HumanEval (0-shot) | 62.2 | 7.9 | 14.0 | 81.7 | 25.6 |
- | GSM-8K (8-shot, CoT) | 79.6 | 25.7 | 41.2 | 93.0 | 57.5 |
- | MATH (4-shot, CoT) | 30.0 | 3.8 | 6.7 | 50.4 | 11.6 |
-
- ### Responsibility & Safety
-
- We believe that an open approach to AI leads to better, safer products, faster innovation, and a bigger overall market. We are committed to Responsible AI development and took a series of steps to limit misuse and harm and support the open source community.
-
- Foundation models are widely capable technologies that are built to be used for a diverse range of applications. They are not designed to meet every developer preference on safety levels for all use cases out-of-the-box, as those by their nature will differ across different applications.
-
- Rather, responsible LLM-application deployment is achieved by implementing a series of safety best practices throughout the development of such applications, from model pre-training and fine-tuning to the deployment of systems composed of safeguards that tailor safety to the specific use case and audience.
-
- As part of the Llama 3 release, we updated our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/) to outline the steps and best practices for developers to implement model and system level safety for their application. We also provide a set of resources including [Meta Llama Guard 2](https://llama.meta.com/purple-llama/) and [Code Shield](https://llama.meta.com/purple-llama/) safeguards. These tools have proven to drastically reduce residual risks of LLM systems, while maintaining a high level of helpfulness. We encourage developers to tune and deploy these safeguards according to their needs, and we provide a [reference implementation](https://github.com/meta-llama/llama-recipes/tree/main/recipes/responsible_ai) to get you started.
-
- #### Llama 3-Instruct
-
- As outlined in the Responsible Use Guide, some trade-off between model helpfulness and model alignment is likely unavoidable. Developers should exercise discretion about how to weigh the benefits of alignment and helpfulness for their specific use case and audience. Developers should be mindful of residual risks when using Llama models and leverage additional safety tools as needed to reach the right safety bar for their use case.
-
- <span style="text-decoration:underline;">Safety</span>
-
- For our instruction tuned model, we conducted extensive red teaming exercises, performed adversarial evaluations and implemented safety mitigation techniques to lower residual risks. As with any large language model, residual risks will likely remain, and we recommend that developers assess these risks in the context of their use case. In parallel, we are working with the community to make AI safety benchmark standards transparent, rigorous and interpretable.
-
- <span style="text-decoration:underline;">Refusals</span>
-
- In addition to residual risks, we put a great emphasis on model refusals to benign prompts. Over-refusing not only can impact the user experience but could even be harmful in certain contexts as well. We've heard the feedback from the developer community and improved our fine-tuning to ensure that Llama 3 is significantly less likely to falsely refuse to answer prompts than Llama 2.
-
- We built internal benchmarks and developed mitigations to limit false refusals, making Llama 3 our most helpful model to date.
-
- #### Responsible release
-
- In addition to the responsible use considerations outlined above, we followed a rigorous process that requires us to take extra measures against misuse and critical risks before we make our release decision.
-
- <span style="text-decoration:underline;">Misuse</span>
-
- If you access or use Llama 3, you agree to the Acceptable Use Policy. The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy/](https://llama.meta.com/llama3/use-policy/).
-
- #### Critical risks
-
- <span style="text-decoration:underline;">CBRNE</span> (Chemical, Biological, Radiological, Nuclear, and high yield Explosives)
-
- We have conducted a two-fold assessment of the safety of the model in this area:
-
- * Iterative testing during model training to assess the safety of responses related to CBRNE threats and other adversarial risks.
- * Involving external CBRNE experts to conduct an uplift test assessing the ability of the model to accurately provide expert knowledge and reduce barriers to potential CBRNE misuse, by reference to what can be achieved using web search (without the model).
-
- ### <span style="text-decoration:underline;">Cyber Security</span>
-
- We have evaluated Llama 3 with CyberSecEval, Meta's cybersecurity safety eval suite, measuring Llama 3's propensity to suggest insecure code when used as a coding assistant, and Llama 3's propensity to comply with requests to help carry out cyber attacks, where attacks are defined by the industry standard MITRE ATT&CK cyber attack ontology. On our insecure coding and cyber attacker helpfulness tests, Llama 3 behaved in the same range or safer than models of [equivalent coding capability](https://huggingface.co/spaces/facebook/CyberSecEval).
-
- ### <span style="text-decoration:underline;">Child Safety</span>
-
- Child safety risk assessments were conducted using a team of experts to assess the model's capability to produce outputs that could result in child safety risks, and to inform on any necessary and appropriate risk mitigations via fine-tuning. We leveraged those expert red teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective-based methodologies to assess the model risks along multiple attack vectors. We also partnered with content specialists to perform red teaming exercises assessing potentially violating content while taking account of market-specific nuances or experiences.
-
- ### Community
-
- Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership on AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [GitHub repository](https://github.com/meta-llama/PurpleLlama).
-
- Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community.
-
- ## Ethical Considerations and Limitations
-
- The core values of Llama 3 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3 addresses users and their needs as they are, without inserting unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress.
-
- But Llama 3 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3's potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 3 models, developers should perform safety testing and tuning tailored to their specific applications of the model. As outlined in the Responsible Use Guide, we recommend incorporating [Purple Llama](https://github.com/facebookresearch/PurpleLlama) solutions into your workflows, and specifically [Llama Guard](https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/), which provides a base model to filter input and output prompts to layer system-level safety on top of model-level safety.
-
- Please see the Responsible Use Guide available at [http://llama.meta.com/responsible-use-guide](http://llama.meta.com/responsible-use-guide).
-
- ## Citation instructions
-
- ```
- @article{llama3modelcard,
-   title={Llama 3 Model Card},
-   author={AI@Meta},
-   year={2024},
-   url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md}
- }
- ```
-
- ## Contributors
-
- Aaditya Singh; Aaron Grattafiori; Abhimanyu Dubey; Abhinav Jauhri; Abhinav Pandey; Abhishek Kadian; Adam Kelsey; Adi Gangidi; Ahmad Al-Dahle; Amit Sangani; Ahuva Goldstand; Aiesha Letman; Ajay Menon; Akhil Mathur; Alan Schelten; Alex Vaughan; Amy Yang; Andrei Lupu; Andres Alvarado; Andrew Gallagher; Andrew Gu; Andrew Ho; Andrew Poulton; Andrew Ryan; Angela Fan; Ankit Ramchandani; Anthony Hartshorn; Archi Mitra; Archie Sravankumar; Artem Korenev; Arun Rao; Ashley Gabriel; Ashwin Bharambe; Assaf Eisenman; Aston Zhang; Ash JJhaveri; Aurelien Rodriguez; Austen Gregerson; Ava Spataru; Baptiste Roziere; Ben Maurer; Benjamin Leonhardi; Bernie Huang; Bhargavi Paranjape; Bing Liu; Binh Tang; Bobbie Chern; Brani Stojkovic; Brian Fuller; Catalina Mejia Arenas; Chao Zhou; Charlotte Caucheteux; Chaya Nayak; Ching-Hsiang Chu; Chloe Bi; Chris Cai; Chris Cox; Chris Marra; Chris McConnell; Christian Keller; Christoph Feichtenhofer; Christophe Touret; Chunyang Wu; Corinne Wong; Cristian Canton Ferrer; Damien Allonsius; Daniel Kreymer; Daniel Haziza; Daniel Li; Danielle Pintz; Danny Livshits; Danny Wyatt; David Adkins; David Esiobu; David Xu; Davide Testuggine; Delia David; Devi Parikh; Dhruv Choudhary; Dhruv Mahajan; Diana Liskovich; Diego Garcia-Olano; Diego Perino; Dieuwke Hupkes; Dingkang Wang; Dustin Holland; Egor Lakomkin; Elina Lobanova; Xiaoqing Ellen Tan; Emily Dinan; Eric Smith; Erik Brinkman; Esteban Arcaute; Filip Radenovic; Firat Ozgenel; Francesco Caggioni; Frank Seide; Frank Zhang; Gabriel Synnaeve; Gabriella Schwarz; Gabrielle Lee; Gada Badeer; Georgia Anderson; Graeme Nail; Gregoire Mialon; Guan Pang; Guillem Cucurell; Hailey Nguyen; Hamid Shojanazeri; Hannah Korevaar; Hannah Wang; Haroun Habeeb; Harrison Rudolph; Henry Aspegren; Hu Xu; Hugo Touvron; Iga Kozlowska; Igor Molybog; Igor Tufanov; Iliyan Zarov; Imanol Arrieta Ibarra; Irina-Elena Veliche; Isabel Kloumann; Ishan Misra; Ivan Evtimov; Jade Copet; Jake Weissman; Jan Geffert; Jana Vranes; Japhet Asher; Jason Park; Jay Mahadeokar; Jean-Baptiste Gaya; Jeet Shah; Jelmer van der Linde; Jennifer Chan; Jenny Hong; Jenya Lee; Jeremy Fu; Jeremy Teboul; Jianfeng Chi; Jianyu Huang; Jie Wang; Jiecao Yu; Joanna Bitton; Joe Spisak; Joelle Pineau; Jon Carvill; Jongsoo Park; Joseph Rocca; Joshua Johnstun; Junteng Jia; Kalyan Vasuden Alwala; Kam Hou U; Kate Plawiak; Kartikeya Upasani; Kaushik Veeraraghavan; Ke Li; Kenneth Heafield; Kevin Stone; Khalid El-Arini; Krithika Iyer; Kshitiz Malik; Kuenley Chiu; Kunal Bhalla; Kyle Huang; Lakshya Garg; Lauren Rantala-Yeary; Laurens van der Maaten; Lawrence Chen; Leandro Silva; Lee Bell; Lei Zhang; Liang Tan; Louis Martin; Lovish Madaan; Luca Wehrstedt; Lukas Blecher; Luke de Oliveira; Madeline Muzzi; Madian Khabsa; Manav Avlani; Mannat Singh; Manohar Paluri; Mark Zuckerberg; Marcin Kardas; Martynas Mankus; Mathew Oldham; Mathieu Rita; Matthew Lennie; Maya Pavlova; Meghan Keneally; Melanie Kambadur; Mihir Patel; Mikayel Samvelyan; Mike Clark; Mike Lewis; Min Si; Mitesh Kumar Singh; Mo Metanat; Mona Hassan; Naman Goyal; Narjes Torabi; Nicolas Usunier; Nikolay Bashlykov; Nikolay Bogoychev; Niladri Chatterji; Ning Dong; Oliver Aobo Yang; Olivier Duchenne; Onur Celebi; Parth Parekh; Patrick Alrassy; Paul Saab; Pavan Balaji; Pedro Rittner; Pengchuan Zhang; Pengwei Li; Petar Vasic; Peter Weng; Polina Zvyagina; Prajjwal Bhargava; Pratik Dubal; Praveen Krishnan; Punit Singh Koura; Puxin Xu; Qing He; Rachel Rodriguez; Ragavan Srinivasan; Rahul Mitra; Ramon Calderer; Raymond Li; Robert Stojnic; Roberta Raileanu; Robin 
Battey; Rocky Wang; Rohit Girdhar; Rohit Patel; Romain Sauvestre; Ronnie Polidoro; Roshan Sumbaly; Ross Taylor; Ruan Silva; Rui Hou; Rui Wang; Russ Howes; Ruty Rinott; Saghar Hosseini; Sai Jayesh Bondu; Samyak Datta; Sanjay Singh; Sara Chugh; Sargun Dhillon; Satadru Pan; Sean Bell; Sergey Edunov; Shaoliang Nie; Sharan Narang; Sharath Raparthy; Shaun Lindsay; Sheng Feng; Sheng Shen; Shenghao Lin; Shiva Shankar; Shruti Bhosale; Shun Zhang; Simon Vandenhende; Sinong Wang; Seohyun Sonia Kim; Soumya Batra; Sten Sootla; Steve Kehoe; Suchin Gururangan; Sumit Gupta; Sunny Virk; Sydney Borodinsky; Tamar Glaser; Tamar Herman; Tamara Best; Tara Fowler; Thomas Georgiou; Thomas Scialom; Tianhe Li; Todor Mihaylov; Tong Xiao; Ujjwal Karn; Vedanuj Goswami; Vibhor Gupta; Vignesh Ramanathan; Viktor Kerkez; Vinay Satish Kumar; Vincent Gonguet; Vish Vogeti; Vlad Poenaru; Vlad Tiberiu Mihailescu; Vladan Petrovic; Vladimir Ivanov; Wei Li; Weiwei Chu; Wenhan Xiong; Wenyin Fu; Wes Bouaziz; Whitney Meers; Will Constable; Xavier Martinet; Xiaojian Wu; Xinbo Gao; Xinfeng Xie; Xuchao Jia; Yaelle Goldschlag; Yann LeCun; Yashesh Gaur; Yasmine Babaei; Ye Qi; Yenda Li; Yi Wen; Yiwen Song; Youngjin Nam; Yuchen Hao; Yuchen Zhang; Yun Wang; Yuning Mao; Yuzi He; Zacharie Delpierre Coudert; Zachary DeVito; Zahra Hankir; Zhaoduo Wen; Zheng Yan; Zhengxing Chen; Zhenyu Yang; Zoe Papakipos

backend/llama3/README.md DELETED
@@ -1,164 +0,0 @@
- <p align="center">
-   <img src="https://github.com/meta-llama/llama3/blob/main/Llama3_Repo.jpeg" width="400"/>
- </p>
-
- <p align="center">
-   🤗 <a href="https://huggingface.co/meta-Llama">Models on Hugging Face</a>&nbsp; | <a href="https://ai.meta.com/blog/">Blog</a>&nbsp; | <a href="https://llama.meta.com/">Website</a>&nbsp; | <a href="https://llama.meta.com/get-started/">Get Started</a>&nbsp;
- <br>
-
- ---
-
- ## **Note of deprecation**
-
- Thank you for developing with Llama models. As part of the Llama 3.1 release, we've consolidated GitHub repos and added some additional repos as we've expanded Llama's functionality into being an e2e Llama Stack. Please use the following repos going forward:
- - [llama-models](https://github.com/meta-llama/llama-models) - Central repo for the foundation models, including basic utilities, model cards, license and use policies
- - [PurpleLlama](https://github.com/meta-llama/PurpleLlama) - Key component of Llama Stack focusing on safety risks and inference time mitigations
- - [llama-toolchain](https://github.com/meta-llama/llama-toolchain) - Model development (inference/fine-tuning/safety shields/synthetic data generation) interfaces and canonical implementations
- - [llama-agentic-system](https://github.com/meta-llama/llama-agentic-system) - E2E standalone Llama Stack system, along with opinionated underlying interface, that enables creation of agentic applications
- - [llama-recipes](https://github.com/meta-llama/llama-recipes) - Community-driven scripts and integrations
-
- If you have any questions, please feel free to file an issue on any of the above repos and we will do our best to respond in a timely manner.
-
- Thank you!
-
- # (Deprecated) Meta Llama 3
-
- We are unlocking the power of large language models. Our latest version of Llama is now accessible to individuals, creators, researchers, and businesses of all sizes so that they can experiment, innovate, and scale their ideas responsibly.
-
- This release includes model weights and starting code for pre-trained and instruction-tuned Llama 3 language models — including sizes of 8B to 70B parameters.
-
- This repository is a minimal example of loading Llama 3 models and running inference. For more detailed examples, see [llama-recipes](https://github.com/facebookresearch/llama-recipes/).
-
- ## Download
-
- To download the model weights and tokenizer, please visit the [Meta Llama website](https://llama.meta.com/llama-downloads/) and accept our License.
-
- Once your request is approved, you will receive a signed URL over email. Then, run the download.sh script, passing the URL provided when prompted to start the download.
-
- Pre-requisites: Ensure you have `wget` and `md5sum` installed. Then run the script: `./download.sh`.
-
- Remember that the links expire after 24 hours and a certain number of downloads. You can always re-request a link if you start seeing errors such as `403: Forbidden`.
-
- ### Access to Hugging Face
-
- We also provide downloads on [Hugging Face](https://huggingface.co/meta-llama), in both transformers and native `llama3` formats. To download the weights from Hugging Face, please follow these steps:
-
- - Visit one of the repos, for example [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct).
- - Read and accept the license. Once your request is approved, you'll be granted access to all the Llama 3 models. Note that requests used to take up to one hour to get processed.
- - To download the original native weights to use with this repo, click on the "Files and versions" tab and download the contents of the `original` folder. You can also download them from the command line if you `pip install huggingface-hub`:
-
- ```bash
- huggingface-cli download meta-llama/Meta-Llama-3-8B-Instruct --include "original/*" --local-dir meta-llama/Meta-Llama-3-8B-Instruct
- ```
-
- - To use with transformers, the following [pipeline](https://huggingface.co/docs/transformers/en/main_classes/pipelines) snippet will download and cache the weights:
-
- ```python
- import transformers
- import torch
-
- model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
-
- pipeline = transformers.pipeline(
-     "text-generation",
-     model=model_id,
-     model_kwargs={"torch_dtype": torch.bfloat16},
-     device="cuda",
- )
- ```
-
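Once constructed, the pipeline above can be called directly on a list of chat messages; recent transformers releases apply the instruct model's chat template automatically. A minimal usage sketch, assuming a transformers version with chat-format pipeline support (roughly 4.40 or later):

```python
# Usage sketch for the pipeline built above. Assumes a recent transformers
# release that applies the model's chat template to message lists.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain grouped-query attention in one sentence."},
]

outputs = pipeline(messages, max_new_tokens=128, do_sample=False)
# "generated_text" holds the full conversation; the last entry is the
# assistant's reply.
print(outputs[0]["generated_text"][-1])
```
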
71
- ## Quick Start
72
-
73
- You can follow the steps below to get up and running with Llama 3 models quickly. These steps will let you run quick inference locally. For more examples, see the [Llama recipes repository](https://github.com/facebookresearch/llama-recipes).
74
-
75
- 1. Clone and download this repository in a conda env with PyTorch / CUDA.
76
-
77
- 2. In the top-level directory run:
78
- ```bash
79
- pip install -e .
80
- ```
81
- 3. Visit the [Meta Llama website](https://llama.meta.com/llama-downloads/) and register to download the model/s.
82
-
83
- 4. Once registered, you will get an email with a URL to download the models. You will need this URL when you run the download.sh script.
84
-
85
- 5. Once you get the email, navigate to your downloaded llama repository and run the download.sh script.
86
- - Make sure to grant execution permissions to the download.sh script
87
- - During this process, you will be prompted to enter the URL from the email.
88
- - Do not use the “Copy Link” option; copy the link from the email manually.
89
-
90
- 6. Once the model/s you want have been downloaded, you can run the model locally using the command below:
91
- ```bash
92
- torchrun --nproc_per_node 1 example_chat_completion.py \
93
- --ckpt_dir Meta-Llama-3-8B-Instruct/ \
94
- --tokenizer_path Meta-Llama-3-8B-Instruct/tokenizer.model \
95
- --max_seq_len 512 --max_batch_size 6
96
- ```
97
- **Note**
98
- - Replace `Meta-Llama-3-8B-Instruct/` with the path to your checkpoint directory and `Meta-Llama-3-8B-Instruct/tokenizer.model` with the path to your tokenizer model.
99
- - The `–nproc_per_node` should be set to the [MP](#inference) value for the model you are using.
100
- - Adjust the `max_seq_len` and `max_batch_size` parameters as needed.
101
- - This example runs the [example_chat_completion.py](example_chat_completion.py) found in this repository, but you can change that to a different .py file.
102
-
103
- ## Inference
104
-
105
- Different models require different model-parallel (MP) values:
106
-
107
- | Model | MP |
108
- |--------|----|
109
- | 8B | 1 |
110
- | 70B | 8 |
111
-
112
- All models support sequence length up to 8192 tokens, but we pre-allocate the cache according to `max_seq_len` and `max_batch_size` values. So set those according to your hardware.
-
- ### Pretrained Models
-
- These models are not finetuned for chat or Q&A. They should be prompted so that the expected answer is the natural continuation of the prompt.
-
- See `example_text_completion.py` for some examples. To illustrate, see the command below to run it with the llama-3-8b model (`nproc_per_node` needs to be set to the `MP` value):
-
- ```bash
- torchrun --nproc_per_node 1 example_text_completion.py \
-     --ckpt_dir Meta-Llama-3-8B/ \
-     --tokenizer_path Meta-Llama-3-8B/tokenizer.model \
-     --max_seq_len 128 --max_batch_size 4
- ```
-
- ### Instruction-tuned Models
-
- The fine-tuned models were trained for dialogue applications. To get the expected features and performance for them, the specific formatting defined in [`ChatFormat`](https://github.com/meta-llama/llama3/blob/main/llama/tokenizer.py#L202) needs to be followed: the prompt begins with a `<|begin_of_text|>` special token, after which one or more messages follow. Each message starts with the `<|start_header_id|>` tag, the role (`system`, `user`, or `assistant`), and the `<|end_header_id|>` tag. After a double newline `\n\n`, the message's contents follow. The end of each message is marked by the `<|eot_id|>` token.
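-
- For illustration, here is a minimal string-level sketch of that format (the repository's `ChatFormat` builds the same sequence directly from token IDs):
-
- ```python
- def format_message(role: str, content: str) -> str:
-     return f"<|start_header_id|>{role}<|end_header_id|>\n\n{content}<|eot_id|>"
-
- def format_dialog(messages) -> str:
-     prompt = "<|begin_of_text|>"
-     for m in messages:
-         prompt += format_message(m["role"], m["content"])
-     # Open an assistant header so the model completes the assistant turn.
-     return prompt + "<|start_header_id|>assistant<|end_header_id|>\n\n"
-
- print(format_dialog([
-     {"role": "system", "content": "Always answer with Haiku"},
-     {"role": "user", "content": "I am going to Paris, what should I see?"},
- ]))
- ```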
-
- You can also deploy additional classifiers to filter out inputs and outputs that are deemed unsafe; a sketch of that pattern follows below. See the llama-recipes repo for [an example](https://github.com/meta-llama/llama-recipes/blob/main/recipes/inference/local_inference/inference.py) of how to add a safety checker to the inputs and outputs of your inference code.
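-
- As a minimal sketch (with `is_safe` as a hypothetical stand-in for whatever classifier you deploy):
-
- ```python
- def is_safe(text: str) -> bool:
-     # Hypothetical placeholder: wire in a real safety classifier here.
-     return True
-
- def guarded_chat_completion(generator, dialog, **kwargs):
-     refusal = {"role": "assistant", "content": "Sorry, I can't help with that."}
-     # Screen the user's last message before running the model.
-     if not is_safe(dialog[-1]["content"]):
-         return refusal
-     result = generator.chat_completion([dialog], **kwargs)[0]["generation"]
-     # Screen the model's output before returning it.
-     return result if is_safe(result["content"]) else refusal
- ```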
-
- Example using the instruction-tuned 8B model:
-
- ```bash
- torchrun --nproc_per_node 1 example_chat_completion.py \
-     --ckpt_dir Meta-Llama-3-8B-Instruct/ \
-     --tokenizer_path Meta-Llama-3-8B-Instruct/tokenizer.model \
-     --max_seq_len 512 --max_batch_size 6
- ```
-
- Llama 3 is a new technology that carries potential risks with use. Testing conducted to date has not — and could not — cover all scenarios.
- To help developers address these risks, we have created the [Responsible Use Guide](https://ai.meta.com/static-resource/responsible-use-guide/).
-
- ## Issues
-
- Please report any software “bug” or other problems with the models through one of the following means:
- - Reporting issues with the model: [https://github.com/meta-llama/llama3/issues](https://github.com/meta-llama/llama3/issues)
- - Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
- - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
-
- ## Model Card
- See [MODEL_CARD.md](MODEL_CARD.md).
-
- ## License
-
- Our model and weights are licensed for researchers and commercial entities, upholding the principles of openness. Our mission is to empower individuals and industry through this opportunity while fostering an environment of discovery and ethical AI advancements.
-
- See the [LICENSE](LICENSE) file, as well as our accompanying [Acceptable Use Policy](USE_POLICY.md).
-
- ## Questions
-
- For common questions, see the [FAQ](https://llama.meta.com/faq), which will be updated over time as new questions arise.
backend/llama3/USE_POLICY.md DELETED
@@ -1,50 +0,0 @@
- # Meta Llama 3 Acceptable Use Policy
-
- Meta is committed to promoting safe and fair use of its tools and features, including Llama 3. If you access or use Llama 3, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
-
- ## Prohibited Uses
- We want everyone to use Llama 3 safely and responsibly. You agree you will not use, or allow others to use, Llama 3 to:
-
- 1. Violate the law or others’ rights, including to:
-     1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
-         1. Violence or terrorism
-         2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
-         3. Human trafficking, exploitation, and sexual violence
-         4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials
-         5. Sexual solicitation
-         6. Any other criminal activity
-     2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
-     3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
-     4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
-     5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
-     6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 3 Materials
-     7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
-
- 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 3 related to the following:
-     1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic in Arms Regulations (ITAR) maintained by the United States Department of State
-     2. Guns and illegal weapons (including weapon development)
-     3. Illegal drugs and regulated/controlled substances
-     4. Operation of critical infrastructure, transportation technologies, or heavy machinery
-     5. Self-harm or harm to others, including suicide, cutting, and eating disorders
-     6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
-
- 3. Intentionally deceive or mislead others, including use of Llama 3 related to the following:
-     1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
-     2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
-     3. Generating, promoting, or further distributing spam
-     4. Impersonating another individual without consent, authorization, or legal right
-     5. Representing that the use of Llama 3 or its outputs are human-generated
-     6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
- 4. Fail to appropriately disclose to end users any known dangers of your AI system
-
- Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
-
- * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
- * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
- * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
- * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
-
backend/llama3/download.sh DELETED
@@ -1,64 +0,0 @@
- #!/usr/bin/env bash
-
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed according to the terms of the Llama 3 Community License Agreement.
-
- ####
- ## NOTE: For downloading Llama 3.1 please refer to https://github.com/meta-llama/llama-models/tree/main/models/llama3_1#download
- ####
-
- set -e
-
- read -p "Enter the URL from email: " PRESIGNED_URL
- echo ""
- read -p "Enter the list of models to download without spaces (8B,8B-instruct,70B,70B-instruct), or press Enter for all: " MODEL_SIZE
- TARGET_FOLDER="."             # where all files should end up
- mkdir -p ${TARGET_FOLDER}
-
- if [[ $MODEL_SIZE == "" ]]; then
-     MODEL_SIZE="8B,8B-instruct,70B,70B-instruct"
- fi
-
- # The presigned URL contains a '*' wildcard; ${PRESIGNED_URL/'*'/<path>}
- # substitutes the concrete file path into it for each download.
- echo "Downloading LICENSE and Acceptable Usage Policy"
- wget --continue ${PRESIGNED_URL/'*'/"LICENSE"} -O ${TARGET_FOLDER}"/LICENSE"
- wget --continue ${PRESIGNED_URL/'*'/"USE_POLICY"} -O ${TARGET_FOLDER}"/USE_POLICY"
-
- for m in ${MODEL_SIZE//,/ }
- do
-     if [[ $m == "8B" ]] || [[ $m == "8b" ]]; then
-         SHARD=0
-         MODEL_FOLDER_PATH="Meta-Llama-3-8B"
-         MODEL_PATH="8b_pre_trained"
-     elif [[ $m == "8B-instruct" ]] || [[ $m == "8b-instruct" ]] || [[ $m == "8b-Instruct" ]] || [[ $m == "8B-Instruct" ]]; then
-         SHARD=0
-         MODEL_FOLDER_PATH="Meta-Llama-3-8B-Instruct"
-         MODEL_PATH="8b_instruction_tuned"
-     elif [[ $m == "70B" ]] || [[ $m == "70b" ]]; then
-         SHARD=7
-         MODEL_FOLDER_PATH="Meta-Llama-3-70B"
-         MODEL_PATH="70b_pre_trained"
-     elif [[ $m == "70B-instruct" ]] || [[ $m == "70b-instruct" ]] || [[ $m == "70b-Instruct" ]] || [[ $m == "70B-Instruct" ]]; then
-         SHARD=7
-         MODEL_FOLDER_PATH="Meta-Llama-3-70B-Instruct"
-         MODEL_PATH="70b_instruction_tuned"
-     fi
-
-     echo "Downloading ${MODEL_PATH}"
-     mkdir -p ${TARGET_FOLDER}"/${MODEL_FOLDER_PATH}"
-
-     for s in $(seq -f "0%g" 0 ${SHARD})
-     do
-         wget --continue ${PRESIGNED_URL/'*'/"${MODEL_PATH}/consolidated.${s}.pth"} -O ${TARGET_FOLDER}"/${MODEL_FOLDER_PATH}/consolidated.${s}.pth"
-     done
-
-     wget --continue ${PRESIGNED_URL/'*'/"${MODEL_PATH}/params.json"} -O ${TARGET_FOLDER}"/${MODEL_FOLDER_PATH}/params.json"
-     wget --continue ${PRESIGNED_URL/'*'/"${MODEL_PATH}/tokenizer.model"} -O ${TARGET_FOLDER}"/${MODEL_FOLDER_PATH}/tokenizer.model"
-     wget --continue ${PRESIGNED_URL/'*'/"${MODEL_PATH}/checklist.chk"} -O ${TARGET_FOLDER}"/${MODEL_FOLDER_PATH}/checklist.chk"
-     echo "Checking checksums"
-     CPU_ARCH=$(uname -m)
-     if [[ "$CPU_ARCH" == "arm64" ]]; then
-         # macOS md5 has no -c flag; this only prints the digest of checklist.chk rather than verifying the shards.
-         (cd ${TARGET_FOLDER}"/${MODEL_FOLDER_PATH}" && md5 checklist.chk)
-     else
-         (cd ${TARGET_FOLDER}"/${MODEL_FOLDER_PATH}" && md5sum -c checklist.chk)
-     fi
- done
backend/llama3/eval_details.md DELETED
@@ -1,51 +0,0 @@
- ### Llama 3 Evaluation Details
- This document contains additional context on the settings and parameters for how we evaluated the Llama 3 pre-trained and instruct-aligned models.
- ### Auto-eval benchmark notes
- #### MMLU
- - We are reporting macro averages for MMLU benchmarks (a worked contrast with micro averaging follows below). The micro average numbers for MMLU are: 65.4 and 67.4 for the 8B pre-trained and instruct-aligned models, and 78.9 and 82.0 for the 70B pre-trained and instruct-aligned models.
- - The pre-trained models are evaluated in the standard way by calculating the likelihood of each choice character. For the instruct-aligned models, we use a dialogue prompt (*user/assistant*) for the shots and ask the model to generate the best choice character as the answer.
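-
- The distinction matters because MMLU subjects differ in size; the per-subject (correct, total) counts below are purely hypothetical:
-
- ```python
- subjects = {"anatomy": (60, 100), "law": (450, 500)}  # hypothetical counts
-
- macro = sum(c / t for c, t in subjects.values()) / len(subjects)
- micro = sum(c for c, _ in subjects.values()) / sum(t for _, t in subjects.values())
-
- print(f"macro: {macro:.3f}")  # 0.750, each subject weighted equally
- print(f"micro: {micro:.3f}")  # 0.850, each question weighted equally
- ```
-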
- #### AGI English
- - We use the default few-shot and prompt settings as specified [here](https://github.com/ruixiangcui/AGIEval). The score is averaged over the English subtasks.
- #### CommonSenseQA
- - We use the same 7-shot chain-of-thought prompt as in [Wei et al. (2022)](https://arxiv.org/pdf/2201.11903.pdf).
- #### Winogrande
- - We use a choice-based setup for evaluation, where we fill in the missing blank with the two possible choices and then compute the log-likelihood over the suffix. We use 5 shots for evaluation.
- #### BIG-Bench Hard
- - We use 3-shot chain-of-thought style prompting and compute the average exact match over the subsets of this task.
- #### ARC-Challenge
- - We use the arc-challenge subset of the ARC benchmark. We use 25 shots and the MMLU setup for evaluation, where we provide all the choices in the prompt and calculate the likelihood over choice characters.
- #### TriviaQA-WIKI
- - We evaluate on the Wiki validation set and use 5 few-shot examples.
- #### SQuAD
- - We are using SQuAD v2 and compute exact match in a 1-shot setting.
- #### QuAC
- - Same setting as Llama 2 (1-shot, f1).
- #### BoolQ
- - Same setting as Llama 1 and Llama 2 (0-shot, accuracy).
- #### DROP
- - For each validation example, we draw 3 random few-shot examples from the train split.
- #### GPQA
- - We report 0-shot exact match scores over the possible options using the Main subset for our models and other open-source models (Mistral, Gemma).
- #### HumanEval
- - Same setting as Llama 1 and Llama 2 (pass@1).
- #### GSM8K
- - We use the same 8-shot chain-of-thought prompt as in [Wei et al. (2022)](https://arxiv.org/pdf/2201.11903.pdf) (maj@1).
- - Max generation length is 512 tokens.
- #### MATH
- - We use the 4-shot prompt available in [Lewkowycz et al. (2022)](https://arxiv.org/pdf/2206.14858.pdf) (maj@1).
- ### Human evaluation notes
- This evaluation set contains 1,800 prompts that cover 12 key use cases: asking for advice, brainstorming, classification, closed question answering, coding, creative writing, extraction, inhabiting a character/persona, open question answering, reasoning, rewriting, and summarization.
-
- |Category|Count|
- |--------|-----|
- |Coding|150|
- |Mathematical reasoning|150|
- |Asking for Advice|150|
- |Brainstorming|150|
- |Classification|150|
- |Closed Question Answering|150|
- |Creative Writing|150|
- |Extraction|150|
- |Inhabiting a Character/Persona|150|
- |Open Question Answering|150|
- |Rewriting|150|
- |Summarization|150|
backend/llama3/example_chat_completion.py DELETED
@@ -1,84 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- from typing import List, Optional
-
- import fire
-
- from llama import Dialog, Llama
-
-
- def main(
-     ckpt_dir: str,
-     tokenizer_path: str,
-     temperature: float = 0.6,
-     top_p: float = 0.9,
-     max_seq_len: int = 512,
-     max_batch_size: int = 4,
-     max_gen_len: Optional[int] = None,
- ):
-     """
-     Examples to run with the models finetuned for chat. Prompts consist of chat
-     turns between the user and assistant, with the final one always being the user.
-
-     An optional system prompt at the beginning to control how the model should respond
-     is also supported.
-
-     The context window of llama3 models is 8192 tokens, so `max_seq_len` needs to be <= 8192.
-
-     `max_gen_len` is optional because finetuned models are able to stop generations naturally.
-     """
-     generator = Llama.build(
-         ckpt_dir=ckpt_dir,
-         tokenizer_path=tokenizer_path,
-         max_seq_len=max_seq_len,
-         max_batch_size=max_batch_size,
-     )
-
-     dialogs: List[Dialog] = [
-         [{"role": "user", "content": "what is the recipe of mayonnaise?"}],
-         [
-             {"role": "user", "content": "I am going to Paris, what should I see?"},
-             {
-                 "role": "assistant",
-                 "content": """\
- Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
-
- 1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
- 2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
- 3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
-
- These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
-             },
-             {"role": "user", "content": "What is so great about #1?"},
-         ],
-         [
-             {"role": "system", "content": "Always answer with Haiku"},
-             {"role": "user", "content": "I am going to Paris, what should I see?"},
-         ],
-         [
-             {
-                 "role": "system",
-                 "content": "Always answer with emojis",
-             },
-             {"role": "user", "content": "How to go from Beijing to NY?"},
-         ],
-     ]
-     results = generator.chat_completion(
-         dialogs,
-         max_gen_len=max_gen_len,
-         temperature=temperature,
-         top_p=top_p,
-     )
-
-     for dialog, result in zip(dialogs, results):
-         for msg in dialog:
-             print(f"{msg['role'].capitalize()}: {msg['content']}\n")
-         print(
-             f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
-         )
-         print("\n==================================\n")
-
-
- if __name__ == "__main__":
-     fire.Fire(main)
backend/llama3/example_text_completion.py DELETED
@@ -1,64 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- from typing import List
-
- import fire
-
- from llama import Llama
-
-
- def main(
-     ckpt_dir: str,
-     tokenizer_path: str,
-     temperature: float = 0.6,
-     top_p: float = 0.9,
-     max_seq_len: int = 128,
-     max_gen_len: int = 64,
-     max_batch_size: int = 4,
- ):
-     """
-     Examples to run with the pre-trained models (no fine-tuning). Prompts are
-     usually in the form of an incomplete text prefix that the model can then try to complete.
-
-     The context window of llama3 models is 8192 tokens, so `max_seq_len` needs to be <= 8192.
-     `max_gen_len` is needed because pre-trained models usually do not stop completions naturally.
-     """
-     generator = Llama.build(
-         ckpt_dir=ckpt_dir,
-         tokenizer_path=tokenizer_path,
-         max_seq_len=max_seq_len,
-         max_batch_size=max_batch_size,
-     )
-
-     prompts: List[str] = [
-         # For these prompts, the expected answer is the natural continuation of the prompt
-         "I believe the meaning of life is",
-         "Simply put, the theory of relativity states that ",
-         """A brief message congratulating the team on the launch:
-
-         Hi everyone,
-
-         I just """,
-         # Few-shot prompt (providing a few examples before asking the model to complete more):
-         """Translate English to French:
-
-         sea otter => loutre de mer
-         peppermint => menthe poivrée
-         plush girafe => girafe peluche
-         cheese =>""",
-     ]
-     results = generator.text_completion(
-         prompts,
-         max_gen_len=max_gen_len,
-         temperature=temperature,
-         top_p=top_p,
-     )
-     for prompt, result in zip(prompts, results):
-         print(prompt)
-         print(f"> {result['generation']}")
-         print("\n==================================\n")
-
-
- if __name__ == "__main__":
-     fire.Fire(main)
backend/llama3/llama/__init__.py DELETED
@@ -1,6 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- from .generation import Llama
- from .model import ModelArgs, Transformer
- from .tokenizer import Dialog, Tokenizer
backend/llama3/llama/generation.py DELETED
@@ -1,365 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- import json
- import os
- import sys
- import time
- from pathlib import Path
- from typing import List, Optional, Tuple, TypedDict
-
- import torch
- import torch.nn.functional as F
- from fairscale.nn.model_parallel.initialize import (
-     get_model_parallel_rank,
-     initialize_model_parallel,
-     model_parallel_is_initialized,
- )
-
- from llama.model import ModelArgs, Transformer
- from llama.tokenizer import ChatFormat, Dialog, Message, Tokenizer
-
-
- class CompletionPrediction(TypedDict, total=False):
-     generation: str
-     tokens: List[str]  # not required
-     logprobs: List[float]  # not required
-
-
- class ChatPrediction(TypedDict, total=False):
-     generation: Message
-     tokens: List[str]  # not required
-     logprobs: List[float]  # not required
-
-
- class Llama:
-     @staticmethod
-     def build(
-         ckpt_dir: str,
-         tokenizer_path: str,
-         max_seq_len: int,
-         max_batch_size: int,
-         model_parallel_size: Optional[int] = None,
-         seed: int = 1,
-     ) -> "Llama":
-         """
-         Build a Llama instance by initializing and loading a model checkpoint.
-
-         Args:
-             ckpt_dir (str): Path to the directory containing checkpoint files.
-             tokenizer_path (str): Path to the tokenizer file.
-             max_seq_len (int): Maximum sequence length for input text.
-             max_batch_size (int): Maximum batch size for inference.
-             model_parallel_size (Optional[int], optional): Number of model parallel processes.
-                 If not provided, it's determined from the environment. Defaults to None.
-             seed (int, optional): Random seed shared by all processes. Defaults to 1.
-
-         Returns:
-             Llama: An instance of the Llama class with the loaded model and tokenizer.
-
-         Raises:
-             AssertionError: If there are no checkpoint files in the specified directory,
-                 or if the model parallel size does not match the number of checkpoint files.
-
-         Note:
-             This method initializes the distributed process group, sets the device to CUDA,
-             and loads the pre-trained model and tokenizer.
-         """
-         assert 1 <= max_seq_len <= 8192, f"max_seq_len must be between 1 and 8192, got {max_seq_len}."
-         assert os.path.isdir(ckpt_dir), f"Checkpoint directory '{ckpt_dir}' does not exist."
-         assert os.path.isfile(tokenizer_path), f"Tokenizer file '{tokenizer_path}' does not exist."
-
-         if not torch.distributed.is_initialized():
-             torch.distributed.init_process_group("nccl")
-         if not model_parallel_is_initialized():
-             if model_parallel_size is None:
-                 model_parallel_size = int(os.environ.get("WORLD_SIZE", 1))
-             initialize_model_parallel(model_parallel_size)
-
-         local_rank = int(os.environ.get("LOCAL_RANK", 0))
-         torch.cuda.set_device(local_rank)
-
-         # seed must be the same in all processes
-         torch.manual_seed(seed)
-
-         if local_rank > 0:
-             sys.stdout = open(os.devnull, "w")
-
-         start_time = time.time()
-         checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
-         assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}"
-         assert model_parallel_size == len(
-             checkpoints
-         ), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}"
-         ckpt_path = checkpoints[get_model_parallel_rank()]
-         checkpoint = torch.load(ckpt_path, map_location="cpu")
-         with open(Path(ckpt_dir) / "params.json", "r") as f:
-             params = json.loads(f.read())
-
-         model_args: ModelArgs = ModelArgs(
-             max_seq_len=max_seq_len,
-             max_batch_size=max_batch_size,
-             **params,
-         )
-         tokenizer = Tokenizer(model_path=tokenizer_path)
-         assert model_args.vocab_size == tokenizer.n_words
-         if torch.cuda.is_bf16_supported():
-             torch.set_default_tensor_type(torch.cuda.BFloat16Tensor)
-         else:
-             torch.set_default_tensor_type(torch.cuda.HalfTensor)
-         model = Transformer(model_args)
-         model.load_state_dict(checkpoint, strict=False)
-         print(f"Loaded in {time.time() - start_time:.2f} seconds")
-
-         return Llama(model, tokenizer)
-
-     def __init__(self, model: Transformer, tokenizer: Tokenizer):
-         self.model = model
-         self.tokenizer = tokenizer
-         self.formatter = ChatFormat(tokenizer)
-
-     @torch.inference_mode()
-     def generate(
-         self,
-         prompt_tokens: List[List[int]],
-         max_gen_len: int,
-         temperature: float = 0.6,
-         top_p: float = 0.9,
-         logprobs: bool = False,
-         echo: bool = False,
-     ) -> Tuple[List[List[int]], Optional[List[List[float]]]]:
-         """
-         Generate text sequences based on provided prompts using the language generation model.
-
-         Args:
-             prompt_tokens (List[List[int]]): List of tokenized prompts, where each prompt is represented as a list of integers.
-             max_gen_len (int): Maximum length of the generated text sequence.
-             temperature (float, optional): Temperature value for controlling randomness in sampling. Defaults to 0.6.
-             top_p (float, optional): Top-p probability threshold for nucleus sampling. Defaults to 0.9.
-             logprobs (bool, optional): Flag indicating whether to compute token log probabilities. Defaults to False.
-             echo (bool, optional): Flag indicating whether to include prompt tokens in the generated output. Defaults to False.
-
-         Returns:
-             Tuple[List[List[int]], Optional[List[List[float]]]]: A tuple containing generated token sequences and, if logprobs is True, corresponding token log probabilities.
-
-         Note:
-             This method uses the provided prompts as a basis for generating text. It employs nucleus sampling to produce text with controlled randomness.
-             If logprobs is True, token log probabilities are computed for each generated token.
-         """
-         params = self.model.params
-         bsz = len(prompt_tokens)
-         assert bsz <= params.max_batch_size, (bsz, params.max_batch_size)
-
-         min_prompt_len = min(len(t) for t in prompt_tokens)
-         max_prompt_len = max(len(t) for t in prompt_tokens)
-         assert max_prompt_len <= params.max_seq_len
-         total_len = min(params.max_seq_len, max_gen_len + max_prompt_len)
-
-         pad_id = self.tokenizer.pad_id
-         tokens = torch.full((bsz, total_len), pad_id, dtype=torch.long, device="cuda")
-         for k, t in enumerate(prompt_tokens):
-             tokens[k, : len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
-         if logprobs:
-             token_logprobs = torch.zeros_like(tokens, dtype=torch.float)
-
-         prev_pos = 0
-         eos_reached = torch.tensor([False] * bsz, device="cuda")
-         input_text_mask = tokens != pad_id
-         if min_prompt_len == total_len:
-             logits = self.model.forward(tokens, prev_pos)
-             token_logprobs = -F.cross_entropy(
-                 input=logits.transpose(1, 2),
-                 target=tokens,
-                 reduction="none",
-                 ignore_index=pad_id,
-             )
-
-         stop_tokens = torch.tensor(list(self.tokenizer.stop_tokens))
-
-         for cur_pos in range(min_prompt_len, total_len):
-             logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
-             if temperature > 0:
-                 probs = torch.softmax(logits[:, -1] / temperature, dim=-1)
-                 next_token = sample_top_p(probs, top_p)
-             else:
-                 next_token = torch.argmax(logits[:, -1], dim=-1)
-
-             next_token = next_token.reshape(-1)
-             # only replace token if prompt has already been generated
-             next_token = torch.where(
-                 input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token
-             )
-             tokens[:, cur_pos] = next_token
-             if logprobs:
-                 token_logprobs[:, prev_pos + 1 : cur_pos + 1] = -F.cross_entropy(
-                     input=logits.transpose(1, 2),
-                     target=tokens[:, prev_pos + 1 : cur_pos + 1],
-                     reduction="none",
-                     ignore_index=pad_id,
-                 )
-             eos_reached |= (~input_text_mask[:, cur_pos]) & (
-                 torch.isin(next_token, stop_tokens)
-             )
-             prev_pos = cur_pos
-             if all(eos_reached):
-                 break
-
-         if logprobs:
-             token_logprobs = token_logprobs.tolist()
-         out_tokens, out_logprobs = [], []
-         for i, toks in enumerate(tokens.tolist()):
-             # cut to max gen len
-             start = 0 if echo else len(prompt_tokens[i])
-             toks = toks[start : len(prompt_tokens[i]) + max_gen_len]
-             probs = None
-             if logprobs:
-                 probs = token_logprobs[i][start : len(prompt_tokens[i]) + max_gen_len]
-             # cut to after eos tok if any
-             for stop_token in self.tokenizer.stop_tokens:
-                 try:
-                     eos_idx = toks.index(stop_token)
-                     toks = toks[:eos_idx]
-                     probs = probs[:eos_idx] if logprobs else None
-                 except ValueError:
-                     pass
-             out_tokens.append(toks)
-             out_logprobs.append(probs)
-         return (out_tokens, out_logprobs if logprobs else None)
-
-     def text_completion(
-         self,
-         prompts: List[str],
-         temperature: float = 0.6,
-         top_p: float = 0.9,
-         max_gen_len: Optional[int] = None,
-         logprobs: bool = False,
-         echo: bool = False,
-     ) -> List[CompletionPrediction]:
-         """
-         Perform text completion for a list of prompts using the language generation model.
-
-         Args:
-             prompts (List[str]): List of text prompts for completion.
-             temperature (float, optional): Temperature value for controlling randomness in sampling. Defaults to 0.6.
-             top_p (float, optional): Top-p probability threshold for nucleus sampling. Defaults to 0.9.
-             max_gen_len (Optional[int], optional): Maximum length of the generated completion sequence.
-                 If not provided, it's set to the model's maximum sequence length minus 1.
-             logprobs (bool, optional): Flag indicating whether to compute token log probabilities. Defaults to False.
-             echo (bool, optional): Flag indicating whether to include prompt tokens in the generated output. Defaults to False.
-
-         Returns:
-             List[CompletionPrediction]: List of completion predictions, each containing the generated text completion.
-
-         Note:
-             This method generates text completions for the provided prompts, employing nucleus sampling to introduce controlled randomness.
-             If logprobs is True, token log probabilities are computed for each generated token.
-         """
-         if max_gen_len is None:
-             max_gen_len = self.model.params.max_seq_len - 1
-         prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts]
-         generation_tokens, generation_logprobs = self.generate(
-             prompt_tokens=prompt_tokens,
-             max_gen_len=max_gen_len,
-             temperature=temperature,
-             top_p=top_p,
-             logprobs=logprobs,
-             echo=echo,
-         )
-         if logprobs:
-             return [
-                 {
-                     "generation": self.tokenizer.decode(t),
-                     "tokens": [self.tokenizer.decode([x]) for x in t],
-                     "logprobs": logprobs_i,
-                 }
-                 for t, logprobs_i in zip(generation_tokens, generation_logprobs)
-             ]
-         return [{"generation": self.tokenizer.decode(t)} for t in generation_tokens]
-
-     def chat_completion(
-         self,
-         dialogs: List[Dialog],
-         temperature: float = 0.6,
-         top_p: float = 0.9,
-         max_gen_len: Optional[int] = None,
-         logprobs: bool = False,
-     ) -> List[ChatPrediction]:
-         """
-         Generate assistant responses for a list of conversational dialogs using the language generation model.
-
-         Args:
-             dialogs (List[Dialog]): List of conversational dialogs, where each dialog is a list of messages.
-             temperature (float, optional): Temperature value for controlling randomness in sampling. Defaults to 0.6.
-             top_p (float, optional): Top-p probability threshold for nucleus sampling. Defaults to 0.9.
-             max_gen_len (Optional[int], optional): Maximum length of the generated response sequence.
-                 If not provided, it's set to the model's maximum sequence length minus 1.
-             logprobs (bool, optional): Flag indicating whether to compute token log probabilities. Defaults to False.
-
-         Returns:
-             List[ChatPrediction]: List of chat predictions, each containing the assistant's generated response.
-
-         Note:
-             This method generates assistant responses for the provided conversational dialogs.
-             It employs nucleus sampling to introduce controlled randomness in text generation.
-             If logprobs is True, token log probabilities are computed for each generated token.
-         """
-         if max_gen_len is None:
-             max_gen_len = self.model.params.max_seq_len - 1
-
-         prompt_tokens = [
-             self.formatter.encode_dialog_prompt(dialog) for dialog in dialogs
-         ]
-         generation_tokens, generation_logprobs = self.generate(
-             prompt_tokens=prompt_tokens,
-             max_gen_len=max_gen_len,
-             temperature=temperature,
-             top_p=top_p,
-             logprobs=logprobs,
-         )
-         if logprobs:
-             return [
-                 {
-                     "generation": {
-                         "role": "assistant",
-                         "content": self.tokenizer.decode(t),
-                     },
-                     "tokens": [self.tokenizer.decode([x]) for x in t],
-                     "logprobs": logprobs_i,
-                 }
-                 for t, logprobs_i in zip(generation_tokens, generation_logprobs)
-             ]
-         return [
-             {
-                 "generation": {
-                     "role": "assistant",
-                     "content": self.tokenizer.decode(t),
-                 },
-             }
-             for t in generation_tokens
-         ]
-
-
- def sample_top_p(probs, p):
-     """
-     Perform top-p (nucleus) sampling on a probability distribution.
-
-     Args:
-         probs (torch.Tensor): Probability distribution tensor.
-         p (float): Probability threshold for top-p sampling.
-
-     Returns:
-         torch.Tensor: Sampled token indices.
-
-     Note:
-         Top-p sampling selects the smallest set of tokens whose cumulative probability mass
-         exceeds the threshold p. The distribution is renormalized based on the selected tokens.
-     """
-     probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
-     probs_sum = torch.cumsum(probs_sort, dim=-1)
-     mask = probs_sum - probs_sort > p
-     probs_sort[mask] = 0.0
-     probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
-     next_token = torch.multinomial(probs_sort, num_samples=1)
-     next_token = torch.gather(probs_idx, -1, next_token)
-     return next_token
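
As a hedged illustration of what `sample_top_p` does in isolation (assuming only that `torch` and this package are installed), consider a toy distribution:

```python
import torch

from llama.generation import sample_top_p

# Toy next-token distribution over a 5-token vocabulary (batch of 1).
probs = torch.tensor([[0.50, 0.25, 0.15, 0.07, 0.03]])

# With p=0.8, the smallest prefix of sorted tokens whose mass reaches 0.8
# is {0, 1, 2} (cumulative 0.90); tokens 3 and 4 are zeroed out and the
# remainder is renormalized before sampling.
torch.manual_seed(0)
print(sample_top_p(probs, p=0.8))  # a (1, 1) tensor; only 0, 1, or 2 can appear
```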
backend/llama3/llama/model.py DELETED
@@ -1,302 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- import math
- from dataclasses import dataclass
- from typing import Optional, Tuple
-
- import fairscale.nn.model_parallel.initialize as fs_init
- import torch
- import torch.nn.functional as F
- from fairscale.nn.model_parallel.layers import (
-     ColumnParallelLinear,
-     RowParallelLinear,
-     VocabParallelEmbedding,
- )
- from torch import nn
-
-
- @dataclass
- class ModelArgs:
-     dim: int = 4096
-     n_layers: int = 32
-     n_heads: int = 32
-     n_kv_heads: Optional[int] = None
-     vocab_size: int = -1
-     multiple_of: int = 256  # make SwiGLU hidden layer size multiple of large power of 2
-     ffn_dim_multiplier: Optional[float] = None
-     norm_eps: float = 1e-5
-     rope_theta: float = 500000
-
-     max_batch_size: int = 32
-     max_seq_len: int = 2048
-
-
- class RMSNorm(torch.nn.Module):
-     def __init__(self, dim: int, eps: float = 1e-6):
-         super().__init__()
-         self.eps = eps
-         self.weight = nn.Parameter(torch.ones(dim))
-
-     def _norm(self, x):
-         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
-
-     def forward(self, x):
-         output = self._norm(x.float()).type_as(x)
-         return output * self.weight
-
-
- def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
-     freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
-     t = torch.arange(end, device=freqs.device, dtype=torch.float32)
-     freqs = torch.outer(t, freqs)
-     freqs_cis = torch.polar(torch.ones_like(freqs), freqs)  # complex64
-     return freqs_cis
-
-
- def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
-     ndim = x.ndim
-     assert 0 <= 1 < ndim
-     assert freqs_cis.shape == (x.shape[1], x.shape[-1])
-     shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
-     return freqs_cis.view(*shape)
-
-
- def apply_rotary_emb(
-     xq: torch.Tensor,
-     xk: torch.Tensor,
-     freqs_cis: torch.Tensor,
- ) -> Tuple[torch.Tensor, torch.Tensor]:
-     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
-     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
-     freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
-     xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
-     xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
-     return xq_out.type_as(xq), xk_out.type_as(xk)
-
-
- def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
-     """torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
-     bs, slen, n_kv_heads, head_dim = x.shape
-     if n_rep == 1:
-         return x
-     return (
-         x[:, :, :, None, :]
-         .expand(bs, slen, n_kv_heads, n_rep, head_dim)
-         .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
-     )
-
-
- class Attention(nn.Module):
-     def __init__(self, args: ModelArgs):
-         super().__init__()
-         self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
-         model_parallel_size = fs_init.get_model_parallel_world_size()
-         self.n_local_heads = args.n_heads // model_parallel_size
-         self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
-         self.n_rep = self.n_local_heads // self.n_local_kv_heads
-         self.head_dim = args.dim // args.n_heads
-
-         self.wq = ColumnParallelLinear(
-             args.dim,
-             args.n_heads * self.head_dim,
-             bias=False,
-             gather_output=False,
-             init_method=lambda x: x,
-         )
-         self.wk = ColumnParallelLinear(
-             args.dim,
-             self.n_kv_heads * self.head_dim,
-             bias=False,
-             gather_output=False,
-             init_method=lambda x: x,
-         )
-         self.wv = ColumnParallelLinear(
-             args.dim,
-             self.n_kv_heads * self.head_dim,
-             bias=False,
-             gather_output=False,
-             init_method=lambda x: x,
-         )
-         self.wo = RowParallelLinear(
-             args.n_heads * self.head_dim,
-             args.dim,
-             bias=False,
-             input_is_parallel=True,
-             init_method=lambda x: x,
-         )
-
-         self.cache_k = torch.zeros(
-             (
-                 args.max_batch_size,
-                 args.max_seq_len,
-                 self.n_local_kv_heads,
-                 self.head_dim,
-             )
-         ).cuda()
-         self.cache_v = torch.zeros(
-             (
-                 args.max_batch_size,
-                 args.max_seq_len,
-                 self.n_local_kv_heads,
-                 self.head_dim,
-             )
-         ).cuda()
-
-     def forward(
-         self,
-         x: torch.Tensor,
-         start_pos: int,
-         freqs_cis: torch.Tensor,
-         mask: Optional[torch.Tensor],
-     ):
-         bsz, seqlen, _ = x.shape
-         xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
-
-         xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
-         xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
-         xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
-
-         xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
-
-         self.cache_k = self.cache_k.to(xq)
-         self.cache_v = self.cache_v.to(xq)
-
-         self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
-         self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
-
-         keys = self.cache_k[:bsz, : start_pos + seqlen]
-         values = self.cache_v[:bsz, : start_pos + seqlen]
-
-         # repeat k/v heads if n_kv_heads < n_heads
-         keys = repeat_kv(
-             keys, self.n_rep
-         )  # (bs, cache_len + seqlen, n_local_heads, head_dim)
-         values = repeat_kv(
-             values, self.n_rep
-         )  # (bs, cache_len + seqlen, n_local_heads, head_dim)
-
-         xq = xq.transpose(1, 2)  # (bs, n_local_heads, seqlen, head_dim)
-         keys = keys.transpose(1, 2)  # (bs, n_local_heads, cache_len + seqlen, head_dim)
-         values = values.transpose(
-             1, 2
-         )  # (bs, n_local_heads, cache_len + seqlen, head_dim)
-         scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
-         if mask is not None:
-             scores = scores + mask  # (bs, n_local_heads, seqlen, cache_len + seqlen)
-         scores = F.softmax(scores.float(), dim=-1).type_as(xq)
-         output = torch.matmul(scores, values)  # (bs, n_local_heads, seqlen, head_dim)
-         output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
-         return self.wo(output)
-
-
- class FeedForward(nn.Module):
-     def __init__(
-         self,
-         dim: int,
-         hidden_dim: int,
-         multiple_of: int,
-         ffn_dim_multiplier: Optional[float],
-     ):
-         super().__init__()
-         hidden_dim = int(2 * hidden_dim / 3)
-         # custom dim factor multiplier
-         if ffn_dim_multiplier is not None:
-             hidden_dim = int(ffn_dim_multiplier * hidden_dim)
-         hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
-
-         self.w1 = ColumnParallelLinear(
-             dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
-         )
-         self.w2 = RowParallelLinear(
-             hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x
-         )
-         self.w3 = ColumnParallelLinear(
-             dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
-         )
-
-     def forward(self, x):
-         return self.w2(F.silu(self.w1(x)) * self.w3(x))
-
-
- class TransformerBlock(nn.Module):
-     def __init__(self, layer_id: int, args: ModelArgs):
-         super().__init__()
-         self.n_heads = args.n_heads
-         self.dim = args.dim
-         self.head_dim = args.dim // args.n_heads
-         self.attention = Attention(args)
-         self.feed_forward = FeedForward(
-             dim=args.dim,
-             hidden_dim=4 * args.dim,
-             multiple_of=args.multiple_of,
-             ffn_dim_multiplier=args.ffn_dim_multiplier,
-         )
-         self.layer_id = layer_id
-         self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
-         self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
-
-     def forward(
-         self,
-         x: torch.Tensor,
-         start_pos: int,
-         freqs_cis: torch.Tensor,
-         mask: Optional[torch.Tensor],
-     ):
-         h = x + self.attention(self.attention_norm(x), start_pos, freqs_cis, mask)
-         out = h + self.feed_forward(self.ffn_norm(h))
-         return out
-
-
- class Transformer(nn.Module):
-     def __init__(self, params: ModelArgs):
-         super().__init__()
-         self.params = params
-         self.vocab_size = params.vocab_size
-         self.n_layers = params.n_layers
-
-         self.tok_embeddings = VocabParallelEmbedding(
-             params.vocab_size, params.dim, init_method=lambda x: x
-         )
-
-         self.layers = torch.nn.ModuleList()
-         for layer_id in range(params.n_layers):
-             self.layers.append(TransformerBlock(layer_id, params))
-
-         self.norm = RMSNorm(params.dim, eps=params.norm_eps)
-         self.output = ColumnParallelLinear(
-             params.dim, params.vocab_size, bias=False, init_method=lambda x: x
-         )
-
-         self.freqs_cis = precompute_freqs_cis(
-             params.dim // params.n_heads,
-             params.max_seq_len * 2,
-             params.rope_theta,
-         )
-
-     @torch.inference_mode()
-     def forward(self, tokens: torch.Tensor, start_pos: int):
-         _bsz, seqlen = tokens.shape
-         h = self.tok_embeddings(tokens)
-         self.freqs_cis = self.freqs_cis.to(h.device)
-         freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]
-
-         mask = None
-         if seqlen > 1:
-             mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device)
-
-             mask = torch.triu(mask, diagonal=1)
-
-             # When performing key-value caching, we compute the attention scores
-             # only for the new sequence. Thus, the matrix of scores is of size
-             # (seqlen, cache_len + seqlen), and the only masked entries are (i, j) for
-             # j > cache_len + i, since row i corresponds to token cache_len + i.
-             mask = torch.hstack(
-                 [torch.zeros((seqlen, start_pos), device=tokens.device), mask]
-             ).type_as(h)
-
-         for layer in self.layers:
-             h = layer(h, start_pos, freqs_cis, mask)
-         h = self.norm(h)
-         output = self.output(h).float()
-         return output
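
As a small self-contained check of the rotary-embedding helpers above (CPU-only; the shapes are chosen to match the 8B head dimension of 128, and the tolerance is an assumption), one might run:

```python
import torch

from llama.model import apply_rotary_emb, precompute_freqs_cis

bsz, seqlen, n_heads, head_dim = 1, 16, 4, 128
xq = torch.randn(bsz, seqlen, n_heads, head_dim)
xk = torch.randn(bsz, seqlen, n_heads, head_dim)

# One complex rotation per (position, head-dim pair): shape (seqlen, head_dim // 2).
freqs_cis = precompute_freqs_cis(head_dim, seqlen, theta=500000)
xq_rot, xk_rot = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)

# Rotations are unit-magnitude, so per-head vector norms are preserved.
assert torch.allclose(xq.norm(dim=-1), xq_rot.norm(dim=-1), atol=1e-4)
```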
backend/llama3/llama/test_tokenizer.py DELETED
@@ -1,88 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- import os
- from unittest import TestCase
-
- from llama.tokenizer import ChatFormat, Tokenizer
-
- # TOKENIZER_PATH=<path> python -m unittest llama/test_tokenizer.py
-
-
- class TokenizerTests(TestCase):
-     def setUp(self):
-         self.tokenizer = Tokenizer(os.environ["TOKENIZER_PATH"])
-         self.format = ChatFormat(self.tokenizer)
-
-     def test_special_tokens(self):
-         self.assertEqual(
-             self.tokenizer.special_tokens["<|begin_of_text|>"],
-             128000,
-         )
-
-     def test_encode(self):
-         self.assertEqual(
-             self.tokenizer.encode(
-                 "This is a test sentence.",
-                 bos=True,
-                 eos=True,
-             ),
-             [128000, 2028, 374, 264, 1296, 11914, 13, 128001],
-         )
-
-     def test_decode(self):
-         self.assertEqual(
-             self.tokenizer.decode(
-                 [128000, 2028, 374, 264, 1296, 11914, 13, 128001],
-             ),
-             "<|begin_of_text|>This is a test sentence.<|end_of_text|>",
-         )
-
-     def test_encode_message(self):
-         message = {
-             "role": "user",
-             "content": "This is a test sentence.",
-         }
-         self.assertEqual(
-             self.format.encode_message(message),
-             [
-                 128006,  # <|start_header_id|>
-                 882,  # "user"
-                 128007,  # <|end_header_id|>
-                 271,  # "\n\n"
-                 2028, 374, 264, 1296, 11914, 13,  # "This is a test sentence."
-                 128009,  # <|eot_id|>
-             ],
-         )
-
-     def test_encode_dialog(self):
-         dialog = [
-             {
-                 "role": "system",
-                 "content": "This is a test sentence.",
-             },
-             {
-                 "role": "user",
-                 "content": "This is a response.",
-             },
-         ]
-         self.assertEqual(
-             self.format.encode_dialog_prompt(dialog),
-             [
-                 128000,  # <|begin_of_text|>
-                 128006,  # <|start_header_id|>
-                 9125,  # "system"
-                 128007,  # <|end_header_id|>
-                 271,  # "\n\n"
-                 2028, 374, 264, 1296, 11914, 13,  # "This is a test sentence."
-                 128009,  # <|eot_id|>
-                 128006,  # <|start_header_id|>
-                 882,  # "user"
-                 128007,  # <|end_header_id|>
-                 271,  # "\n\n"
-                 2028, 374, 264, 2077, 13,  # "This is a response."
-                 128009,  # <|eot_id|>
-                 128006,  # <|start_header_id|>
-                 78191,  # "assistant"
-                 128007,  # <|end_header_id|>
-                 271,  # "\n\n"
-             ],
-         )
backend/llama3/llama/tokenizer.py DELETED
@@ -1,229 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- import os
- from logging import getLogger
- from pathlib import Path
- from typing import (
-     AbstractSet,
-     cast,
-     Collection,
-     Dict,
-     Iterator,
-     List,
-     Literal,
-     Sequence,
-     TypedDict,
-     Union,
- )
-
- import tiktoken
- from tiktoken.load import load_tiktoken_bpe
-
-
- logger = getLogger(__name__)
-
-
- Role = Literal["system", "user", "assistant"]
-
-
- class Message(TypedDict):
-     role: Role
-     content: str
-
-
- Dialog = Sequence[Message]
-
-
- class Tokenizer:
-     """
-     Tokenizing and encoding/decoding text using the Tiktoken tokenizer.
-     """
-
-     special_tokens: Dict[str, int]
-
-     num_reserved_special_tokens = 256
-
-     pat_str = r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"  # noqa: E501
-
-     def __init__(self, model_path: str):
-         """
-         Initializes the Tokenizer with a Tiktoken model.
-
-         Args:
-             model_path (str): The path to the Tiktoken model file.
-         """
-         assert os.path.isfile(model_path), model_path
-
-         mergeable_ranks = load_tiktoken_bpe(model_path)
-         num_base_tokens = len(mergeable_ranks)
-         special_tokens = [
-             "<|begin_of_text|>",
-             "<|end_of_text|>",
-             "<|reserved_special_token_0|>",
-             "<|reserved_special_token_1|>",
-             "<|reserved_special_token_2|>",
-             "<|reserved_special_token_3|>",
-             "<|start_header_id|>",
-             "<|end_header_id|>",
-             "<|reserved_special_token_4|>",
-             "<|eot_id|>",  # end of turn
-         ] + [
-             f"<|reserved_special_token_{i}|>"
-             for i in range(5, self.num_reserved_special_tokens - 5)
-         ]
-         self.special_tokens = {
-             token: num_base_tokens + i for i, token in enumerate(special_tokens)
-         }
-         self.model = tiktoken.Encoding(
-             name=Path(model_path).name,
-             pat_str=self.pat_str,
-             mergeable_ranks=mergeable_ranks,
-             special_tokens=self.special_tokens,
-         )
-         logger.info(f"Reloaded tiktoken model from {model_path}")
-
-         self.n_words: int = self.model.n_vocab
-         # BOS / EOS token IDs
-         self.bos_id: int = self.special_tokens["<|begin_of_text|>"]
-         self.eos_id: int = self.special_tokens["<|end_of_text|>"]
-         self.pad_id: int = -1
-         self.stop_tokens = {
-             self.special_tokens["<|end_of_text|>"],
-             self.special_tokens["<|eot_id|>"],
-         }
-         logger.info(
-             f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
-         )
-
-     def encode(
-         self,
-         s: str,
-         *,
-         bos: bool,
-         eos: bool,
-         allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
-         disallowed_special: Union[Literal["all"], Collection[str]] = (),
-     ) -> List[int]:
-         """
-         Encodes a string into a list of token IDs.
-
-         Args:
-             s (str): The input string to be encoded.
-             bos (bool): Whether to prepend the beginning-of-sequence token.
-             eos (bool): Whether to append the end-of-sequence token.
-             allowed_special ("all"|set[str]): special tokens allowed in the string
-             disallowed_special ("all"|set[str]): special tokens that raise an error when found in the string
-
-         Returns:
-             list[int]: A list of token IDs.
-
-         By default, setting disallowed_special=() encodes a string by ignoring
-         special tokens. Specifically:
-         - Setting `disallowed_special` to () will cause all text corresponding
-           to special tokens to be encoded as natural text (instead of raising
-           an error).
-         - Setting `allowed_special` to "all" will cause all text corresponding
-           to special tokens to be encoded as special tokens.
-         """
-         assert type(s) is str
-
-         # The tiktoken tokenizer can handle <=400k chars without
-         # pyo3_runtime.PanicException.
-         TIKTOKEN_MAX_ENCODE_CHARS = 400_000
-
-         # https://github.com/openai/tiktoken/issues/195
-         # Here we iterate over subsequences and split if we exceed the limit
-         # of max consecutive non-whitespace or whitespace characters.
-         MAX_NO_WHITESPACES_CHARS = 25_000
-
-         substrs = (
-             substr
-             for i in range(0, len(s), TIKTOKEN_MAX_ENCODE_CHARS)
-             for substr in self._split_whitespaces_or_nonwhitespaces(
-                 s[i : i + TIKTOKEN_MAX_ENCODE_CHARS], MAX_NO_WHITESPACES_CHARS
-             )
-         )
-         t: List[int] = []
-         for substr in substrs:
-             t.extend(
-                 self.model.encode(
-                     substr,
-                     allowed_special=allowed_special,
-                     disallowed_special=disallowed_special,
-                 )
-             )
-         if bos:
-             t.insert(0, self.bos_id)
-         if eos:
-             t.append(self.eos_id)
-         return t
-
-     def decode(self, t: Sequence[int]) -> str:
-         """
-         Decodes a list of token IDs into a string.
-
-         Args:
-             t (List[int]): The list of token IDs to be decoded.
-
-         Returns:
-             str: The decoded string.
-         """
-         # Typecast is safe here. Tiktoken doesn't do anything list-related with the sequence.
-         return self.model.decode(cast(List[int], t))
-
-     @staticmethod
-     def _split_whitespaces_or_nonwhitespaces(
-         s: str, max_consecutive_slice_len: int
-     ) -> Iterator[str]:
-         """
-         Splits the string `s` so that each substring contains no more than `max_consecutive_slice_len`
-         consecutive whitespaces or consecutive non-whitespaces.
-         """
-         current_slice_len = 0
-         current_slice_is_space = s[0].isspace() if len(s) > 0 else False
-         slice_start = 0
-
-         for i in range(len(s)):
-             is_now_space = s[i].isspace()
-
-             if current_slice_is_space ^ is_now_space:
-                 current_slice_len = 1
-                 current_slice_is_space = is_now_space
-             else:
-                 current_slice_len += 1
-                 if current_slice_len > max_consecutive_slice_len:
-                     yield s[slice_start:i]
-                     slice_start = i
-                     current_slice_len = 1
-         yield s[slice_start:]
-
-
- class ChatFormat:
-     def __init__(self, tokenizer: Tokenizer):
-         self.tokenizer = tokenizer
-
-     def encode_header(self, message: Message) -> List[int]:
-         tokens = []
-         tokens.append(self.tokenizer.special_tokens["<|start_header_id|>"])
-         tokens.extend(self.tokenizer.encode(message["role"], bos=False, eos=False))
-         tokens.append(self.tokenizer.special_tokens["<|end_header_id|>"])
-         tokens.extend(self.tokenizer.encode("\n\n", bos=False, eos=False))
-         return tokens
-
-     def encode_message(self, message: Message) -> List[int]:
-         tokens = self.encode_header(message)
-         tokens.extend(
-             self.tokenizer.encode(message["content"].strip(), bos=False, eos=False)
-         )
-         tokens.append(self.tokenizer.special_tokens["<|eot_id|>"])
-         return tokens
-
-     def encode_dialog_prompt(self, dialog: Dialog) -> List[int]:
-         tokens = []
-         tokens.append(self.tokenizer.special_tokens["<|begin_of_text|>"])
-         for message in dialog:
-             tokens.extend(self.encode_message(message))
-         # Add the start of an assistant message for the model to complete.
-         tokens.extend(self.encode_header({"role": "assistant", "content": ""}))
-         return tokens
backend/llama3/requirements.txt DELETED
@@ -1,5 +0,0 @@
- torch
- fairscale
- fire
- tiktoken==0.4.0
- blobfile
backend/llama3/setup.py DELETED
@@ -1,16 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
-
- from setuptools import find_packages, setup
-
-
- def get_requirements(path: str):
-     # Use a context manager so the file handle is closed promptly.
-     with open(path) as f:
-         return [line.strip() for line in f]
-
-
- setup(
-     name="llama3",
-     version="0.0.1",
-     packages=find_packages(),
-     install_requires=get_requirements("requirements.txt"),
- )