initial commit
Files changed:
- .gitignore +2 -0
- agents.py +42 -0
- app.py +21 -0
- poetry.lock +0 -0
- pyproject.toml +29 -0
- results.md +169 -0
- tasks.py +31 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.env
+__pycache__/
agents.py
ADDED
@@ -0,0 +1,42 @@
+import os
+from dotenv import load_dotenv
+load_dotenv()
+from textwrap import dedent
+from langchain_openai import ChatOpenAI
+from crewai import Agent
+from crewai_tools import ScrapeWebsiteTool, SerperDevTool
+
+# openai_llm = ChatOpenAI(model_name="gpt-3.5-turbo-0125", temperature=0.2)
+openai_llm = ChatOpenAI(api_key=os.environ.get("OPENAI_API_KEY"), model="gpt-3.5-turbo-0125")
+
+scrape_website_tool = ScrapeWebsiteTool()
+search_tool = SerperDevTool()
+
+topic_researcher_agent = Agent(
+    role="Topic Researcher",
+    goal="Your goal is to search for relevant content about the comparison between Llama 2 and Llama 3",
+    tools=[scrape_website_tool, search_tool],
+    backstory=dedent(
+        """
+        You are proficient at searching the web for specific topics, selecting the sources that
+        provide the most value and information.
+        """
+    ),
+    verbose=True,
+    allow_delegation=False,
+    llm=openai_llm,
+)
+
+writer_agent = Agent(
+    role="Writer",
+    goal="You will create an article that summarises the difference between Llama 2 and Llama 3",
+    backstory=dedent(
+        """
+        You are a renowned Writer, known for your insightful and engaging articles.
+        You transform complex concepts into compelling narratives.
+        """
+    ),
+    verbose=True,
+    allow_delegation=False,
+    llm=openai_llm,
+)
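Both agents' tools expect API keys from the .env file ignored above: ChatOpenAI reads OPENAI_API_KEY explicitly, and SerperDevTool conventionally resolves its key from the SERPER_API_KEY environment variable. A minimal pre-flight check is sketched below; check_env.py is a hypothetical helper, not part of this commit.

# check_env.py -- hypothetical pre-flight check, assuming SerperDevTool
# reads its key from the SERPER_API_KEY environment variable.
import os

from dotenv import load_dotenv

load_dotenv()  # load keys from the .env file excluded by .gitignore

for key in ("OPENAI_API_KEY", "SERPER_API_KEY"):
    if not os.environ.get(key):
        raise SystemExit(f"Missing {key}: add it to .env before running app.py")
print("All required API keys are set.")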
app.py
ADDED
@@ -0,0 +1,21 @@
+from crewai import Crew
+from agents import topic_researcher_agent, writer_agent
+from tasks import topic_research_task, write_task
+
+crew = Crew(
+    agents=[
+        topic_researcher_agent,
+        writer_agent,
+    ],
+    tasks=[
+        topic_research_task,
+        write_task,
+    ],
+    verbose=1,
+)
+
+result = crew.kickoff()
+
+
+print("Here is the result: ")
+print(result)
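After `poetry install`, the pipeline runs with `poetry run python app.py`. The two tasks execute in list order (researcher first, then writer); the sketch below spells that out with crewai's Process enum, assuming sequential execution is the default in the pinned 0.28.6 release. This is a sketch, not part of this commit.

# Equivalent wiring with the execution order made explicit (a sketch).
from crewai import Crew, Process

from agents import topic_researcher_agent, writer_agent
from tasks import topic_research_task, write_task

crew = Crew(
    agents=[topic_researcher_agent, writer_agent],
    tasks=[topic_research_task, write_task],  # researcher runs before writer
    process=Process.sequential,  # explicit; assumed to be the default
    verbose=1,
)
result = crew.kickoff()
print(result)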
poetry.lock
ADDED
The diff for this file is too large to render.
pyproject.toml
ADDED
@@ -0,0 +1,29 @@
+[tool.poetry]
+name = "crewai-tools-openai"
+version = "0.1.0"
+description = "Automate social media"
+authors = ["Z"]
+
+[tool.poetry.dependencies]
+python = ">=3.10,<=3.13"
+crewai = "0.28.6"
+crewai-tools = "0.1.6"
+python-dotenv = "1.0.0"
+langchain-openai = "^0.0.5"
+
+[tool.poetry.extras]
+tools = ["crewai-tools"]
+
+[tool.pyright]
+# https://github.com/microsoft/pyright/blob/main/docs/configuration.md
+useLibraryCodeForTypes = true
+exclude = [".cache"]
+
+[tool.ruff]
+# https://beta.ruff.rs/docs/configuration/
+select = ['E', 'W', 'F', 'I', 'B', 'C4', 'ARG', 'SIM']
+ignore = ['W291', 'W292', 'W293']
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
results.md
ADDED
@@ -0,0 +1,169 @@
+[DEBUG]: == Working Agent: Topic Researcher
+
+
+> Entering new CrewAgentExecutor chain...
+I need to use the tools available to gather valuable information about the comparison between Llama 2 and Llama 3. I should focus on finding detailed content that provides a clear comparison between the two.
+
+Action: Search the internet
+Action Input: {"search_query": "Comparison between Llama 2 and Llama 3"}
+
+
+Search results: Title: Llama 3 vs Llama 2. Comparison, Differences, Features | Apps4Rent
+Link: https://www.apps4rent.com/blog/llama-3-vs-llama-2/
+Snippet: Comparing features, similarities, and differences between Llama 2 and Llama 3. Apps4Rent can help with Llama deployment on Azure & on AWS using SageMaker ...
+---
+Title: Llama 2 vs Llama 3: An In-depth Comparison - Medium
+Link: https://medium.com/@vineethveetil/llama-2-vs-llama-3-an-in-depth-comparison-aebb6a3f8c51
+Snippet: Llama 2 uses SentencePiece for tokenization, whereas Llama 3 has transitioned to OpenAI's Tiktoken. Llama 3 also introduces a ChatFormat class, ...
+---
+Title: Introducing Meta Llama 3: The most capable openly available LLM ...
+Link: https://ai.meta.com/blog/meta-llama-3/
+Snippet: Compared to Llama 2, we made several key improvements. Llama 3 uses a tokenizer with a vocabulary of 128K tokens that encodes language much more ...
+---
+Title: A Comprehensive Technical Analysis of Llama 3 & Comparison with ...
+Link: https://www.linkedin.com/pulse/comprehensive-technical-analysis-llama-3-comparison-2-ibad-rehman-kw8pe
+Snippet: Llama 3 distinguishes itself from its predecessor, Llama 2, with a broader scope and an array of enhanced features aimed at providing more ...
+---
+Title: Llama 2 vs. Llama 3: Which LLM is Better? | Sapling
+Link: https://sapling.ai/llm/llama2-vs-llama3
+Snippet: Side-by-side comparison of Llama 2 and Llama 3 with feature breakdowns and pros/cons of each large language model.
+---
+Title: Llama2 vs Llama3 - comparison. - LinkedIn
+Link: https://www.linkedin.com/pulse/llama2-vs-llama3-comparison-vlad-kost-strxc
+Snippet: Llama 3 introduces a more flexible approach to manage special tokens, which are essential for parsing distinct textual elements like headers or ...
+---
+Title: Result: Llama 3 EXL2 quant quality compared to GGUF and Llama 2
+Link: https://www.reddit.com/r/LocalLLaMA/comments/1cfbadc/result_llama_3_exl2_quant_quality_compared_to/
+Snippet: The quality at same model size seems to be exactly the same between EXL2 and the latest imatrix IQ quants of GGUF, for both Llama 3 and 2. For ...
+---
+Title: LLaMa 3 vs LLaMa 2 - Analyzing the Difference. - MonsterAPI Blog
+Link: https://blog.monsterapi.ai/what-is-llama-3-and-how-it-differs-from-llama-2/
+Snippet: LLaMa 3 boasts better performance across all parameters compared to LLaMa 2. Here's the comparison: Enhanced Performance: LLaMa 3 tackles multi- ...
+---
+Title: Meta AI: What is Llama 3 and why does it matter? - Zapier
+Link: https://zapier.com/blog/llama-meta/
+Snippet: It's the successor to Llama 2, Meta's previous generation of AI models. While there are some technical differences between Llama and other ...
+---
+Title: Llama-2 vs. Llama-3: a Tic-Tac-Toe Battle Between Models
+Link: https://towardsdatascience.com/llama-2-vs-llama-3-a-tic-tac-toe-battle-between-models-7301962ca65d
+Snippet: As a slight disadvantage, a Llama-3 is slower compared to a previous model (2.5 vs. 4.3 seconds, respectively). Practically, 4.3s is good enough ...
+---
+
+
+Final Answer:
+Llama 3 vs Llama 2. Comparison, Differences, Features | Apps4Rent
+Link: https://www.apps4rent.com/blog/llama-3-vs-llama-2/
+Snippet: Comparing features, similarities, and differences between Llama 2 and Llama 3. Apps4Rent can help with Llama deployment on Azure & on AWS using SageMaker ...
+
+Llama 2 vs Llama 3: An In-depth Comparison - Medium
+Link: https://medium.com/@vineethveetil/llama-2-vs-llama-3-an-in-depth-comparison-aebb6a3f8c51
+Snippet: Llama 2 uses SentencePiece for tokenization, whereas Llama 3 has transitioned to OpenAI's Tiktoken. Llama 3 also introduces a ChatFormat class, ...
+
+Introducing Meta Llama 3: The most capable openly available LLM ...
+Link: https://ai.meta.com/blog/meta-llama-3/
+Snippet: Compared to Llama 2, we made several key improvements. Llama 3 uses a tokenizer with a vocabulary of 128K tokens that encodes language much more ...
+
+A Comprehensive Technical Analysis of Llama 3 & Comparison with ...
+Link: https://www.linkedin.com/pulse/comprehensive-technical-analysis-llama-3-comparison-2-ibad-rehman-kw8pe
+Snippet: Llama 3 distinguishes itself from its predecessor, Llama 2, with a broader scope and an array of enhanced features aimed at providing more ...
+
+Llama 2 vs. Llama 3: Which LLM is Better? | Sapling
+Link: https://sapling.ai/llm/llama2-vs-llama3
+Snippet: Side-by-side comparison of Llama 2 and Llama 3 with feature breakdowns and pros/cons of each large language model.
+
+Llama2 vs Llama3 - comparison. - LinkedIn
+Link: https://www.linkedin.com/pulse/llama2-vs-llama3-comparison-vlad-kost-strxc
+Snippet: Llama 3 introduces a more flexible approach to manage special tokens, which are essential for parsing distinct textual elements like headers or ...
+
+Result: Llama 3 EXL2 quant quality compared to GGUF and Llama 2
+Link: https://www.reddit.com/r/LocalLLaMA/comments/1cfbadc/result_llama_3_exl2_quant_quality_compared_to/
+Snippet: The quality at same model size seems to be exactly the same between EXL2 and the latest imatrix IQ quants of GGUF, for both Llama 3 and 2. For ...
+
+LLaMa 3 vs LLaMa 2 - Analyzing the Difference. - MonsterAPI Blog
+Link: https://blog.monsterapi.ai/what-is-llama-3-and-how-it-differs-from-llama-2/
+Snippet: LLaMa 3 boasts better performance across all parameters compared to LLaMa 2. Here's the comparison: Enhanced Performance: LLaMa 3 tackles multi- ...
+
+Meta AI: What is Llama 3 and why does it matter? - Zapier
+Link: https://zapier.com/blog/llama-meta/
+Snippet: It's the successor to Llama 2, Meta's previous generation of AI models. While there are some technical differences between Llama and other ...
+
+Llama-2 vs. Llama-3: a Tic-Tac-Toe Battle Between Models
+Link: https://towardsdatascience.com/llama-2-vs-llama-3-a-tic-tac-toe-battle-between-models-7301962ca65d
+Snippet: As a slight disadvantage, a Llama-3 is slower compared to a previous model (2.5 vs. 4.3 seconds, respectively). Practically, 4.3s is good enough ...
+
+> Finished chain.
+[DEBUG]: == [Topic Researcher] Task output: Llama 3 vs Llama 2. Comparison, Differences, Features | Apps4Rent
+Link: https://www.apps4rent.com/blog/llama-3-vs-llama-2/
+Snippet: Comparing features, similarities, and differences between Llama 2 and Llama 3. Apps4Rent can help with Llama deployment on Azure & on AWS using SageMaker ...
+
+Llama 2 vs Llama 3: An In-depth Comparison - Medium
+Link: https://medium.com/@vineethveetil/llama-2-vs-llama-3-an-in-depth-comparison-aebb6a3f8c51
+Snippet: Llama 2 uses SentencePiece for tokenization, whereas Llama 3 has transitioned to OpenAI's Tiktoken. Llama 3 also introduces a ChatFormat class, ...
+
+Introducing Meta Llama 3: The most capable openly available LLM ...
+Link: https://ai.meta.com/blog/meta-llama-3/
+Snippet: Compared to Llama 2, we made several key improvements. Llama 3 uses a tokenizer with a vocabulary of 128K tokens that encodes language much more ...
+
+A Comprehensive Technical Analysis of Llama 3 & Comparison with ...
+Link: https://www.linkedin.com/pulse/comprehensive-technical-analysis-llama-3-comparison-2-ibad-rehman-kw8pe
+Snippet: Llama 3 distinguishes itself from its predecessor, Llama 2, with a broader scope and an array of enhanced features aimed at providing more ...
+
+Llama 2 vs. Llama 3: Which LLM is Better? | Sapling
+Link: https://sapling.ai/llm/llama2-vs-llama3
+Snippet: Side-by-side comparison of Llama 2 and Llama 3 with feature breakdowns and pros/cons of each large language model.
+
+Llama2 vs Llama3 - comparison. - LinkedIn
+Link: https://www.linkedin.com/pulse/llama2-vs-llama3-comparison-vlad-kost-strxc
+Snippet: Llama 3 introduces a more flexible approach to manage special tokens, which are essential for parsing distinct textual elements like headers or ...
+
+Result: Llama 3 EXL2 quant quality compared to GGUF and Llama 2
+Link: https://www.reddit.com/r/LocalLLaMA/comments/1cfbadc/result_llama_3_exl2_quant_quality_compared_to/
+Snippet: The quality at same model size seems to be exactly the same between EXL2 and the latest imatrix IQ quants of GGUF, for both Llama 3 and 2. For ...
+
+LLaMa 3 vs LLaMa 2 - Analyzing the Difference. - MonsterAPI Blog
+Link: https://blog.monsterapi.ai/what-is-llama-3-and-how-it-differs-from-llama-2/
+Snippet: LLaMa 3 boasts better performance across all parameters compared to LLaMa 2. Here's the comparison: Enhanced Performance: LLaMa 3 tackles multi- ...
+
+Meta AI: What is Llama 3 and why does it matter? - Zapier
+Link: https://zapier.com/blog/llama-meta/
+Snippet: It's the successor to Llama 2, Meta's previous generation of AI models. While there are some technical differences between Llama and other ...
+
+Llama-2 vs. Llama-3: a Tic-Tac-Toe Battle Between Models
+Link: https://towardsdatascience.com/llama-2-vs-llama-3-a-tic-tac-toe-battle-between-models-7301962ca65d
+Snippet: As a slight disadvantage, a Llama-3 is slower compared to a previous model (2.5 vs. 4.3 seconds, respectively). Practically, 4.3s is good enough ...
+
+
+[DEBUG]: == Working Agent: Writer
+
+
+> Entering new CrewAgentExecutor chain...
+I now can give a great answer
+
+Final Answer:
+
+Llama 2 and Llama 3 are both powerful language models, but they have key differences that set them apart. Llama 2 utilizes SentencePiece for tokenization, while Llama 3 has switched to OpenAI's Tiktoken. Additionally, Llama 3 introduces a ChatFormat class, offering new capabilities for users.
+
+In terms of technical improvements, Llama 3 has a tokenizer with a vocabulary of 128K tokens, encoding language more effectively than its predecessor. This broader scope and enhanced features make Llama 3 a more capable and versatile option for natural language processing tasks.
+
+When comparing performance, Llama 3 outshines Llama 2 with better overall parameters and enhanced efficiency. While Llama 3 may be slightly slower in processing speed compared to Llama 2, the improvements in performance and capabilities make it a superior choice for handling complex language tasks.
+
+In conclusion, Llama 3 represents a significant advancement over Llama 2, offering enhanced features, improved performance, and a broader scope for tackling various language processing challenges. Whether it's for text generation, translation, or other NLP tasks, Llama 3 stands out as the more capable and effective option between the two models.
+
+> Finished chain.
+[DEBUG]: == [Writer] Task output: Llama 2 and Llama 3 are both powerful language models, but they have key differences that set them apart. Llama 2 utilizes SentencePiece for tokenization, while Llama 3 has switched to OpenAI's Tiktoken. Additionally, Llama 3 introduces a ChatFormat class, offering new capabilities for users.
+
+In terms of technical improvements, Llama 3 has a tokenizer with a vocabulary of 128K tokens, encoding language more effectively than its predecessor. This broader scope and enhanced features make Llama 3 a more capable and versatile option for natural language processing tasks.
+
+When comparing performance, Llama 3 outshines Llama 2 with better overall parameters and enhanced efficiency. While Llama 3 may be slightly slower in processing speed compared to Llama 2, the improvements in performance and capabilities make it a superior choice for handling complex language tasks.
+
+In conclusion, Llama 3 represents a significant advancement over Llama 2, offering enhanced features, improved performance, and a broader scope for tackling various language processing challenges. Whether it's for text generation, translation, or other NLP tasks, Llama 3 stands out as the more capable and effective option between the two models.
+
+
+Here is the result:
+Llama 2 and Llama 3 are both powerful language models, but they have key differences that set them apart. Llama 2 utilizes SentencePiece for tokenization, while Llama 3 has switched to OpenAI's Tiktoken. Additionally, Llama 3 introduces a ChatFormat class, offering new capabilities for users.
+
+In terms of technical improvements, Llama 3 has a tokenizer with a vocabulary of 128K tokens, encoding language more effectively than its predecessor. This broader scope and enhanced features make Llama 3 a more capable and versatile option for natural language processing tasks.
+
+When comparing performance, Llama 3 outshines Llama 2 with better overall parameters and enhanced efficiency. While Llama 3 may be slightly slower in processing speed compared to Llama 2, the improvements in performance and capabilities make it a superior choice for handling complex language tasks.
+
+In conclusion, Llama 3 represents a significant advancement over Llama 2, offering enhanced features, improved performance, and a broader scope for tackling various language processing challenges. Whether it's for text generation, translation, or other NLP tasks, Llama 3 stands out as the more capable and effective option between the two models.
tasks.py
ADDED
@@ -0,0 +1,31 @@
+from textwrap import dedent
+from crewai import Task
+from agents import topic_researcher_agent, writer_agent
+
+topic_research_task = Task(
+    description=dedent(
+        """
+        Gather valuable, high-quality web information about the comparison between Llama 2 and Llama 3
+        """
+    ),
+    expected_output=dedent(
+        """
+        A curated set of high-quality sources and key findings comparing Llama 2 and Llama 3
+        """
+    ),
+    agent=topic_researcher_agent,
+)
+
+write_task = Task(
+    description=dedent(
+        """
+        Create an article that summarises the difference between Llama 2 and Llama 3
+        """
+    ),
+    expected_output=dedent(
+        """
+        A high-quality and engaging article comparing Llama 2 and Llama 3.
+        """
+    ),
+    agent=writer_agent,
+)
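As written, the writer only sees the research output through the crew's task ordering. The sketch below wires that dependency explicitly via Task's context parameter; this is a hypothetical variant, not part of this commit, and the parameter should be verified against the pinned crewai 0.28.6 release before use.

# Hypothetical variant: hand the researcher's output to the writer directly
# (verify that Task accepts `context` in the pinned crewai 0.28.6).
write_task = Task(
    description=dedent(
        """
        Create an article that summarises the difference between Llama 2 and Llama 3
        """
    ),
    expected_output="A high-quality and engaging article comparing Llama 2 and Llama 3.",
    agent=writer_agent,
    context=[topic_research_task],  # the writer consumes the research findings
)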