Fran committed on
Commit 6f40147 · 1 Parent(s): ae7a494

New tool to create LinkedIn posts

Files changed (3)
  1. .gitignore +2 -0
  2. app.py +7 -31
  3. tools/linkedin_post_composer.py +56 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ __pycache__
+ venv
app.py CHANGED
@@ -1,40 +1,16 @@
  from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
- import datetime
- import requests
- import pytz
  import yaml
  from tools.final_answer import FinalAnswerTool
+ from tools.visit_webpage import VisitWebpageTool
+ from tools.linkedin_post_composer import LinkedInPostPromptComposerTool

  from Gradio_UI import GradioUI

- # Below is an example of a tool that does nothing. Amaze us with your creativity !
- @tool
- def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
-     #Keep this format for the description / args / args description but feel free to modify the tool
-     """A tool that does nothing yet
-     Args:
-         arg1: the first argument
-         arg2: the second argument
-     """
-     return "What magic will you build ?"
-
- @tool
- def get_current_time_in_timezone(timezone: str) -> str:
-     """A tool that fetches the current local time in a specified timezone.
-     Args:
-         timezone: A string representing a valid timezone (e.g., 'America/New_York').
-     """
-     try:
-         # Create timezone object
-         tz = pytz.timezone(timezone)
-         # Get current time in that timezone
-         local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
-         return f"The current local time in {timezone} is: {local_time}"
-     except Exception as e:
-         return f"Error fetching time for timezone '{timezone}': {str(e)}"
-

  final_answer = FinalAnswerTool()
+ visit_web_page = VisitWebpageTool()
+ linkedin_post_composer = LinkedInPostPromptComposerTool()
+ search = DuckDuckGoSearchTool()

  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
@@ -42,7 +18,7 @@ final_answer = FinalAnswerTool()
  model = HfApiModel(
      max_tokens=2096,
      temperature=0.5,
-     model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
+     model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',# it is possible that this model may be overloaded
      custom_role_conversions=None,
  )

@@ -55,7 +31,7 @@ with open("prompts.yaml", 'r') as stream:

  agent = CodeAgent(
      model=model,
-     tools=[final_answer], ## add your tools here (don't remove final answer)
+     tools=[linkedin_post_composer, visit_web_page, search, image_generation_tool, final_answer], ## add your tools here (don't remove final answer)
      max_steps=6,
      verbosity_level=1,
      grammar=None,
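
With these changes the agent in app.py registers web search (DuckDuckGoSearchTool), webpage reading (VisitWebpageTool), and the new prompt composer. A minimal sketch of how the updated agent could be exercised directly, assuming the rest of app.py still matches the course starter template (prompts.yaml, image_generation_tool loaded via load_tool, and GradioUI(agent).launch() at the bottom); the task string is illustrative only:

    # Sketch only, not part of the commit: run the configured agent from Python
    # instead of through the Gradio UI. Assumes `agent` is built as in app.py above.
    result = agent.run(
        "Visit https://huggingface.co/blog, pull out the key points of the latest post, "
        "then use linkedin_post_prompt_composer to build a drafting prompt and "
        "return the finished LinkedIn post as the final answer."
    )
    print(result)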
tools/linkedin_post_composer.py ADDED
@@ -0,0 +1,56 @@
+ from smolagents.tools import Tool
+
+ class LinkedInPostPromptComposerTool(Tool):
+     name = "linkedin_post_prompt_composer"
+     description = (
+         "Generates a detailed prompt by synthesizing conversation context, extracted webpage insights, "
+         "and additional instructions. This prompt can then be used by a separate final answer tool to produce "
+         "a polished LinkedIn post."
+     )
+     inputs = {
+         "context": {
+             "type": "string",
+             "description": (
+                 "A summary of your brainstorming or discussion context. Include key ideas, opinions, "
+                 "and relevant back-and-forth that should influence the final post."
+             )
+         },
+         "extracted_info": {
+             "type": "string",
+             "description": (
+                 "Key points, data, or insights extracted from webpages, reports, or articles that provide "
+                 "the factual basis for the post."
+             )
+         },
+         "instructions": {
+             "type": "string",
+             "description": (
+                 "Additional guidance such as desired tone, target audience, style, or specific calls-to-action. "
+                 "For example: 'Make it conversational yet authoritative, include a compelling hook, and end with a question.'"
+             ),
+             "nullable": True
+         }
+     }
+     output_type = "string"
+
+     def forward(self, context: str, extracted_info: str, instructions: str = "") -> str:
+         prompt = (
+             "You are an experienced LinkedIn content strategist and influencer. Using the inputs provided, "
+             "generate a comprehensive prompt for an LLM to produce a final LinkedIn post that meets the following criteria:\n\n"
+             "1. **Compelling Hook:** Begin with a strong headline or opening line that grabs attention.\n"
+             "2. **Coherent Narrative:** Seamlessly blend the discussion context and the extracted information into a clear, engaging story.\n"
+             "3. **Actionable Insights:** Offer actionable advice or takeaways that provide real value to a professional audience.\n"
+             "4. **Call-to-Action:** Include a call-to-action to encourage comments, shares, or further engagement.\n"
+             "5. **Trending Hashtags:** Append 3-5 relevant and trending LinkedIn hashtags at the end.\n\n"
+             "The final post should be approximately 200–300 words, using a professional yet conversational tone.\n\n"
+             "### Input Sections:\n"
+             "**Discussion Context:**\n" + context + "\n\n"
+             "**Extracted Information:**\n" + extracted_info + "\n\n"
+         )
+         if instructions:
+             prompt += "**Additional Instructions:**\n" + instructions + "\n\n"
+         prompt += "Now, produce the final answer generating the LinkedIn post based on the above inputs."
+         return prompt
+
+     def __init__(self, *args, **kwargs):
+         self.is_initialized = False
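
Because LinkedInPostPromptComposerTool only assembles its three string inputs into a fixed prompt template, it can be smoke-tested on its own, outside the agent. A minimal sketch with placeholder inputs (the strings below are illustrative, not from the commit):

    from tools.linkedin_post_composer import LinkedInPostPromptComposerTool

    # Sketch only: call forward() directly to inspect the generated prompt.
    composer = LinkedInPostPromptComposerTool()
    prompt = composer.forward(
        context="Brainstorm notes on why small teams adopt AI agents quickly.",     # placeholder
        extracted_info="Key takeaways pulled from the article the agent visited.",  # placeholder
        instructions="Conversational but authoritative; end with a question.",      # placeholder
    )
    print(prompt)  # this is the text the agent would pass on to the final answer step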