Bhaskar2611 commited on
Commit
5681a09
·
verified ·
1 Parent(s): 37179bd

Create llm_tool.py

Browse files
Files changed (1) hide show
  1. llm_tool.py +42 -0
llm_tool.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_hub import InferenceClient
2
+ import os
3
+
4
class LLMHandler:
    """Thin wrapper around the Hugging Face Inference API for task-deadline estimation.

    The API token is read from the ``HF_TOKEN`` environment variable when the
    handler is constructed; authentication problems therefore only surface on
    the first request.
    """

    def __init__(self):
        # NOTE(review): os.getenv returns None when HF_TOKEN is unset — the
        # client is still built and the failure shows up on the first call.
        self.client = InferenceClient(
            model="mistralai/Mistral-7B-Instruct-v0.3",  # Updated to v0.3
            token=os.getenv("HF_TOKEN"),
        )

    def get_deadline_suggestion(self, task_description):
        """Ask the model for a structured deadline estimate for *task_description*.

        Returns the model's raw text reply on success. Any failure (network,
        auth, malformed response) is caught and reported as a human-readable
        ``"LLM Error: ..."`` string rather than raised.
        """
        prompt = f"""You are a task management assistant. Analyze the task below and provide a realistic deadline suggestion.

Task Description:
"{task_description}"

Follow this format:
1. **Estimated Hours**: [X]
2. **Recommended Deadline**: [YYYY-MM-DD HH:MM]
3. **Priority**: [High/Medium/Low]
4. **Notes**: [Brief explanation]

Example:
1. **Estimated Hours**: 8
2. **Recommended Deadline**: 2024-04-10 18:00
3. **Priority**: High
4. **Notes**: Research papers typically take 5–7 days for 5000 words.

Now analyze the task and return only the structured output."""

        # Best-effort call: the attribute walk over the response object is kept
        # inside the try so a malformed reply is also reported as text.
        try:
            completion = self.client.chat.completions.create(
                messages=[{"role": "user", "content": prompt}],
                max_tokens=500,
                temperature=0.3,
            )
            return completion.choices[0].message.content
        except Exception as exc:
            return f"LLM Error: {str(exc)}. Please check HF_TOKEN or try again later."
40
+
41
# Singleton instance
# NOTE(review): constructed at import time, so importing this module builds the
# InferenceClient and reads HF_TOKEN immediately; a missing/invalid token only
# surfaces on the first get_deadline_suggestion call — confirm callers expect
# this eager initialization.
llm = LLMHandler()