shamimjony1000 committed on
Commit
429a64c
·
verified ·
1 Parent(s): e9739ea

Update llm_part.py

Browse files
Files changed (1) hide show
  1. llm_part.py +99 -100
llm_part.py CHANGED
@@ -1,100 +1,99 @@
1
- # llm_part.py
2
- import os
3
- from langchain.prompts import PromptTemplate
4
- from langchain.chains import LLMChain
5
- from langchain_google_genai import ChatGoogleGenerativeAI
6
- from langchain_groq import ChatGroq
7
- from secret_key import gemeni_key, llama_key
8
- from datetime import datetime
9
-
10
- # Initialize LLM models
11
- llm_1 = ChatGoogleGenerativeAI(model="gemini-pro", api_key=gemeni_key)
12
- llm_2 = ChatGroq(model="llama3-groq-70b-8192-tool-use-preview", groq_api_key=llama_key)
13
-
14
- # Prayer times for the current day only (Fajr, Dhuhr, Asr, Maghrib, Isha)
15
- prayer_times = {
16
- "Fajr": "04:30",
17
- "Dhuhr": "11:45",
18
- "Asr": "15:30",
19
- "Maghrib": "17:45",
20
- "Isha": "19:00"
21
- }
22
-
23
- # Define the next prayer function
24
- def get_next_prayer(current_time):
25
- # Retrieve today's prayer times
26
- times = list(prayer_times.values())
27
- prayer_names = list(prayer_times.keys())
28
-
29
- # Current time in comparable format
30
- current_hour = current_time.hour
31
- current_minute = current_time.minute
32
-
33
- for prayer, time_str in zip(prayer_names, times):
34
- prayer_hour, prayer_minute = map(int, time_str.split(':'))
35
- if (prayer_hour > current_hour) or (prayer_hour == current_hour and prayer_minute > current_minute):
36
- return prayer, time_str # Return both prayer name and time
37
-
38
- return "Isha", times[-1]
39
-
40
-
41
- templates = {
42
- "Learning": "You just completed a learning task: {task_name}. You studied for {hours} hours and {minutes} minutes. "
43
- "Can you suggest improvements to my learning process? What should I learn next?",
44
-
45
- "Gym": "You completed a gym task: {task_name}. It lasted {hours} hours and {minutes} minutes. "
46
- "How can I optimize my workout? What should I focus on next time in the gym?",
47
-
48
- "Personal": "I just completed a personal task: {task_name}. It took me {hours} hours and {minutes} minutes. "
49
- "What advice can you give for better time management or improvement?",
50
-
51
- "Work": "I completed a work-related task: {task_name}, which lasted {hours} hours and {minutes} minutes. "
52
- "Can you suggest ways to improve my work efficiency? What should I work on next?",
53
-
54
- "Prayer": "You completed a prayer task: {task_name}. It took you {hours} hours and {minutes} minutes. "
55
- "What practical steps can I take to deepen my spiritual connection during Salah (prayer)?"
56
- "Please provide specific traditions and teachings for the next prayer, **{next_prayer}**, which is at **{next_time}**. "
57
- "Format your response as bullet points for clarity."
58
- }
59
-
60
- # Function to select a template based on the task category
61
- def get_template(category):
62
- return templates.get(category, "You just completed the task: {task_name}. It took {hours} hours and {minutes} minutes. "
63
- "What advice can you give? What is the next most productive task?")
64
-
65
- # Template function to generate advice and next task recommendations for each task
66
- def get_task_advice(task_name, task_category, hours, minutes):
67
- # Get the template based on the category
68
- template_str = get_template(task_category)
69
-
70
- # Determine the next prayer and its time
71
- current_time = datetime.now()
72
- next_prayer, next_time = get_next_prayer(current_time)
73
-
74
- # Create the prompt
75
- prompt_template = PromptTemplate(
76
- input_variables=["task_name", "hours", "minutes", "next_prayer", "next_time"],
77
- template=template_str
78
- )
79
-
80
- prompt = prompt_template.format(task_name=task_name, hours=hours, minutes=minutes,
81
- next_prayer=next_prayer, next_time=next_time)
82
-
83
- # Set up LLMChain for both models
84
- chain_1 = LLMChain(llm=llm_1, prompt=prompt_template)
85
- chain_2 = LLMChain(llm=llm_2, prompt=prompt_template)
86
-
87
- try:
88
- # Run the LLMChain to get the responses
89
- response_llm1 = chain_1.run({"task_name": task_name, "hours": hours, "minutes": minutes,
90
- "next_prayer": next_prayer, "next_time": next_time})
91
- except Exception as e:
92
- response_llm1 = f"Error generating response from Gemini: {str(e)}"
93
-
94
- try:
95
- response_llm2 = chain_2.run({"task_name": task_name, "hours": hours, "minutes": minutes,
96
- "next_prayer": next_prayer, "next_time": next_time})
97
- except Exception as e:
98
- response_llm2 = f"Error generating response from Llama: {str(e)}"
99
-
100
- return response_llm1, response_llm2
 
1
# llm_part.py
import os
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from datetime import datetime

# API keys: this revision dropped `from secret_key import gemeni_key, llama_key`
# while the names were still used below, which would raise NameError at import
# time. Read the keys from the environment instead (`os` was already imported
# and otherwise unused, which suggests this was the intent).
# NOTE(review): confirm the exact environment-variable names used in deployment.
gemeni_key = os.getenv("GEMINI_API_KEY")
llama_key = os.getenv("GROQ_API_KEY")

# Initialize LLM models: one Google Gemini model and one Groq-hosted Llama 3.
llm_1 = ChatGoogleGenerativeAI(model="gemini-pro", api_key=gemeni_key)
llm_2 = ChatGroq(model="llama3-groq-70b-8192-tool-use-preview", groq_api_key=llama_key)
13
# Fixed prayer schedule for the current day only (Fajr, Dhuhr, Asr, Maghrib,
# Isha), as zero-padded "HH:MM" strings. Insertion order is chronological;
# get_next_prayer relies on scanning it front to back.
prayer_times = dict(
    Fajr="04:30",
    Dhuhr="11:45",
    Asr="15:30",
    Maghrib="17:45",
    Isha="19:00",
)
21
+
22
# Determine which prayer comes next relative to a given wall-clock time.
def get_next_prayer(current_time):
    """Return (name, "HH:MM") of the first prayer strictly later than *current_time*.

    Falls back to ("Isha", <Isha's time>) once every prayer of the day has
    passed, since the schedule covers the current day only.
    """
    # Zero-padded "HH:MM" strings order the same way as (hour, minute) pairs,
    # so the hour/minute comparison collapses to one lexicographic test.
    now_str = f"{current_time.hour:02d}:{current_time.minute:02d}"
    for name, at in prayer_times.items():
        if at > now_str:
            return name, at
    return "Isha", prayer_times["Isha"]
38
+
39
+
40
# Per-category prompt templates. Every template uses {task_name}/{hours}/{minutes};
# the "Prayer" template additionally uses {next_prayer}/{next_time}.
# Fix: the Prayer template was missing the space between "(prayer)?" and
# "Please" (adjacent string literals concatenated to "(prayer)?Please"),
# producing a malformed prompt.
templates = {
    "Learning": "You just completed a learning task: {task_name}. You studied for {hours} hours and {minutes} minutes. "
                "Can you suggest improvements to my learning process? What should I learn next?",

    "Gym": "You completed a gym task: {task_name}. It lasted {hours} hours and {minutes} minutes. "
           "How can I optimize my workout? What should I focus on next time in the gym?",

    "Personal": "I just completed a personal task: {task_name}. It took me {hours} hours and {minutes} minutes. "
                "What advice can you give for better time management or improvement?",

    "Work": "I completed a work-related task: {task_name}, which lasted {hours} hours and {minutes} minutes. "
            "Can you suggest ways to improve my work efficiency? What should I work on next?",

    "Prayer": "You completed a prayer task: {task_name}. It took you {hours} hours and {minutes} minutes. "
              "What practical steps can I take to deepen my spiritual connection during Salah (prayer)? "
              "Please provide specific traditions and teachings for the next prayer, **{next_prayer}**, which is at **{next_time}**. "
              "Format your response as bullet points for clarity."
}
58
+
59
# Look up the category-specific prompt template, with a generic fallback.
def get_template(category):
    """Return the prompt template string for *category* (generic if unknown)."""
    default_template = (
        "You just completed the task: {task_name}. It took {hours} hours and {minutes} minutes. "
        "What advice can you give? What is the next most productive task?"
    )
    return templates.get(category, default_template)
63
+
64
# Generate advice and next-task recommendations for a completed task.
def get_task_advice(task_name, task_category, hours, minutes):
    """Ask both LLMs for advice about a just-completed task.

    Args:
        task_name: Human-readable name of the completed task.
        task_category: Category key used to pick a prompt template
            ("Learning", "Gym", "Personal", "Work", "Prayer", or anything
            else for the generic fallback).
        hours, minutes: Duration of the task.

    Returns:
        (gemini_response, llama_response) — each is either the model's text
        or an error string; this function never raises for model failures,
        so one model's outage does not hide the other's answer.
    """
    template_str = get_template(task_category)

    # The Prayer template interpolates the next prayer and its time.
    next_prayer, next_time = get_next_prayer(datetime.now())

    prompt_template = PromptTemplate(
        input_variables=["task_name", "hours", "minutes", "next_prayer", "next_time"],
        template=template_str,
    )

    # One shared inputs dict instead of repeating the kwargs per chain.
    # (The original also pre-formatted the prompt into an unused local;
    # LLMChain formats it itself, so that dead code is removed.)
    inputs = {
        "task_name": task_name,
        "hours": hours,
        "minutes": minutes,
        "next_prayer": next_prayer,
        "next_time": next_time,
    }

    def _run_chain(llm, model_name):
        # Best-effort invocation: convert any failure into an error string.
        try:
            return LLMChain(llm=llm, prompt=prompt_template).run(inputs)
        except Exception as e:
            return f"Error generating response from {model_name}: {str(e)}"

    response_llm1 = _run_chain(llm_1, "Gemini")
    response_llm2 = _run_chain(llm_2, "Llama")

    return response_llm1, response_llm2