File size: 3,898 Bytes
a8dd5f5
 
 
 
 
 
 
9c23216
 
 
 
a8dd5f5
 
 
 
9c23216
a8dd5f5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9c23216
a8dd5f5
 
 
 
 
 
 
 
 
 
 
 
 
9c23216
 
 
ba70fb9
 
 
 
 
 
4d46cce
 
ba70fb9
 
 
9c23216
 
4d46cce
9c23216
 
 
 
 
 
 
 
 
 
 
 
 
 
4d46cce
9c23216
 
 
 
 
 
 
 
 
4d46cce
9c23216
ba70fb9
 
9c23216
 
a8dd5f5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import os
import pandas as pd
from datetime import datetime
from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from prompts.summary_prompt import (
    meterological_data_summary_prompt,
    agricultural_yield_comparison_prompt
)

# Load environment variables (e.g. OPENAI_API_KEY read below) from a local .env file.
load_dotenv()


def get_meterological_summary(scenario: str, temperature_df: pd.DataFrame, rain_df: pd.DataFrame, irradiance_df: pd.DataFrame) -> str:
    """Generate a French-language summary of weather forecasts using GPT-4o.

    Serializes the three forecast DataFrames to text, injects them into a
    chat prompt, and returns the model's summary.

    Args:
        scenario: Scenario label forwarded to the system prompt
            (presumably referenced as {scenario} in the prompt template —
            confirm against prompts/summary_prompt.py).
        temperature_df: Temperature forecast data.
        rain_df: Precipitation forecast data.
        irradiance_df: Solar irradiance forecast data.

    Returns:
        The model's summary as a plain string.
    """
    today = datetime.today().strftime("%Y/%m/%d")

    # Serialize the full frames for the prompt.
    # (The previous .head(len(df)) wrapper was a no-op and has been removed.)
    temp_data = temperature_df.to_string(index=False)
    rain_data = rain_df.to_string(index=False)
    irradiance_data = irradiance_df.to_string(index=False)

    llm = ChatOpenAI(
        model="gpt-4o",
        temperature=0,  # deterministic output for reproducible summaries
        max_tokens=None,
        timeout=None,
        max_retries=2,
        api_key=os.environ.get("OPENAI_API_KEY"),
    )
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", meterological_data_summary_prompt),
            ("human", "Je veux un résumé de ces prévisions métérologique: les données de temperature {temp_data}, les données de précipitation {rain_data}, les données de radiance solaire {irradiance_data}")
        ]
    )
    # StrOutputParser in the chain already yields a str; the former extra
    # output_parser.parse(response) call was redundant (identity on str).
    chain = prompt | llm | StrOutputParser()

    return chain.invoke({
        "scenario": scenario,
        "today": today,
        "temp_data": temp_data,
        "rain_data": rain_data,
        "irradiance_data": irradiance_data,
    })


def get_agricultural_yield_comparison(culture: str,
                                      region: str,
                                      historical_yield_df: pd.DataFrame,
                                      forecast_yield_df: pd.DataFrame,
                                      soil_df: pd.DataFrame,
                                      climate_df: pd.DataFrame,
                                      water_df: pd.DataFrame,
                                      water_df_pv: pd.DataFrame) -> str:
    """Compare agricultural yields with and without shading using GPT-4o.

    Serializes every input DataFrame to text, injects them into a
    French-language chat prompt, and returns the model's comparison.

    Args:
        culture: Crop name (in French) inserted into the prompt.
        region: Region name inserted into the prompt.
        historical_yield_df: Historical yield records.
        forecast_yield_df: Yield projections with and without shading.
        soil_df: Soil characteristics for the region.
        climate_df: Climate data.
        water_df: Water-stress forecast without shading.
        water_df_pv: Water-stress forecast with (PV) shading.

    Returns:
        The model's comparison as a plain string.
    """
    # Serialize the full frames for the prompt.
    # (The previous .head(len(df)) wrapper was a no-op and has been removed.)
    historical_yield = historical_yield_df.to_string(index=False)
    agricultural_yield = forecast_yield_df.to_string(index=False)
    soil_data = soil_df.to_string(index=False)
    water_data = water_df.to_string(index=False)
    water_data_pv = water_df_pv.to_string(index=False)
    climate_data = climate_df.to_string(index=False)

    llm = ChatOpenAI(
        model="gpt-4o",
        temperature=0,  # deterministic output for reproducible comparisons
        max_tokens=None,
        timeout=None,
        max_retries=2,
        api_key=os.environ.get("OPENAI_API_KEY"),
    )
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", agricultural_yield_comparison_prompt),
            ("human", "Je suis agriculteur et je cultive de la {culture} à {region}. Voilà les caractéristiques du sol dans ma région {soil_data} et voilà l'historique de mon rendement {historical_yield} et projections du rendement ma culture avec et sans ombrage {agricultural_yield}. J'ai aussi les prévisions du stress hydrique sans ombrage {water_data} et avec ombrage {water_data_pv} et des données climatiques {climate_data}. " )
        ]
    )
    # StrOutputParser in the chain already yields a str; the former extra
    # output_parser.parse(response) call was redundant (identity on str).
    chain = prompt | llm | StrOutputParser()

    return chain.invoke({
        "culture": culture,
        "region": region,
        "soil_data": soil_data,
        "water_data": water_data,
        "water_data_pv": water_data_pv,
        "climate_data": climate_data,
        "agricultural_yield": agricultural_yield,
        "historical_yield": historical_yield,
    })