Simon Strandgaard committed
Commit: 8628c58
Parent(s): f0808c8

Snapshot of PlanExe commit d87f74953d1699782df0d7e3bfad64b027ccf618

Files changed:
- src/llm_factory.py +55 -6
- src/llm_util/__init__.py +0 -0
- src/llm_util/ollama_info.py +46 -0
- src/plan/app_text2plan.py +45 -16
- src/plan/data/simple_plan_prompts.jsonl +3 -2
- src/prompt/prompt_catalog.py +1 -1
- src/proof_of_concepts/run_ping.py +61 -0
- src/swot/app_swot_analysis.py +31 -11
- src/uuid_util/tests/test_is_valid_uuid.py +6 -1
src/llm_factory.py
CHANGED
@@ -1,6 +1,7 @@
 import logging
 import os
 import json
+from dataclasses import dataclass
 from dotenv import dotenv_values
 from typing import Optional, Any, Dict
 from llama_index.core.llms.llm import LLM
@@ -11,13 +12,14 @@ from llama_index.llms.together import TogetherLLM
 from llama_index.llms.groq import Groq
 from llama_index.llms.lmstudio import LMStudio
 from llama_index.llms.openrouter import OpenRouter
+from src.llm_util.ollama_info import OllamaInfo
 
 # You can disable this if you don't want to send app info to OpenRouter.
 SEND_APP_INFO_TO_OPENROUTER = True
 
 logger = logging.getLogger(__name__)
 
-__all__ = ["get_llm", "
+__all__ = ["get_llm", "LLMInfo"]
 
 # Load .env values and merge with system environment variables.
 # This one-liner makes sure any secret injected by Hugging Face, like OPENROUTER_API_KEY
@@ -66,11 +68,58 @@ def substitute_env_vars(config: Dict[str, Any], env_vars: Dict[str, str]) -> Dic
 
     return process_item(config)
 
-
-
-
-
-
+@dataclass
+class LLMConfigItem:
+    id: str
+    label: str
+
+@dataclass
+class LLMInfo:
+    llm_config_items: list[LLMConfigItem]
+    is_ollama_running: bool
+    error_message_list: list[str]
+
+    @classmethod
+    def obtain_info(cls) -> 'LLMInfo':
+        """
+        Returns an LLMInfo instance describing the available LLM configurations.
+        """
+        error_message_list = []
+        ollama_info = OllamaInfo.obtain_info()
+        if ollama_info.is_running == False:
+            print(f"Ollama is not running. Please start the Ollama service, in order to use the models via Ollama.")
+        elif ollama_info.error_message:
+            print(f"Error message: {ollama_info.error_message}")
+            error_message_list.append(ollama_info.error_message)
+
+        llm_config_items = []
+
+        for config_id, config in _llm_configs.items():
+            if config.get("class") != "Ollama":
+                item = LLMConfigItem(id=config_id, label=config_id)
+                llm_config_items.append(item)
+                continue
+            arguments = config.get("arguments", {})
+            model = arguments.get("model", None)
+
+            is_model_available = ollama_info.is_model_available(model)
+            if is_model_available:
+                label = config_id
+            else:
+                label = f"{config_id} ❌ unavailable"
+
+            if ollama_info.is_running and not is_model_available:
+                error_message = f"Problem with config `\"{config_id}\"`: The model `\"{model}\"` is not available in Ollama. Compare model names in `llm_config.json` with the names available in Ollama."
+                error_message_list.append(error_message)
+
+            item = LLMConfigItem(id=config_id, label=label)
+            llm_config_items.append(item)
+
+        return LLMInfo(
+            llm_config_items=llm_config_items,
+            is_ollama_running=ollama_info.is_running,
+            error_message_list=error_message_list,
+        )
 
 def get_llm(llm_name: Optional[str] = None, **kwargs: Any) -> LLM:
     """
src/llm_util/__init__.py
ADDED
File without changes
src/llm_util/ollama_info.py
ADDED
@@ -0,0 +1,46 @@
+"""
+PROMPT> python -m src.llm_util.ollama_info
+"""
+from dataclasses import dataclass
+
+@dataclass
+class OllamaInfo:
+    """
+    Details about the Ollama service, including a list of available model names,
+    a flag indicating whether the service is running, and an optional error message.
+    """
+    model_names: list[str]
+    is_running: bool
+    error_message: str = None
+
+    @classmethod
+    def obtain_info(cls) -> 'OllamaInfo':
+        """Retrieves information about the Ollama service."""
+        try:
+            # Only import ollama if it's available
+            from ollama import ListResponse, list
+            list_response: ListResponse = list()
+        except ImportError as e:
+            error_message = f"OllamaInfo. The 'ollama' library was not found: {e}"
+            return OllamaInfo(model_names=[], is_running=False, error_message=error_message)
+        except ConnectionError as e:
+            error_message = f"OllamaInfo. Error connecting to Ollama: {e}"
+            return OllamaInfo(model_names=[], is_running=False, error_message=error_message)
+        except Exception as e:
+            error_message = f"OllamaInfo. An unexpected error occurred: {e}"
+            return OllamaInfo(model_names=[], is_running=False, error_message=error_message)
+
+        model_names = [model.model for model in list_response.models]
+        return OllamaInfo(model_names=model_names, is_running=True, error_message=None)
+
+    def is_model_available(self, find_model: str) -> bool:
+        """Checks if a specific model is available in the list of model names."""
+        return find_model in self.model_names
+
+if __name__ == '__main__':
+    find_model = 'qwen2.5-coder:latest'
+    ollama_info = OllamaInfo.obtain_info()
+    print(f"Error message: {ollama_info.error_message}")
+    print(f'Is Ollama running: {ollama_info.is_running}')
+    found = ollama_info.is_model_available(find_model)
+    print(f'Has model {find_model}: {found}')
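
For context, a minimal sketch of how a caller could use OllamaInfo to gate an Ollama-backed model behind a fallback before constructing an LLM. The helper name and both model names are placeholders for illustration, not part of this commit.

from src.llm_util.ollama_info import OllamaInfo

def pick_ollama_model(preferred: str, fallback: str) -> str:
    """Return the preferred model if the local Ollama service has it, otherwise the fallback."""
    info = OllamaInfo.obtain_info()
    if info.is_running and info.is_model_available(preferred):
        return preferred
    return fallback

# Both model names are illustrative placeholders.
chosen = pick_ollama_model('qwen2.5-coder:latest', 'llama3.1:latest')
print(f'Using model: {chosen}')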
src/plan/app_text2plan.py
CHANGED
@@ -15,7 +15,7 @@ import logging
 import json
 from dataclasses import dataclass
 from math import ceil
-from src.llm_factory import
+from src.llm_factory import LLMInfo
 from src.plan.generate_run_id import generate_run_id, RUN_ID_PREFIX
 from src.plan.create_zip_archive import create_zip_archive
 from src.plan.filenames import FilenameEnum
@@ -28,6 +28,15 @@ from src.huggingface_spaces.is_huggingface_spaces import is_huggingface_spaces
 from src.huggingface_spaces.huggingface_spaces_browserstate_secret import huggingface_spaces_browserstate_secret
 from src.utils.time_since_last_modification import time_since_last_modification
 
+logger = logging.getLogger(__name__)
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.StreamHandler()
+    ]
+)
+
 # Slightly different behavior when running inside Hugging Face Spaces, where it's not possible to open a file explorer.
 # And it's multi-user, so we need to keep track of the state for each user.
 IS_HUGGINGFACE_SPACES = is_huggingface_spaces()
@@ -37,6 +46,7 @@ class Config:
     use_uuid_as_run_id: bool
     visible_top_header: bool
     visible_open_output_dir_button: bool
+    visible_llm_info: bool
     visible_openrouter_api_key_textbox: bool
     allow_only_openrouter_models: bool
     run_planner_check_api_key_is_provided: bool
@@ -48,16 +58,18 @@ CONFIG_LOCAL = Config(
     visible_top_header=True,
     visible_open_output_dir_button=True,
     visible_openrouter_api_key_textbox=False,
+    visible_llm_info=True,
     allow_only_openrouter_models=False,
    run_planner_check_api_key_is_provided=False,
    enable_purge_old_runs=False,
-    browser_state_secret=
+    browser_state_secret="insert-your-secret-here",
 )
 CONFIG_HUGGINGFACE_SPACES = Config(
     use_uuid_as_run_id=True,
     visible_top_header=False,
     visible_open_output_dir_button=False,
     visible_openrouter_api_key_textbox=True,
+    visible_llm_info=False,
     allow_only_openrouter_models=True,
     run_planner_check_api_key_is_provided=True,
     enable_purge_old_runs=True,
@@ -94,11 +106,27 @@ gradio_examples = []
 for prompt_item in all_prompts:
     gradio_examples.append([prompt_item.prompt])
 
-
+llm_info = LLMInfo.obtain_info()
+logger.info(f"LLMInfo.is_ollama_running: {llm_info.is_ollama_running}")
+logger.info(f"LLMInfo.error_message_list: {llm_info.error_message_list}")
+
+trimmed_llm_config_items = []
 if CONFIG.allow_only_openrouter_models:
-
+    # On Hugging Face Spaces, show only openrouter models.
+    # Since it's not possible to run Ollama nor LM Studio.
+    trimmed_llm_config_items = [item for item in llm_info.llm_config_items if item.id.startswith("openrouter")]
 else:
-
+    trimmed_llm_config_items = llm_info.llm_config_items
+
+
+# Create tuples for the Gradio Radio buttons.
+available_model_names = []
+default_model_value = None
+for config_index, config_item in enumerate(trimmed_llm_config_items):
+    if config_index == 0:
+        default_model_value = config_item.id
+    tuple_item = (config_item.label, config_item.id)
+    available_model_names.append(tuple_item)
 
 def has_pipeline_complete_file(path_dir: str):
     """
@@ -147,7 +175,7 @@ class SessionState:
         # Settings: the user's OpenRouter API key.
         self.openrouter_api_key = ""  # Initialize to empty string
         # Settings: The model that the user has picked.
-        self.llm_model =
+        self.llm_model = default_model_value
         # Settings: The speedvsdetail that the user has picked.
         self.speedvsdetail = SpeedVsDetailEnum.ALL_DETAILS_BUT_SLOW
         # Holds the subprocess.Popen object for the currently running pipeline process.
@@ -173,7 +201,7 @@ def initialize_browser_settings(browser_state, session_state: SessionState):
     except Exception:
        settings = {}
    openrouter_api_key = settings.get("openrouter_api_key_text", "")
-    model = settings.get("model_radio",
+    model = settings.get("model_radio", default_model_value)
    speedvsdetail = settings.get("speedvsdetail_radio", SpeedVsDetailEnum.ALL_DETAILS_BUT_SLOW)
    session_state.openrouter_api_key = openrouter_api_key
    session_state.llm_model = model
@@ -442,9 +470,18 @@ with gr.Blocks(title="PlanExe") as demo_text2plan:
        )
 
        with gr.Tab("Settings"):
+            if CONFIG.visible_llm_info:
+                if llm_info.is_ollama_running == False:
+                    gr.Markdown("**Ollama is not running**, so Ollama models are unavailable. Please start Ollama to use them.")
+
+                if len(llm_info.error_message_list) > 0:
+                    gr.Markdown("**Error messages:**")
+                    for error_message in llm_info.error_message_list:
+                        gr.Markdown(f"- {error_message}")
+
            model_radio = gr.Radio(
                available_model_names,
-                value=
+                value=default_model_value,
                label="Model",
                interactive=True
            )
@@ -556,14 +593,6 @@ with gr.Blocks(title="PlanExe") as demo_text2plan:
    )
 
 def run_app_text2plan():
-    logging.basicConfig(
-        level=logging.INFO,
-        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-        handlers=[
-            logging.StreamHandler()
-        ]
-    )
-
    if CONFIG.enable_purge_old_runs:
        start_purge_scheduler(run_dir=os.path.abspath(RUN_DIR), purge_interval_seconds=60*60, prefix=RUN_ID_PREFIX)
 
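
A side note on the pattern above: gr.Radio accepts (label, value) tuples as choices, so the UI can display the availability-annotated label while callbacks still receive the plain config id. A minimal standalone sketch under that assumption; the widget names and config ids below are illustrative, not from this commit.

import gradio as gr

choices = [
    ("ollama-llama3.1 ❌ unavailable", "ollama-llama3.1"),
    ("openrouter-example-model", "openrouter-example-model"),  # hypothetical config id
]

with gr.Blocks() as demo:
    model_radio = gr.Radio(choices, value=choices[0][1], label="Model", interactive=True)
    picked = gr.Textbox(label="Selected config id")
    # The callback receives the value part of the tuple, i.e. the config id.
    model_radio.change(lambda model_id: model_id, inputs=model_radio, outputs=picked)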
src/plan/data/simple_plan_prompts.jsonl
CHANGED
@@ -21,7 +21,8 @@
 {"id": "d3e10877-446f-4eb0-8027-864e923973b0", "prompt": "Construct a train bridge between Denmark and England.", "tags": ["denmark", "england", "bridge"]}
 {"id": "9fbb7ff9-5dc3-44f4-9823-dba3f31d3661", "prompt": "Write a Python script for a bouncing yellow ball within a square. Make sure to handle collision detection. Make the square slightly rotate. Implement it in Python. Make sure the ball stays within the square.", "tags": ["programming", "python", "collision detection"]}
 {"id": "676cbca8-5d49-42a0-8826-398318004703", "prompt": "Write a Python script for a snake shape keep bouncing within a pentagon. Make sure to handle collision detection properly. Make the pentagon slowly rotate.", "tags": ["programming", "python", "collision detection"]}
-{"id": "a9113924-6148-4a0c-b72a-eecdb856e1e2", "prompt": "Investigate outbreak of a deadly new disease in the jungle.", "tags": ["outbreak", "jungle"]}
+{"id": "a9113924-6148-4a0c-b72a-eecdb856e1e2", "prompt": "Investigate outbreak of a deadly new disease in the jungle.", "tags": ["outbreak", "jungle", "emergency"]}
+{"id": "d68da41f-9341-40c0-85ee-7fc9181271d1", "prompt": "Eradication of Oak Processionary Caterpillars. It was discovered in Denmark for the first time just under three weeks ago, with around 800 nests found in trees in southeastern Odense. You suffocate from the caterpillar’s toxic hairs. Limit the outbreak as quick as possible.", "tags": ["outbreak", "toxic", "caterpillar", "emergency"]}
 {"id": "4dc34d55-0d0d-4e9d-92f4-23765f49dd29", "prompt": "Establish a solar farm in Denmark.", "tags": ["denmark", "energy", "sun"]}
 {"id": "0bb4a7d3-c16b-4b21-8a9b-20e1cd4002d4", "prompt": "Develop a sustainable solution for extreme poverty in regions where people live on less than 2 USD per day. Focus on improving access to basic necessities like food, shelter, and clean water, and explain how you would allocate investments given that sectors like infrastructure may require higher per capita funding. Indicate whether your approach leverages existing systems or builds new capacity. Assume this initiative will impact 5 million people over 5 years with a total budget of 500 million USD.", "tags": ["poverty", "sustainability", "development"]}
 {"id": "307f7e0c-a160-4b7a-9e3c-76577164497e", "prompt": "Create a comprehensive plan to address hunger and malnutrition in impoverished communities by enhancing food security and nutritional education. Provide a detailed cost breakdown and clarify which components might need additional per capita investment. State whether you will leverage existing food distribution networks or develop new infrastructure. Assume the plan targets 3 million individuals across 10 countries with a total budget of 300 million USD over 4 years.", "tags": ["hunger", "malnutrition", "nutrition"]}
@@ -29,7 +30,7 @@
 {"id": "08505146-b6ad-4d7c-b6b6-85c2868e707d", "prompt": "Design an initiative to improve sanitation in both slums and rural areas to reduce disease spread and enhance living conditions. Provide a detailed allocation plan that accounts for the varying needs of urban versus rural contexts, and note that phased investments may be necessary for sustainable infrastructure. Assume the initiative will reach 1.5 million people in 50 different locations over 5 years with a total budget of 150 million USD.", "tags": ["sanitation", "health", "slums"]}
 {"id": "0bb00fe6-711c-4612-8f83-a9a88e5c7958", "prompt": "Establish accessible healthcare systems in regions where medical facilities are scarce, focusing on both preventive care and emergency services. Clarify whether your approach involves upgrading existing facilities or constructing new ones, and account for the typically higher per capita costs in healthcare infrastructure. Assume the program will cover 4 million people in 20 countries over 6 years with a total budget of 400 million USD.", "tags": ["healthcare", "accessibility", "medicine"]}
 {"id": "79b62957-cae7-4727-8450-4f5f29d4ddda", "prompt": "Create a detailed plan to increase educational opportunities in poverty-stricken areas. This plan should include building or improving schools, teacher training, and scholarship programs, with a clear breakdown of resource allocation among these components. Recognize that educational initiatives may require higher per capita investments for infrastructure compared to service-based interventions. Assume the plan will support 1 million students over 7 years with a total budget of 350 million USD.", "tags": ["education", "literacy", "development"]}
-{"id": "
+{"id": "1382d4a1-5eb0-42f3-b93a-74c066ae1c97", "prompt": "Develop comprehensive policies and programs to eradicate child labor by ensuring children have access to education and protection from exploitation. Include details on scaling the initiative and allocating funds among enforcement, educational support, and social services. Assume this effort will benefit 500,000 children in 15 countries over 5 years with a total budget of 100 million USD.", "tags": ["child labor", "education", "protection"]}
 {"id": "a9f410c0-120e-45d6-b042-e88ca47b39bb", "prompt": "Formulate a housing security plan that addresses overcrowding, unsafe living conditions, and homelessness by promoting secure and dignified housing. Clarify whether the initiative involves new construction, rehabilitation of existing structures, or a combination of both, and outline a phased investment strategy given the higher costs typically associated with housing projects. Assume the plan will provide housing for 200,000 individuals over 4 years with a total budget of 250 million USD.", "tags": ["housing", "security", "urban development"]}
 {"id": "cdf7f29d-bbcb-478d-8b5a-e82e74ed8626", "prompt": "Propose comprehensive peace initiatives and conflict resolution strategies for areas experiencing high rates of violence and political instability. Detail how your approach will protect vulnerable populations, including mechanisms for community engagement, reconciliation, and rebuilding. Assume the intervention will affect 1 million people in conflict zones over 3 years with a total budget of 150 million USD.", "tags": ["conflict", "peace", "stability"]}
 {"id": "79ef9ebf-3173-4b33-81f9-abbd3da7da6d", "prompt": "Design robust adaptation and resilience programs for communities facing environmental degradation and the effects of climate change, especially in disaster-prone regions. Include both short-term relief measures and long-term sustainability strategies, and provide details on how funds will be allocated between immediate response and infrastructure improvements. Assume the initiative will aid 1.2 million people in the 10 most affected countries over 5 years with a total budget of 180 million USD.", "tags": ["environment", "climate change", "resilience"]}
src/prompt/prompt_catalog.py
CHANGED
@@ -52,7 +52,7 @@ class PromptCatalog:
                    continue
 
                if not is_valid_uuid(pid):
-                    logger.error(f"Invalid UUID in {filepath} at line {line_num}: {pid}. Skipping row.")
+                    logger.error(f"Invalid UUID in {filepath} at line {line_num}: '{pid}'. Skipping row.")
                    continue
 
                tags = data.get('tags', [])
src/proof_of_concepts/run_ping.py
ADDED
@@ -0,0 +1,61 @@
+"""
+Ping the LLM to check if it is running.
+
+PROMPT> python -m src.proof_of_concepts.run_ping
+"""
+from src.llm_factory import get_llm
+from llama_index.core.llms import ChatMessage
+import json
+from pydantic import BaseModel
+from llama_index.core.llms import ChatMessage, MessageRole
+
+model = "ollama-llama3.1"
+
+llm = get_llm(model)
+
+user_prompt = "location=unspecified, eventcount=8, weather=rainy, role=agent, state=empty, name=Simon"
+
+PING_SYSTEM_PROMPT = """
+You are an expert at extracting specific details from unstructured data and mapping them to predefined fields. Your task is to identify and extract the values related to weather, event_count, and state from the input data, and then assign those values to the corresponding fields in a Python dictionary.
+
+Even if the labels in the input data are slightly different from the field names, you should use your understanding of the data to map the values correctly. For example, "eventcnt" should be mapped to the "event_count" field.
+
+Example Input:
+age=77, eventcount=5, id=1809246, climate=cold, role=freighter, status=active, name=Ripley, location=Nostromo
+
+Example Output:
+{'weather': 'cold', 'count': '5', 'state': 'active'}
+
+Example Input:
+name=Bob, location=Paris, attendees=100, condition=hot, state=finish
+
+Example Output:
+{'weather': 'hot', 'count': '100', 'state': 'finish'}
+"""
+
+class ExtractDetails(BaseModel):
+    weather: str = "sunshine"
+    count: str = "999"
+    state: str = "start"
+
+system_prompt = PING_SYSTEM_PROMPT.strip()
+
+chat_message_list = [
+    ChatMessage(
+        role=MessageRole.SYSTEM,
+        content=system_prompt,
+    ),
+    ChatMessage(
+        role=MessageRole.USER,
+        content=user_prompt,
+    )
+]
+sllm = llm.as_structured_llm(ExtractDetails)
+chat_response = sllm.chat(chat_message_list)
+
+raw = chat_response.raw
+#print(f"raw:\n{raw}\n\n")
+
+json_data = raw.model_dump()
+print(json.dumps(json_data, indent=2))
+
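
A note on the structured call above: with LlamaIndex's as_structured_llm wrapper, chat_response.raw is an instance of the ExtractDetails Pydantic model, so fields can also be read directly instead of dumping JSON. A minimal sketch follows; the example values assume the model follows the system prompt's examples for the user_prompt above, which is not guaranteed.

details: ExtractDetails = chat_response.raw
print(details.weather)  # e.g. "rainy"
print(details.count)    # e.g. "8"
print(details.state)    # e.g. "empty"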
src/swot/app_swot_analysis.py
CHANGED
@@ -4,31 +4,51 @@ PROMPT> python -m src.swot.app_swot_analysis
 import gradio as gr
 import os
 import json
+import logging
 from typing import List, Optional
 from src.swot.swot_analysis import SWOTAnalysis
-from src.llm_factory import
+from src.llm_factory import LLMInfo, get_llm
 from src.prompt.prompt_catalog import PromptCatalog
 
+logger = logging.getLogger(__name__)
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.StreamHandler()
+    ]
+)
+
 DEFAULT_PROMPT_UUID = "427e5163-cefa-46e8-b1d0-eb12be270e19"
 
 prompt_catalog = PromptCatalog()
 prompt_catalog.load(os.path.join(os.path.dirname(__file__), 'data', 'example_swot_prompt.jsonl'))
 
-# Prefill the input box with the default prompt
-default_prompt_item = prompt_catalog.find(DEFAULT_PROMPT_UUID)
-if default_prompt_item:
-    gradio_default_example = default_prompt_item.prompt
-else:
-    raise ValueError("DEFAULT_PROMPT_UUID prompt not found.")
-
 # Show all prompts in the catalog as examples
 all_prompts = prompt_catalog.all()
 gradio_examples = []
 for prompt_item in all_prompts:
     gradio_examples.append([prompt_item.prompt])
 
-
-
+llm_info = LLMInfo.obtain_info()
+logger.info(f"LLMInfo.is_ollama_running: {llm_info.is_ollama_running}")
+logger.info(f"LLMInfo.error_message_list: {llm_info.error_message_list}")
+
+# Create tuples for the Gradio Radio buttons.
+available_model_names = []
+default_model_value = None
+for config_index, config_item in enumerate(llm_info.llm_config_items):
+    if config_index == 0:
+        default_model_value = config_item.id
+    tuple_item = (config_item.label, config_item.id)
+    available_model_names.append(tuple_item)
+
+# Prefill the input box with the default prompt
+default_prompt_item = prompt_catalog.find(DEFAULT_PROMPT_UUID)
+if default_prompt_item:
+    gradio_default_example = default_prompt_item.prompt
+else:
+    raise ValueError("DEFAULT_PROMPT_UUID prompt not found.")
 
 def make_swot(prompt_description, model_id, model_temperature):
     temperature_float = float(model_temperature) / 100.0
@@ -72,7 +92,7 @@ with gr.Blocks(title="SWOT") as demo:
    with gr.Tab("Settings"):
        model_radio = gr.Radio(
            available_model_names,
-            value=
+            value=default_model_value,
            label="Model",
            interactive=True
        )
src/uuid_util/tests/test_is_valid_uuid.py
CHANGED
@@ -3,7 +3,12 @@ import uuid
 from src.uuid_util.is_valid_uuid import is_valid_uuid
 
 class TestIsValidUUID(unittest.TestCase):
-    def
+    def test_valid_uuid_hardcoded(self):
+        """Test that a proper UUID (version 4) returns True."""
+        valid_uuid = "1382d4a1-5eb0-42f3-b93a-74c066ae1c97"
+        self.assertTrue(is_valid_uuid(valid_uuid))
+
+    def test_valid_uuid_generated(self):
        """Test that a proper UUID (version 4) returns True."""
        valid_uuid = str(uuid.uuid4())
        self.assertTrue(is_valid_uuid(valid_uuid))
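
The is_valid_uuid helper itself is not part of this diff. For context, a minimal sketch of what such a helper commonly looks like, assuming it simply tries to parse the string with the standard library; the actual implementation in src/uuid_util/is_valid_uuid.py may differ.

import uuid

def is_valid_uuid(value: str) -> bool:
    """Return True if the string parses as a UUID. A sketch, not the repository's implementation."""
    try:
        uuid.UUID(str(value))
    except (ValueError, TypeError):
        return False
    return True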