update
- llm.py +4 -4
- requirements.txt +1 -1
llm.py
CHANGED
@@ -14,7 +14,7 @@ FLASH_MODEL = 'gemini-1.5-flash-002'
 # https://github.com/google-gemini/cookbook/blob/main/quickstarts/Prompting.ipynb
 # https://github.com/google-gemini/cookbook/blob/main/quickstarts/Streaming.ipynb
 import google.generativeai as genai  # pip install -U -q google-generativeai
-llm_log_filename = f"{location__}/
+llm_log_filename = f"{location__}/.cache/llm.log"
 
 
 genai.configure(api_key=os.getenv("GEMINI_FLASH_API_KEY"))
@@ -62,9 +62,9 @@ if thinker == "gemini":  # gemini pro
     thinker_chat = chat
 
 elif thinker in "70b|405b":
-    cache_filename = f"{location__}/
-    lock_filename = f"{location__}/
-    log_filename = f"{location__}/
+    cache_filename = f"{location__}/.cache/thinker.jsonl.xz"
+    lock_filename = f"{location__}/.cache/thinker.lock"
+    log_filename = f"{location__}/.cache/thinker.log"
 
     ## Load thinker_cache
     lines = [] if not os.path.exists(cache_filename) else \
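For context on the relocated cache files, a minimal sketch of how the xz-compressed JSONL thinker cache could be loaded. Only the `.cache/...` filenames come from the diff; `location__` resolving to the script directory, the directory-creation step, and the read pattern are assumptions.

import json
import lzma
import os

location__ = os.path.dirname(os.path.abspath(__file__))  # assumption: script directory
os.makedirs(f"{location__}/.cache", exist_ok=True)       # assumption: ensure .cache/ exists

cache_filename = f"{location__}/.cache/thinker.jsonl.xz"

def load_thinker_cache(filename=cache_filename):
    # One JSON object per line, xz-compressed; return [] when no cache exists yet,
    # mirroring the `lines = [] if not os.path.exists(cache_filename) else ...` guard.
    if not os.path.exists(filename):
        return []
    with lzma.open(filename, "rt", encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]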
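The context lines in the first hunk show the Gemini client setup; a sketch of how it is typically used, where only the import, model name, and environment variable appear in the file and the generate call is an assumption:

import os
import google.generativeai as genai  # pip install -U -q google-generativeai

FLASH_MODEL = 'gemini-1.5-flash-002'
genai.configure(api_key=os.getenv("GEMINI_FLASH_API_KEY"))

# Assumed usage: a one-off generation with the flash model.
model = genai.GenerativeModel(FLASH_MODEL)
print(model.generate_content("Say hello.").text)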
requirements.txt
CHANGED
@@ -4,4 +4,4 @@ together
 streamlit
 trafilatura
 markdown
-
+lxml_html_clean
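The added lxml_html_clean entry is most plausibly for trafilatura: since lxml 5.2 the lxml.html.clean module ships as the separate lxml_html_clean project, so HTML-cleaning imports fail without it installed. A small illustration (the Cleaner options here are arbitrary):

# Since lxml 5.2 the cleaner lives in the lxml_html_clean package
# (previously: from lxml.html.clean import Cleaner).
from lxml_html_clean import Cleaner

cleaner = Cleaner(javascript=True, style=True)  # drop <script>/<style> content
print(cleaner.clean_html("<p>hi<script>alert(1)</script></p>"))  # cleaned markup, script removed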