momenaca committed on
Commit
d3fe3f2
·
1 Parent(s): 642c5e3

add hackathon notebook

Browse files
app.py CHANGED
@@ -58,7 +58,7 @@ if os.getenv("EKI_OPENAI_LLM_DEPLOYMENT_NAME"):
58
  bdd_afp = get_vectorstore_api("afp")
59
 
60
  else:
61
- qdrants_public = get_qdrants_public(config, "momenaca/hackathon_spinoza")
62
  qdrants = {**qdrants, **qdrants_public}
63
  bdd_presse = None
64
  bdd_afp = None
 
58
  bdd_afp = get_vectorstore_api("afp")
59
 
60
  else:
61
+ qdrants_public = get_qdrants_public(config, "your_database_hf")
62
  qdrants = {**qdrants, **qdrants_public}
63
  bdd_presse = None
64
  bdd_afp = None
spinoza_project/hackathon/Ekimetrics x Spinoza - Hackathon.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
spinoza_project/source/backend/llm_utils.py CHANGED
@@ -53,7 +53,7 @@ def get_llm_api():
53
  openai_api_version=os.getenv("EKI_OPENAI_API_VERSION"),
54
  streaming=True,
55
  temperature=0,
56
- max_tokens=2048, # 1024,
57
  stop=["<|im_end|>"],
58
  )
59
  )
@@ -64,6 +64,7 @@ def get_llm_api():
64
  ChatGroq(
65
  model="llama3-groq-70b-8192-tool-use-preview", # llama-3.1-8b-instant / llama3-groq-70b-8192-tool-use-preview / llama-3.2-90b-text-preview / llama-3.2-3b-preview
66
  temperature=0,
 
67
  )
68
  )
69
 
 
53
  openai_api_version=os.getenv("EKI_OPENAI_API_VERSION"),
54
  streaming=True,
55
  temperature=0,
56
+ max_tokens=2048,
57
  stop=["<|im_end|>"],
58
  )
59
  )
 
64
  ChatGroq(
65
  model="llama3-groq-70b-8192-tool-use-preview", # llama-3.1-8b-instant / llama3-groq-70b-8192-tool-use-preview / llama-3.2-90b-text-preview / llama-3.2-3b-preview
66
  temperature=0,
67
+ max_tokens=2048,
68
  )
69
  )
70