Fecalisboa committed on
Commit
ee565de
·
verified ·
1 Parent(s): 2ab1cce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -8
app.py CHANGED
@@ -31,7 +31,7 @@ import pandas as pd
31
  api_token = os.getenv("HF_TOKEN")
32
 
33
 
34
- list_llm = ["mistralai/Miceli", "mistralai/Mistral-7B-Instruct-v0.3"]
35
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
36
 
37
  # Load PDF document and create doc splits
@@ -68,14 +68,24 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
68
  progress(0.1, desc="Initializing HF tokenizer...")
69
 
70
  progress(0.5, desc="Initializing HF Hub...")
 
71
 
72
- llm = HuggingFaceEndpoint(
73
- repo_id=llm_model,
74
- huggingfacehub_api_token=api_token,
75
- temperature=temperature,
76
- max_new_tokens=max_tokens,
77
- top_k=top_k,
78
- )
 
 
 
 
 
 
 
 
 
79
 
80
  progress(0.75, desc="Defining buffer memory...")
81
  memory = ConversationBufferMemory(
 
31
  api_token = os.getenv("HF_TOKEN")
32
 
33
 
34
+ list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3"]
35
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
36
 
37
  # Load PDF document and create doc splits
 
68
  progress(0.1, desc="Initializing HF tokenizer...")
69
 
70
  progress(0.5, desc="Initializing HF Hub...")
71
+
72
 
73
+ if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
74
+ llm = HuggingFaceEndpoint(
75
+ repo_id=llm_model,
76
+ huggingfacehub_api_token = api_token,
77
+ temperature = temperature,
78
+ max_new_tokens = max_tokens,
79
+ top_k = top_k,
80
+ )
81
+ else:
82
+ llm = HuggingFaceEndpoint(
83
+ huggingfacehub_api_token = api_token,
84
+ repo_id=llm_model,
85
+ temperature = temperature,
86
+ max_new_tokens = max_tokens,
87
+ top_k = top_k,
88
+ )
89
 
90
  progress(0.75, desc="Defining buffer memory...")
91
  memory = ConversationBufferMemory(