Fecalisboa committed on
Commit
962e70a
·
verified ·
1 Parent(s): 3cf1eb8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -14
app.py CHANGED
@@ -1,9 +1,9 @@
1
  import gradio as gr
2
  import os
3
- from pathlib import Path
4
-
5
- api_token = os.getenv("HF_TOKEN")
6
-
7
  from langchain_community.vectorstores import FAISS
8
  from langchain_community.document_loaders import PyPDFLoader
9
  from langchain.text_splitter import RecursiveCharacterTextSplitter
@@ -16,6 +16,8 @@ from langchain.memory import ConversationBufferMemory
16
  from langchain_community.llms import HuggingFaceEndpoint
17
  import torch
18
 
 
 
19
  list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3"]
20
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
21
 
@@ -58,18 +60,18 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
58
  if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
59
  llm = HuggingFaceEndpoint(
60
  repo_id=llm_model,
61
- huggingfacehub_api_token = api_token,
62
- temperature = temperature,
63
- max_new_tokens = max_tokens,
64
- top_k = top_k,
65
  )
66
  else:
67
  llm = HuggingFaceEndpoint(
68
- huggingfacehub_api_token = api_token,
69
  repo_id=llm_model,
70
- temperature = temperature,
71
- max_new_tokens = max_tokens,
72
- top_k = top_k,
73
  )
74
 
75
  progress(0.75, desc="Defining buffer memory...")
@@ -94,7 +96,7 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
94
  # Generate collection name for vector database
95
  def create_collection_name(filepath):
96
  collection_name = Path(filepath).stem
97
- collection_name = collection_name.replace(" ","-")
98
  collection_name = unidecode(collection_name)
99
  collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
100
  collection_name = collection_name[:50]
@@ -250,4 +252,4 @@ def demo():
250
 
251
 
252
  if __name__ == "__main__":
253
- demo()
 
1
  import gradio as gr
2
  import os
3
+ from pathlib import Path # Adding the required import
4
+ import re
5
+ from unidecode import unidecode
6
+ import chromadb
7
  from langchain_community.vectorstores import FAISS
8
  from langchain_community.document_loaders import PyPDFLoader
9
  from langchain.text_splitter import RecursiveCharacterTextSplitter
 
16
  from langchain_community.llms import HuggingFaceEndpoint
17
  import torch
18
 
19
+ api_token = os.getenv("HF_TOKEN")
20
+
21
  list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3"]
22
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
23
 
 
60
  if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
61
  llm = HuggingFaceEndpoint(
62
  repo_id=llm_model,
63
+ huggingfacehub_api_token=api_token,
64
+ temperature=temperature,
65
+ max_new_tokens=max_tokens,
66
+ top_k=top_k,
67
  )
68
  else:
69
  llm = HuggingFaceEndpoint(
70
+ huggingfacehub_api_token=api_token,
71
  repo_id=llm_model,
72
+ temperature=temperature,
73
+ max_new_tokens=max_tokens,
74
+ top_k=top_k,
75
  )
76
 
77
  progress(0.75, desc="Defining buffer memory...")
 
96
  # Generate collection name for vector database
97
  def create_collection_name(filepath):
98
  collection_name = Path(filepath).stem
99
+ collection_name = collection_name.replace(" ", "-")
100
  collection_name = unidecode(collection_name)
101
  collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
102
  collection_name = collection_name[:50]
 
252
 
253
 
254
  if __name__ == "__main__":
255
+ demo()