hf llm
backend.py CHANGED +1 -2
@@ -2,7 +2,6 @@ import torch
 import os
 from transformers import AutoModelForCausalLM, GemmaTokenizerFast, TextIteratorStreamer, AutoTokenizer
 #from interface import GemmaLLMInterface
-from llama_index.core.node_parser import SentenceSplitter
 from llama_index.embeddings.instructor import InstructorEmbedding
 import gradio as gr
 from llama_index.core import Settings, ServiceContext, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, PromptTemplate, load_index_from_storage, StorageContext
@@ -78,7 +77,7 @@ Settings.num_output = 512
 Settings.context_window = 3900
 
 
-documents = SimpleDirectoryReader('data').load_data()
+documents = SimpleDirectoryReader('./data').load_data()
 
 nodes = SentenceSplitter(chunk_size=512, chunk_overlap=20, paragraph_separator="\n\n").get_nodes_from_documents(documents)
 # Build the vector store index from the nodes
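
The second hunk only makes the data path explicit ('./data' instead of 'data'). For context, below is a minimal sketch of the ingestion pipeline these hunks touch. Two assumptions are baked in: the SentenceSplitter import is kept (the first hunk deletes it while the later code still calls it, so the file as committed would raise a NameError), and the closing index = VectorStoreIndex(nodes) step is hypothetical, since the actual construction line sits outside this diff.

# Sketch, not the committed backend.py: SentenceSplitter import retained
# on purpose, and the final index line is an assumption.
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter

# Values shown in the hunk headers above.
Settings.num_output = 512
Settings.context_window = 3900

# Load every file under ./data; this commit makes the relative path explicit.
documents = SimpleDirectoryReader('./data').load_data()

# Split documents into ~512-token chunks with a 20-token overlap,
# treating blank lines as paragraph boundaries.
nodes = SentenceSplitter(
    chunk_size=512,
    chunk_overlap=20,
    paragraph_separator="\n\n",
).get_nodes_from_documents(documents)

# Hypothetical final step: build the vector store index from the nodes.
# Relies on Settings.embed_model, which backend.py configures outside this diff.
index = VectorStoreIndex(nodes)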