Update app.py
app.py CHANGED
@@ -2,7 +2,7 @@ import pandas as pd
 from langchain.chains import LLMChain
 from langchain_huggingface import HuggingFacePipeline
 from transformers import LlamaForCausalLM, LlamaTokenizer, pipeline
-from langchain.llms import HuggingFaceLLM
+from langchain.llms import HuggingFaceHub#HuggingFaceLLM
 from huggingface_hub import login
 import streamlit as st
 
@@ -27,7 +27,8 @@ except Exception as e:
     print(f"Error al cargar el tokenizador: {e}")
 
 # Crear una cadena LLM con LangChain
-llm =
+llm = HuggingFaceHub(modelo, tokenizer)
+#llm = HuggingFaceLLM(modelo, tokenizer)
 chain = LLMChain(llm)
 
 def calcular_similitud(texto):
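
A note on the new llm = HuggingFaceHub(modelo, tokenizer) line: HuggingFaceHub from langchain.llms is built around a hosted repo_id (plus an API token), not in-memory model and tokenizer objects, and LLMChain expects keyword arguments including a prompt. Since app.py already loads the Llama model locally and imports HuggingFacePipeline from langchain_huggingface, the usual wiring looks like the sketch below. This is an illustration only, not the app's actual code: the model id is a placeholder, the prompt text is invented, and the names modelo and tokenizer simply mirror the ones used in the diff.

# Minimal sketch: expose a locally loaded Llama model to LangChain
# through HuggingFacePipeline instead of HuggingFaceHub.
from transformers import LlamaForCausalLM, LlamaTokenizer, pipeline
from langchain_huggingface import HuggingFacePipeline
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

# Placeholder checkpoint; the real model id used by app.py is not shown in the diff.
model_id = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = LlamaTokenizer.from_pretrained(model_id)
modelo = LlamaForCausalLM.from_pretrained(model_id)

# Wrap the in-memory model/tokenizer in a transformers text-generation pipeline.
generador = pipeline(
    "text-generation",
    model=modelo,
    tokenizer=tokenizer,
    max_new_tokens=256,
)

# HuggingFacePipeline takes the local pipeline directly; no Hub call is needed.
llm = HuggingFacePipeline(pipeline=generador)

# LLMChain takes keyword arguments and requires a prompt alongside the llm.
prompt = PromptTemplate.from_template("Responde a la siguiente pregunta: {pregunta}")
chain = LLMChain(llm=llm, prompt=prompt)

respuesta = chain.invoke({"pregunta": "¿Qué es LangChain?"})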