Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ import pandas as pd
 import nltk
 from nltk.sentiment import SentimentIntensityAnalyzer
 from textblob import TextBlob
-from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, PretrainedConfig
 
 warnings.filterwarnings('ignore', category=FutureWarning)
 
@@ -111,9 +111,10 @@ class LLMResponder:
             self.llm = Llama(model_path="path/to/your/gguf/file.gguf", n_ctx=1024)
             self.backend = "llama_cpp"
         else:
-            #
-
-            self.
+            # Create a dummy config to bypass the missing model_type in config.json.
+            dummy_config = PretrainedConfig.from_dict({"model_type": "llama"})
+            self.llm_tokenizer = AutoTokenizer.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
+            self.llm_model = AutoModelForCausalLM.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
             self.backend = "transformers"
 
     def generate_response(self, prompt):
@@ -190,5 +191,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    # In Hugging Face Spaces, remove share=True.
     iface.launch()
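For reference, the workaround in the middle hunk can be reproduced outside this Space roughly as below. This is a minimal sketch, not the commit's code: the repo id is a placeholder, and it swaps the commit's bare PretrainedConfig.from_dict for an explicit LlamaConfig, a more conventional way to cope with a config.json that lacks the "model_type" key.

from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaConfig

# Placeholder repo id; in app.py the real model_name is defined elsewhere.
model_name = "your-org/your-llama-checkpoint"

# Parse the checkpoint's config.json explicitly as a LlamaConfig, so
# AutoConfig never has to resolve the missing "model_type" key.
config = LlamaConfig.from_pretrained(model_name)

# With a concrete config instance, the Auto* classes dispatch on its type
# (LlamaConfig -> LlamaForCausalLM) instead of re-reading config.json.
tokenizer = AutoTokenizer.from_pretrained(model_name, config=config, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, config=config, trust_remote_code=True)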
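On the last hunk: the deleted comment referred to Gradio's share flag. As a usage note, assuming the iface object built earlier in app.py: on Hugging Face Spaces the platform serves the app itself, so a bare launch() suffices, while share=True is only useful locally, where it opens a temporary public tunnel.

if __name__ == "__main__":
    # On Spaces the container already exposes the app; share=True is
    # only for a temporary public link when running on a local machine.
    iface.launch()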