Update app.py
app.py (CHANGED)
@@ -10,14 +10,13 @@ import pandas as pd
 import nltk
 from nltk.sentiment import SentimentIntensityAnalyzer
 from textblob import TextBlob
-from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM,
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, LlamaConfig
 
 warnings.filterwarnings('ignore', category=FutureWarning)
 
 # --- Monkey Patch for Gradio Client JSON Schema Bug ---
 import gradio_client.utils as client_utils
 
-# Patch get_type to check for non-dict types.
 original_get_type = client_utils.get_type
 def patched_get_type(schema):
     if not isinstance(schema, dict):
@@ -25,7 +24,6 @@ def patched_get_type(schema):
     return original_get_type(schema)
 client_utils.get_type = patched_get_type
 
-# Additionally, patch _json_schema_to_python_type to handle boolean schemas.
 if not hasattr(client_utils, "_original_json_schema_to_python_type"):
     client_utils._original_json_schema_to_python_type = client_utils._json_schema_to_python_type
 
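The two hunks above show the monkey patch only in fragments: the body of patched_get_type is cut off after the isinstance check, and the _json_schema_to_python_type patch ends at the backup assignment. Below is a minimal sketch of what the complete patch plausibly looks like, assuming the intent is simply to tolerate boolean or otherwise non-dict JSON schemas and that the private helper accepts (schema, defs); the "Any" fallback and the wrapper function are illustrative assumptions, not lines taken from app.py.

import gradio_client.utils as client_utils

# Keep a reference to the original helper before patching it.
original_get_type = client_utils.get_type

def patched_get_type(schema):
    # JSON Schema permits bare booleans as schemas; the stock helper assumes a dict.
    if not isinstance(schema, dict):
        return "Any"  # illustrative fallback, not necessarily the Space's actual return value
    return original_get_type(schema)

client_utils.get_type = patched_get_type

# Guard against double-patching, as in the diff, then wrap the private helper too.
if not hasattr(client_utils, "_original_json_schema_to_python_type"):
    client_utils._original_json_schema_to_python_type = client_utils._json_schema_to_python_type

    def patched_json_schema_to_python_type(schema, defs=None):
        if isinstance(schema, bool):
            return "Any"  # a bare true/false schema carries no usable type information
        return client_utils._original_json_schema_to_python_type(schema, defs)

    client_utils._json_schema_to_python_type = patched_json_schema_to_python_type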
@@ -111,8 +109,8 @@ class LLMResponder:
             self.llm = Llama(model_path="path/to/your/gguf/file.gguf", n_ctx=1024)
             self.backend = "llama_cpp"
         else:
-            # Create a dummy config
-            dummy_config =
+            # Create a dummy config using LlamaConfig so that the model loads even if its config is missing a model_type key.
+            dummy_config = LlamaConfig.from_dict({"model_type": "llama"})
             self.llm_tokenizer = AutoTokenizer.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
             self.llm_model = AutoModelForCausalLM.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
             self.backend = "transformers"
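Before this commit, the transformers branch was broken in two ways: the import line ended with a dangling comma and dummy_config had no right-hand side, both of which are syntax errors. The fix imports LlamaConfig and builds a minimal config whose only field of interest is model_type. A standalone sketch of the same idea, using a placeholder checkpoint name rather than the one the Space actually loads:

from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaConfig

# Minimal config with model_type set explicitly, so loading does not fail
# when the checkpoint's own config.json lacks that key.
dummy_config = LlamaConfig.from_dict({"model_type": "llama"})

model_name = "your-org/your-llama-checkpoint"  # placeholder, not the Space's model
tokenizer = AutoTokenizer.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)

Note that passing config= overrides the checkpoint's own configuration, so architecture fields fall back to LlamaConfig defaults; this workaround is mainly useful when the missing model_type key is the only problem with the checkpoint's config.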
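The surrounding LLMResponder code, which is not part of this diff, selects between a llama.cpp backend and a transformers backend via self.backend. How the responder actually generates text is not shown here; the following is a hypothetical sketch of such a dual-backend generation path, with class, method, and parameter names that are assumptions rather than the Space's code.

class DualBackendResponder:
    """Hypothetical illustration of the llama_cpp / transformers split shown above."""

    def __init__(self, backend, llm=None, tokenizer=None, model=None):
        self.backend = backend      # "llama_cpp" or "transformers"
        self.llm = llm              # a llama_cpp.Llama instance when backend == "llama_cpp"
        self.llm_tokenizer = tokenizer
        self.llm_model = model

    def generate(self, prompt, max_new_tokens=128):
        if self.backend == "llama_cpp":
            # Llama.__call__ wraps create_completion and returns an OpenAI-style dict.
            out = self.llm(prompt, max_tokens=max_new_tokens)
            return out["choices"][0]["text"]
        # transformers path: tokenize, generate, decode.
        inputs = self.llm_tokenizer(prompt, return_tensors="pt")
        output_ids = self.llm_model.generate(**inputs, max_new_tokens=max_new_tokens)
        return self.llm_tokenizer.decode(output_ids[0], skip_special_tokens=True)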