Update app.py
app.py
CHANGED
@@ -17,14 +17,23 @@ warnings.filterwarnings('ignore', category=FutureWarning)
 # --- Monkey Patch for Gradio Client JSON Schema Bug ---
 import gradio_client.utils as client_utils
 
+# Patch get_type to check for non-dict types.
 original_get_type = client_utils.get_type
-
 def patched_get_type(schema):
     if not isinstance(schema, dict):
         return type(schema).__name__
     return original_get_type(schema)
-
 client_utils.get_type = patched_get_type
+
+# Additionally, patch _json_schema_to_python_type to handle boolean schemas.
+if not hasattr(client_utils, "_original_json_schema_to_python_type"):
+    client_utils._original_json_schema_to_python_type = client_utils._json_schema_to_python_type
+
+def patched_json_schema_to_python_type(schema, defs=None):
+    if isinstance(schema, bool):
+        return "bool"
+    return client_utils._original_json_schema_to_python_type(schema, defs)
+client_utils._json_schema_to_python_type = patched_json_schema_to_python_type
 # --- End of Monkey Patch ---
 
 # Download necessary NLTK data
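As a quick sanity check of the patch above: boolean values are legal JSON Schemas (true accepts anything, false accepts nothing), which is exactly the case the unpatched helpers choke on. A minimal sketch, assuming the patched code has already run:

import gradio_client.utils as client_utils

# Booleans are valid JSON Schemas, but the unpatched helpers assume a
# dict and raise. After patching, both helpers degrade gracefully:
print(client_utils.get_type(True))                           # -> "bool"
print(client_utils._json_schema_to_python_type(True, None))  # -> "bool"
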
@@ -34,7 +43,6 @@ nltk.download('vader_lexicon', quiet=True)
 # Backend Support for GGUF Models
 # ---------------------------
 try:
-    # Attempt to import a hypothetical llama_cpp binding for GGUF models.
     from llama_cpp import Llama
     BACKEND = "llama_cpp"
 except ImportError:
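The body of the except branch falls outside this hunk's context. A plausible completion of the import-fallback pattern, with the fallback value assumed from the transformers path in LLMResponder below:

try:
    # Prefer the native GGUF backend when llama-cpp-python is installed.
    from llama_cpp import Llama
    BACKEND = "llama_cpp"
except ImportError:
    # Assumed fallback: load models through Hugging Face transformers instead.
    BACKEND = "transformers"
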
@@ -45,14 +53,12 @@ except ImportError:
 # ---------------------------
 class EmotionalAnalyzer:
     def __init__(self):
-        # Load a pre-trained emotion classifier and tokenizer
         self.emotion_model = AutoModelForSequenceClassification.from_pretrained(
             "bhadresh-savani/distilbert-base-uncased-emotion"
         )
         self.emotion_tokenizer = AutoTokenizer.from_pretrained(
             "bhadresh-savani/distilbert-base-uncased-emotion"
         )
-        # Define the emotion labels as per the model card
         self.emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
         self.sia = SentimentIntensityAnalyzer()
 
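detailed_emotional_analysis is referenced later but never shown in this diff. A minimal sketch of how this checkpoint is typically queried (standard transformers inference; predict_emotion is a hypothetical helper name, and the label order comes from self.emotion_labels above):

import torch

def predict_emotion(self, text: str) -> str:
    # Tokenize and run a single forward pass; no gradients needed for inference.
    inputs = self.emotion_tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = self.emotion_model(**inputs).logits
    # The argmax over the six logits indexes back into self.emotion_labels.
    return self.emotion_labels[logits.argmax(dim=-1).item()]
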
@@ -101,19 +107,16 @@ class LLMResponder:
     def __init__(self, model_name="SicariusSicariiStuff/Impish_LLAMA_3B_GGUF"):
         self.model_name = model_name
         if BACKEND == "llama_cpp":
-            #
-            # Replace "path/to/your/gguf/file.gguf" with the actual path to your GGUF file.
+            # Replace with the actual path to your GGUF file.
             self.llm = Llama(model_path="path/to/your/gguf/file.gguf", n_ctx=1024)
             self.backend = "llama_cpp"
         else:
-            # Load model via Hugging Face transformers.
             self.llm_tokenizer = AutoTokenizer.from_pretrained(model_name)
             self.llm_model = AutoModelForCausalLM.from_pretrained(model_name)
             self.backend = "transformers"
 
     def generate_response(self, prompt):
         if self.backend == "llama_cpp":
-            # Use llama_cpp inference (adjust parameters as needed)
             result = self.llm(prompt=prompt, max_tokens=256, temperature=0.95, top_p=0.95)
             response = result.get("response", "")
         else:
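One caveat worth flagging: if the import above resolves to the real llama-cpp-python binding, Llama.__call__ returns an OpenAI-style completion dict, so result.get("response", "") would always fall back to the empty string. Extraction under that assumption would be:

result = self.llm(prompt=prompt, max_tokens=256, temperature=0.95, top_p=0.95)
# llama-cpp-python mirrors the OpenAI completion shape:
# {"choices": [{"text": ...}], "usage": {...}, ...}
response = result["choices"][0]["text"]
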
@@ -137,9 +140,7 @@ def interactive_interface(input_text):
     emotion_analyzer = EmotionalAnalyzer()
     llm_responder = LLMResponder()
 
-    # Perform detailed emotional analysis on the input
     emotional_data = emotion_analyzer.detailed_emotional_analysis(input_text)
-    # Simulate dynamic emotional state (could be updated based on conversation history)
     current_emotions = {
         'joy': random.randint(10, 30),
         'sadness': random.randint(5, 20),
@@ -150,7 +151,6 @@ def interactive_interface(input_text):
     }
     emotion_image = emotion_analyzer.visualize_emotions(current_emotions)
 
-    # Create a prompt combining the input and detected emotion data
     prompt = (
         f"Input: {input_text}\n"
         f"Detected Emotion: {emotional_data['predicted_emotion']}\n"
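The hunk cuts off mid-expression; everything past the second f-string line is outside the diff. A hypothetical continuation, purely to illustrate how the assembled prompt would feed the responder (the closing lines and return values below are assumptions, not part of this commit):

    prompt = (
        f"Input: {input_text}\n"
        f"Detected Emotion: {emotional_data['predicted_emotion']}\n"
        # Hypothetical closing line -- the actual remainder is not shown above.
        f"Response:"
    )
    response = llm_responder.generate_response(prompt)
    return response, emotion_image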