Spaces:
Running
Running
Commit
·
a6fc424
1
Parent(s):
1ebdf9a
added extra safety filter option
Browse files
- app/api_helpers.py +6 -1
- app/openai_handler.py +6 -1
- app/requirements.txt +1 -1
app/api_helpers.py
CHANGED
@@ -122,7 +122,12 @@ def create_generation_config(request: OpenAIRequest) -> Dict[str, Any]:
|
|
122 |
types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold=safety_threshold),
|
123 |
types.SafetySetting(category="HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold=safety_threshold),
|
124 |
types.SafetySetting(category="HARM_CATEGORY_HARASSMENT", threshold=safety_threshold),
|
125 |
-
types.SafetySetting(category="HARM_CATEGORY_CIVIC_INTEGRITY", threshold=safety_threshold)
|
|
|
|
|
|
|
|
|
|
|
126 |
]
|
127 |
# config["thinking_config"] = {"include_thoughts": True}
|
128 |
|
|
|
122 |
types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold=safety_threshold),
|
123 |
types.SafetySetting(category="HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold=safety_threshold),
|
124 |
types.SafetySetting(category="HARM_CATEGORY_HARASSMENT", threshold=safety_threshold),
|
125 |
+
types.SafetySetting(category="HARM_CATEGORY_CIVIC_INTEGRITY", threshold=safety_threshold),
|
126 |
+
types.SafetySetting(category="HARM_CATEGORY_UNSPECIFIED", threshold=safety_threshold),
|
127 |
+
types.SafetySetting(category="HARM_CATEGORY_IMAGE_HATE", threshold=safety_threshold),
|
128 |
+
types.SafetySetting(category="HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT", threshold=safety_threshold),
|
129 |
+
types.SafetySetting(category="HARM_CATEGORY_IMAGE_HARASSMENT", threshold=safety_threshold),
|
130 |
+
types.SafetySetting(category="HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT", threshold=safety_threshold),
|
131 |
]
|
132 |
# config["thinking_config"] = {"include_thoughts": True}
|
133 |
|
app/openai_handler.py
CHANGED
@@ -148,7 +148,12 @@ class OpenAIDirectHandler:
|
|
148 |
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": safety_threshold},
|
149 |
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": safety_threshold},
|
150 |
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": safety_threshold},
|
151 |
-
{"category": 'HARM_CATEGORY_CIVIC_INTEGRITY', "threshold": safety_threshold}
|
|
|
|
|
|
|
|
|
|
|
152 |
]
|
153 |
|
154 |
def create_openai_client(self, project_id: str, gcp_token: str, location: str = "global") -> openai.AsyncOpenAI:
|
|
|
148 |
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": safety_threshold},
|
149 |
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": safety_threshold},
|
150 |
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": safety_threshold},
|
151 |
+
{"category": 'HARM_CATEGORY_CIVIC_INTEGRITY', "threshold": safety_threshold},
|
152 |
+
{"category": "HARM_CATEGORY_UNSPECIFIED", "threshold": safety_threshold},
|
153 |
+
{"category": "HARM_CATEGORY_IMAGE_HATE", "threshold": safety_threshold},
|
154 |
+
{"category": "HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT", "threshold": safety_threshold},
|
155 |
+
{"category": "HARM_CATEGORY_IMAGE_HARASSMENT", "threshold": safety_threshold},
|
156 |
+
{"category": 'HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT', "threshold": safety_threshold}
|
157 |
]
|
158 |
|
159 |
def create_openai_client(self, project_id: str, gcp_token: str, location: str = "global") -> openai.AsyncOpenAI:
|
app/requirements.txt
CHANGED
@@ -3,7 +3,7 @@ uvicorn==0.27.1
|
|
3 |
google-auth==2.38.0
|
4 |
google-cloud-aiplatform==1.86.0
|
5 |
pydantic==2.6.1
|
6 |
-
google-genai==1.
|
7 |
httpx[socks]>=0.25.0
|
8 |
openai
|
9 |
google-auth-oauthlib
|
|
|
3 |
google-auth==2.38.0
|
4 |
google-cloud-aiplatform==1.86.0
|
5 |
pydantic==2.6.1
|
6 |
+
google-genai==1.24.0
|
7 |
httpx[socks]>=0.25.0
|
8 |
openai
|
9 |
google-auth-oauthlib
|