# +-----------------------------------------------+
# | |
# | Give Feedback / Get Help |
# | https://github.com/BerriAI/litellm/issues/new |
# | |
# +-----------------------------------------------+
#
# Thank you users! We ❤️ you! - Krrish & Ishaan
import copy
from typing import TYPE_CHECKING, Any, Optional
import litellm
from litellm.integrations.custom_logger import CustomLogger
from litellm.secret_managers.main import str_to_bool
from litellm.types.utils import StandardCallbackDynamicParams
if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import (
Logging as _LiteLLMLoggingObject,
)
LiteLLMLoggingObject = _LiteLLMLoggingObject
else:
LiteLLMLoggingObject = Any
def redact_message_input_output_from_custom_logger(
    litellm_logging_obj: LiteLLMLoggingObject, result, custom_logger: CustomLogger
):
    """
    Redact the request/response for a single custom logger that has opted
    out of message logging.

    A logger opts out by exposing a ``message_logging`` attribute whose value
    is anything other than ``True``. Loggers without the attribute are treated
    as having message logging enabled and receive the unmodified ``result``.
    """
    # Missing attribute defaults to True, which matches the original
    # hasattr(...) and "is not True" guard.
    message_logging = getattr(custom_logger, "message_logging", True)
    if message_logging is not True:
        return perform_redaction(litellm_logging_obj.model_call_details, result)
    return result
def perform_redaction(model_call_details: dict, result):
    """
    Scrub prompt/message content from a logging payload.

    Mutates ``model_call_details`` in place (messages/prompt/input and, for
    streams, the buffered complete_streaming_response), then returns a
    redacted deep copy of ``result``. Non-ModelResponse results (including
    ``None``) are replaced by a placeholder dict.
    """
    placeholder = "redacted-by-litellm"

    # Wipe the request-side fields on the logging object itself.
    model_call_details["messages"] = [{"role": "user", "content": placeholder}]
    model_call_details["prompt"] = ""
    model_call_details["input"] = ""

    # For streamed calls, the fully-assembled response is stored on the
    # logging object — redact its choices in place.
    is_stream = model_call_details.get("stream", False) is True
    if is_stream and "complete_streaming_response" in model_call_details:
        streamed = model_call_details["complete_streaming_response"]
        for streamed_choice in streamed.choices:
            if isinstance(streamed_choice, litellm.Choices):
                streamed_choice.message.content = placeholder
            elif isinstance(streamed_choice, litellm.utils.StreamingChoices):
                streamed_choice.delta.content = placeholder

    # Anything that is not a ModelResponse (including None) collapses to a
    # placeholder payload. Ordering matters: the None check short-circuits
    # before litellm is touched.
    if result is None or not isinstance(result, litellm.ModelResponse):
        return {"text": placeholder}

    # Redact a deep copy so the caller's original response stays intact.
    redacted_result = copy.deepcopy(result)
    choices = getattr(redacted_result, "choices", None)
    if choices is not None:
        for choice in choices:
            if isinstance(choice, litellm.Choices):
                choice.message.content = placeholder
            elif isinstance(choice, litellm.utils.StreamingChoices):
                choice.delta.content = placeholder
    return redacted_result
def should_redact_message_logging(model_call_details: dict) -> bool:
    """
    Decide whether message/response content should be redacted from logging.

    Redaction can be turned on globally (``litellm.turn_off_message_logging``),
    per-request via a header, or via dynamic callback params; an explicit
    disable header or a dynamic-param ``False`` overrides any enable source.
    """
    metadata = (
        model_call_details.get("litellm_params", {}).get("metadata", {}) or {}
    )
    headers = metadata.get("headers", {})

    # Either the legacy or the current header enables redaction.
    enabled_via_header = any(
        bool(headers.get(name, False))
        for name in (
            "litellm-enable-message-redaction",  # old header. maintain backwards compatibility
            "x-litellm-enable-message-redaction",  # new header
        )
    )

    # _get_turn_off_message_logging_from_dynamic_params is a pure read of the
    # dict, so calling it once up front is equivalent to the repeated calls.
    dynamic_setting = _get_turn_off_message_logging_from_dynamic_params(
        model_call_details
    )

    # Nothing anywhere asked for redaction -> keep logging as-is.
    if (
        litellm.turn_off_message_logging is not True
        and not enabled_via_header
        and dynamic_setting is not True
    ):
        return False

    # Per-request opt-out header beats any enable source.
    if headers and bool(
        headers.get("litellm-disable-message-redaction", False)
    ):
        return False

    # Dynamic params explicitly opted out of redaction.
    if dynamic_setting is False:
        return False

    return True
def redact_message_input_output_from_logging(
    model_call_details: dict, result, input: Optional[Any] = None
) -> Any:
    """
    Remove messages, prompts, input, and response content from logging.

    Modifies ``model_call_details`` in place and returns a redacted copy of
    ``result`` — but only when :func:`should_redact_message_logging` says
    redaction is enabled; otherwise ``result`` passes through untouched.
    (``input`` is accepted for caller compatibility and is not used here.)
    """
    if not should_redact_message_logging(model_call_details):
        return result
    return perform_redaction(model_call_details, result)
def _get_turn_off_message_logging_from_dynamic_params(
model_call_details: dict,
) -> Optional[bool]:
"""
gets the value of `turn_off_message_logging` from the dynamic params, if it exists.
handles boolean and string values of `turn_off_message_logging`
"""
standard_callback_dynamic_params: Optional[StandardCallbackDynamicParams] = (
model_call_details.get("standard_callback_dynamic_params", None)
)
if standard_callback_dynamic_params:
_turn_off_message_logging = standard_callback_dynamic_params.get(
"turn_off_message_logging"
)
if isinstance(_turn_off_message_logging, bool):
return _turn_off_message_logging
elif isinstance(_turn_off_message_logging, str):
return str_to_bool(_turn_off_message_logging)
return None
def redact_user_api_key_info(metadata: dict) -> dict:
    """
    Drop any ``user_api_key*`` entries from metadata before it reaches a
    logging integration — but only when the flag is set.

    Usage:
        SDK
        ```python
        litellm.redact_user_api_key_info = True
        ```
        PROXY:
        ```yaml
        litellm_settings:
            redact_user_api_key_info: true
        ```
    """
    if litellm.redact_user_api_key_info is not True:
        return metadata

    # Build a fresh dict, keeping every entry whose key does not carry the
    # user_api_key prefix (non-string keys are always kept).
    return {
        key: value
        for key, value in metadata.items()
        if not (isinstance(key, str) and key.startswith("user_api_key"))
    }