index | package | name | docstring | code | signature |
---|---|---|---|---|---|
16,387 | presidio_analyzer.pattern_recognizer | __init__ | null | def __init__(
self,
supported_entity: str,
name: str = None,
supported_language: str = "en",
patterns: List[Pattern] = None,
deny_list: List[str] = None,
context: List[str] = None,
deny_list_score: float = 1.0,
global_regex_flags: Optional[int] = re.DOTALL | re.MULTILINE | re.IGNORECASE,
version: str = "0.0.1",
):
if not supported_entity:
raise ValueError("Pattern recognizer should be initialized with entity")
if not patterns and not deny_list:
raise ValueError(
"Pattern recognizer should be initialized with patterns"
" or with deny list"
)
super().__init__(
supported_entities=[supported_entity],
supported_language=supported_language,
name=name,
version=version,
)
if patterns is None:
self.patterns = []
else:
self.patterns = patterns
self.context = context
self.deny_list_score = deny_list_score
self.global_regex_flags = global_regex_flags
if deny_list:
deny_list_pattern = self._deny_list_to_regex(deny_list)
self.patterns.append(deny_list_pattern)
self.deny_list = deny_list
else:
self.deny_list = []
| (self, supported_entity: str, name: Optional[str] = None, supported_language: str = 'en', patterns: Optional[List[presidio_analyzer.pattern.Pattern]] = None, deny_list: Optional[List[str]] = None, context: Optional[List[str]] = None, deny_list_score: float = 1.0, global_regex_flags: Optional[int] = regex.I|regex.M|regex.S, version: str = '0.0.1') |
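The constructor can be driven either by explicit regex patterns or by a deny list of literal terms. A minimal usage sketch (the entity name, deny-list values, and sample text are illustrative, and the printed result is indicative of the expected output rather than quoted from the source):
>>> from presidio_analyzer import PatternRecognizer
>>> titles_recognizer = PatternRecognizer(
...     supported_entity="TITLE",
...     deny_list=["Mr.", "Mrs.", "Ms."],
... )
>>> titles_recognizer.analyze(text="Mr. Smith travelled north", entities=["TITLE"])
[type: TITLE, start: 0, end: 3, score: 1.0]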
16,388 | presidio_analyzer.pattern_recognizer | _deny_list_to_regex |
Convert a list of words to a matching regex,
to be analyzed by the analyze method like any other regex pattern.
:param deny_list: the list of words to detect
:return: the regex of the words for detection
| def _deny_list_to_regex(self, deny_list: List[str]) -> Pattern:
"""
Convert a list of words to a matching regex,
to be analyzed by the analyze method like any other regex pattern.
:param deny_list: the list of words to detect
:return: the regex of the words for detection
"""
# Escape deny list elements as preparation for regex
escaped_deny_list = [re.escape(element) for element in deny_list]
regex = r"(?:^|(?<=\W))(" + "|".join(escaped_deny_list) + r")(?:(?=\W)|$)"
return Pattern(name="deny_list", regex=regex, score=self.deny_list_score)
| (self, deny_list: List[str]) -> presidio_analyzer.pattern.Pattern |
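For reference, the helper wraps the escaped terms in lookaround-based word boundaries; a small illustration of the regex it produces (deny-list values are illustrative):
>>> rec = PatternRecognizer(supported_entity="TITLE", deny_list=["Mr.", "Mrs."])
>>> rec.patterns[0].regex
'(?:^|(?<=\\W))(Mr\\.|Mrs\\.)(?:(?=\\W)|$)'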
16,389 | presidio_analyzer.pattern_recognizer | analyze |
Analyzes text to detect PII using regular expressions or deny-lists.
:param text: Text to be analyzed
:param entities: Entities this recognizer can detect
:param nlp_artifacts: Output values from the NLP engine
:param regex_flags: regex flags to be used in regex matching
:return:
| def analyze(
self,
text: str,
entities: List[str],
nlp_artifacts: Optional[NlpArtifacts] = None,
regex_flags: Optional[int] = None,
) -> List[RecognizerResult]:
"""
Analyzes text to detect PII using regular expressions or deny-lists.
:param text: Text to be analyzed
:param entities: Entities this recognizer can detect
:param nlp_artifacts: Output values from the NLP engine
:param regex_flags: regex flags to be used in regex matching
:return:
"""
results = []
if self.patterns:
pattern_result = self.__analyze_patterns(text, regex_flags)
results.extend(pattern_result)
return results
| (self, text: str, entities: List[str], nlp_artifacts: Optional[presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts] = None, regex_flags: Optional[int] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
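analyze only evaluates the regex patterns held by the recognizer (NLP artifacts are not used here). A sketch with an explicit Pattern, where the zip-code regex, score, and expected output are illustrative assumptions:
>>> from presidio_analyzer import Pattern, PatternRecognizer
>>> zip_pattern = Pattern(name="us zip (weak)", regex=r"\b\d{5}(?:-\d{4})?\b", score=0.3)
>>> zip_recognizer = PatternRecognizer(supported_entity="US_ZIP_CODE", patterns=[zip_pattern])
>>> zip_recognizer.analyze(text="My zip is 90210", entities=["US_ZIP_CODE"])
[type: US_ZIP_CODE, start: 10, end: 15, score: 0.3]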
16,390 | presidio_analyzer.pattern_recognizer | build_regex_explanation |
Construct an explanation for why this entity was detected.
:param recognizer_name: Name of recognizer detecting the entity
:param pattern_name: Regex pattern name which detected the entity
:param pattern: Regex pattern logic
:param original_score: Score given by the recognizer
:param validation_result: Whether validation was used and its result
:param regex_flags: Regex flags used in the regex matching
:return: Analysis explanation
| @staticmethod
def build_regex_explanation(
recognizer_name: str,
pattern_name: str,
pattern: str,
original_score: float,
validation_result: bool,
regex_flags: int,
) -> AnalysisExplanation:
"""
Construct an explanation for why this entity was detected.
:param recognizer_name: Name of recognizer detecting the entity
:param pattern_name: Regex pattern name which detected the entity
:param pattern: Regex pattern logic
:param original_score: Score given by the recognizer
:param validation_result: Whether validation was used and its result
:param regex_flags: Regex flags used in the regex matching
:return: Analysis explanation
"""
explanation = AnalysisExplanation(
recognizer=recognizer_name,
original_score=original_score,
pattern_name=pattern_name,
pattern=pattern,
validation_result=validation_result,
regex_flags=regex_flags,
)
return explanation
| (recognizer_name: str, pattern_name: str, pattern: str, original_score: float, validation_result: bool, regex_flags: int) -> presidio_analyzer.analysis_explanation.AnalysisExplanation |
16,395 | presidio_analyzer.pattern_recognizer | invalidate_result |
Logic to check for result invalidation by running pruning logic.
For example, each SSN number group should not consist of all the same digits.
:param pattern_text: the text to be validated.
Only the part of the text that was detected by the regex engine
:return: A bool indicating whether the result is invalidated
| def invalidate_result(self, pattern_text: str) -> Optional[bool]:
"""
Logic to check for result invalidation by running pruning logic.
For example, each SSN number group should not consist of all the same digits.
:param pattern_text: the text to be validated.
Only the part of the text that was detected by the regex engine
:return: A bool indicating whether the result is invalidated
"""
return None
| (self, pattern_text: str) -> Optional[bool] |
16,396 | presidio_analyzer.pattern_recognizer | load | null | def load(self): # noqa D102
pass
| (self) |
16,398 | presidio_analyzer.pattern_recognizer | to_dict | Serialize instance into a dictionary. | def to_dict(self) -> Dict:
"""Serialize instance into a dictionary."""
return_dict = super().to_dict()
return_dict["patterns"] = [pat.to_dict() for pat in self.patterns]
return_dict["deny_list"] = self.deny_list
return_dict["context"] = self.context
return_dict["supported_entity"] = return_dict["supported_entities"][0]
del return_dict["supported_entities"]
return return_dict
| (self) -> Dict |
16,399 | presidio_analyzer.pattern_recognizer | validate_result |
Validate the pattern logic, e.g., by running a checksum on a detected pattern.
:param pattern_text: the text to be validated.
Only the part of the text that was detected by the regex engine
:return: A bool indicating whether the validation was successful.
| def validate_result(self, pattern_text: str) -> Optional[bool]:
"""
Validate the pattern logic, e.g., by running a checksum on a detected pattern.
:param pattern_text: the text to be validated.
Only the part of the text that was detected by the regex engine
:return: A bool indicating whether the validation was successful.
"""
return None
| (self, pattern_text: str) -> Optional[bool] |
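validate_result and invalidate_result are hooks meant to be overridden; the base class returns None, i.e. no validation decision. A minimal subclass sketch, assuming a purely illustrative entity and checksum rule:
from typing import Optional

from presidio_analyzer import Pattern, PatternRecognizer


class IllustrativeIdRecognizer(PatternRecognizer):
    """Sketch of a recognizer that confirms or rejects matches via validation."""

    def __init__(self):
        super().__init__(
            supported_entity="ILLUSTRATIVE_ID",
            patterns=[Pattern(name="id (weak)", regex=r"\b\d{6}\b", score=0.2)],
        )

    def validate_result(self, pattern_text: str) -> Optional[bool]:
        # Illustrative rule only: accept the match when its digit sum is even.
        return sum(int(ch) for ch in pattern_text) % 2 == 0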
16,400 | presidio_analyzer.analyzer_utils | PresidioAnalyzerUtils |
Utility functions for Presidio Analyzer.
The class provides a bundle of utility functions that help centralize the
logic for re-usability and maintainability
| class PresidioAnalyzerUtils:
"""
Utility functions for Presidio Analyzer.
The class provides a bundle of utility functions that help centralize the
logic for re-usability and maintainability
"""
@staticmethod
def is_palindrome(text: str, case_insensitive: bool = False):
"""
Validate if input text is a true palindrome.
:param text: input text string to check for palindrome
:param case_insensitive: optional flag to check palindrome with no case
:return: True / False
"""
palindrome_text = text
if case_insensitive:
palindrome_text = palindrome_text.replace(" ", "").lower()
return palindrome_text == palindrome_text[::-1]
@staticmethod
def sanitize_value(text: str, replacement_pairs: List[Tuple[str, str]]) -> str:
"""
Cleanse the input string of the replacement pairs specified as argument.
:param text: input string
:param replacement_pairs: pairs of what has to be replaced with which value
:return: cleansed string
"""
for search_string, replacement_string in replacement_pairs:
text = text.replace(search_string, replacement_string)
return text
@staticmethod
def is_verhoeff_number(input_number: int):
"""
Check if the input number is a valid Verhoeff number.
:param input_number:
:return:
"""
__d__ = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 0, 6, 7, 8, 9, 5],
[2, 3, 4, 0, 1, 7, 8, 9, 5, 6],
[3, 4, 0, 1, 2, 8, 9, 5, 6, 7],
[4, 0, 1, 2, 3, 9, 5, 6, 7, 8],
[5, 9, 8, 7, 6, 0, 4, 3, 2, 1],
[6, 5, 9, 8, 7, 1, 0, 4, 3, 2],
[7, 6, 5, 9, 8, 2, 1, 0, 4, 3],
[8, 7, 6, 5, 9, 3, 2, 1, 0, 4],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
]
__p__ = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 5, 7, 6, 2, 8, 3, 0, 9, 4],
[5, 8, 0, 3, 7, 9, 6, 1, 4, 2],
[8, 9, 1, 6, 0, 4, 3, 5, 2, 7],
[9, 4, 5, 3, 1, 2, 6, 8, 7, 0],
[4, 2, 8, 6, 5, 7, 3, 9, 0, 1],
[2, 7, 9, 3, 8, 0, 6, 4, 1, 5],
[7, 0, 4, 6, 9, 1, 3, 2, 5, 8],
]
__inv__ = [0, 4, 3, 2, 1, 5, 6, 7, 8, 9]
c = 0
inverted_number = list(map(int, reversed(str(input_number))))
for i in range(len(inverted_number)):
c = __d__[c][__p__[i % 8][inverted_number[i]]]
return __inv__[c] == 0
| () |
16,401 | presidio_analyzer.analyzer_utils | is_palindrome |
Validate if input text is a true palindrome.
:param text: input text string to check for palindrome
:param case_insensitive: optional flag to check palindrome with no case
:return: True / False
| @staticmethod
def is_palindrome(text: str, case_insensitive: bool = False):
"""
Validate if input text is a true palindrome.
:param text: input text string to check for palindrome
:param case_insensitive: optional flag to check palindrome with no case
:return: True / False
"""
palindrome_text = text
if case_insensitive:
palindrome_text = palindrome_text.replace(" ", "").lower()
return palindrome_text == palindrome_text[::-1]
| (text: str, case_insensitive: bool = False) |
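A doctest-style illustration; note that the case_insensitive flag also strips spaces before comparing:
>>> PresidioAnalyzerUtils.is_palindrome("Never odd or even", case_insensitive=True)
True
>>> PresidioAnalyzerUtils.is_palindrome("Never odd or even")
False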
16,402 | presidio_analyzer.analyzer_utils | is_verhoeff_number |
Check if the input number is a valid Verhoeff number.
:param input_number:
:return:
| @staticmethod
def is_verhoeff_number(input_number: int):
"""
Check if the input number is a valid Verhoeff number.
:param input_number:
:return:
"""
__d__ = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 0, 6, 7, 8, 9, 5],
[2, 3, 4, 0, 1, 7, 8, 9, 5, 6],
[3, 4, 0, 1, 2, 8, 9, 5, 6, 7],
[4, 0, 1, 2, 3, 9, 5, 6, 7, 8],
[5, 9, 8, 7, 6, 0, 4, 3, 2, 1],
[6, 5, 9, 8, 7, 1, 0, 4, 3, 2],
[7, 6, 5, 9, 8, 2, 1, 0, 4, 3],
[8, 7, 6, 5, 9, 3, 2, 1, 0, 4],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
]
__p__ = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 5, 7, 6, 2, 8, 3, 0, 9, 4],
[5, 8, 0, 3, 7, 9, 6, 1, 4, 2],
[8, 9, 1, 6, 0, 4, 3, 5, 2, 7],
[9, 4, 5, 3, 1, 2, 6, 8, 7, 0],
[4, 2, 8, 6, 5, 7, 3, 9, 0, 1],
[2, 7, 9, 3, 8, 0, 6, 4, 1, 5],
[7, 0, 4, 6, 9, 1, 3, 2, 5, 8],
]
__inv__ = [0, 4, 3, 2, 1, 5, 6, 7, 8, 9]
c = 0
inverted_number = list(map(int, reversed(str(input_number))))
for i in range(len(inverted_number)):
c = __d__[c][__p__[i % 8][inverted_number[i]]]
return __inv__[c] == 0
| (input_number: int) |
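The __d__, __p__ and __inv__ tables implement the standard Verhoeff dihedral-group checksum. As a worked example (the check digit is derived from the tables above, so treat it as an illustration rather than quoted output), appending the check digit 3 to 236 yields a valid number:
>>> PresidioAnalyzerUtils.is_verhoeff_number(2363)
True
>>> PresidioAnalyzerUtils.is_verhoeff_number(2364)
False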
16,403 | presidio_analyzer.analyzer_utils | sanitize_value |
Cleanse the input string of the replacement pairs specified as argument.
:param text: input string
:param replacement_pairs: pairs of what has to be replaced with which value
:return: cleansed string
| @staticmethod
def sanitize_value(text: str, replacement_pairs: List[Tuple[str, str]]) -> str:
"""
Cleanse the input string of the replacement pairs specified as argument.
:param text: input string
:param replacement_pairs: pairs of what has to be replaced with which value
:return: cleansed string
"""
for search_string, replacement_string in replacement_pairs:
text = text.replace(search_string, replacement_string)
return text
| (text: str, replacement_pairs: List[Tuple[str, str]]) -> str |
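A doctest-style illustration of stripping separators before further validation (the replacement pairs are illustrative):
>>> PresidioAnalyzerUtils.sanitize_value("4095-2609-9393-4932", [("-", ""), (" ", "")])
'4095260993934932'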
16,404 | presidio_analyzer.recognizer_registry | RecognizerRegistry |
Detect, register and hold all recognizers to be used by the analyzer.
:param recognizers: An optional list of recognizers
that will be available instead of the predefined recognizers
:param global_regex_flags: regex flags to be used in regex matching,
including deny-lists
| class RecognizerRegistry:
"""
Detect, register and hold all recognizers to be used by the analyzer.
:param recognizers: An optional list of recognizers
that will be available instead of the predefined recognizers
:param global_regex_flags: regex flags to be used in regex matching,
including deny-lists
"""
def __init__(
self,
recognizers: Optional[Iterable[EntityRecognizer]] = None,
global_regex_flags: Optional[int] = re.DOTALL | re.MULTILINE | re.IGNORECASE,
):
if recognizers:
self.recognizers = recognizers
else:
self.recognizers = []
self.global_regex_flags = global_regex_flags
def load_predefined_recognizers(
self, languages: Optional[List[str]] = None, nlp_engine: NlpEngine = None
) -> None:
"""
Load the existing recognizers into memory.
:param languages: List of languages for which to load recognizers
:param nlp_engine: The NLP engine to use.
:return: None
"""
if not languages:
languages = ["en"]
nlp_recognizer = self._get_nlp_recognizer(nlp_engine)
recognizers_map = {
"en": [
UsBankRecognizer,
UsLicenseRecognizer,
UsItinRecognizer,
UsPassportRecognizer,
UsSsnRecognizer,
NhsRecognizer,
SgFinRecognizer,
AuAbnRecognizer,
AuAcnRecognizer,
AuTfnRecognizer,
AuMedicareRecognizer,
InPanRecognizer,
InAadhaarRecognizer,
InVehicleRegistrationRecognizer,
],
"es": [EsNifRecognizer],
"it": [
ItDriverLicenseRecognizer,
ItFiscalCodeRecognizer,
ItVatCodeRecognizer,
ItIdentityCardRecognizer,
ItPassportRecognizer,
],
"pl": [PlPeselRecognizer],
"ALL": [
CreditCardRecognizer,
CryptoRecognizer,
DateRecognizer,
EmailRecognizer,
IbanRecognizer,
IpRecognizer,
MedicalLicenseRecognizer,
PhoneRecognizer,
UrlRecognizer,
],
}
for lang in languages:
lang_recognizers = [
self.__instantiate_recognizer(
recognizer_class=rc, supported_language=lang
)
for rc in recognizers_map.get(lang, [])
]
self.recognizers.extend(lang_recognizers)
all_recognizers = [
self.__instantiate_recognizer(
recognizer_class=rc, supported_language=lang
)
for rc in recognizers_map.get("ALL", [])
]
self.recognizers.extend(all_recognizers)
if nlp_engine:
nlp_recognizer_inst = nlp_recognizer(
supported_language=lang,
supported_entities=nlp_engine.get_supported_entities(),
)
else:
nlp_recognizer_inst = nlp_recognizer(supported_language=lang)
self.recognizers.append(nlp_recognizer_inst)
@staticmethod
def _get_nlp_recognizer(
nlp_engine: NlpEngine,
) -> Type[SpacyRecognizer]:
"""Return the recognizer leveraging the selected NLP Engine."""
if isinstance(nlp_engine, StanzaNlpEngine):
return StanzaRecognizer
if isinstance(nlp_engine, TransformersNlpEngine):
return TransformersRecognizer
if not nlp_engine or isinstance(nlp_engine, SpacyNlpEngine):
return SpacyRecognizer
else:
logger.warning(
"nlp engine should be either SpacyNlpEngine,"
"StanzaNlpEngine or TransformersNlpEngine"
)
# Returning default
return SpacyRecognizer
def get_recognizers(
self,
language: str,
entities: Optional[List[str]] = None,
all_fields: bool = False,
ad_hoc_recognizers: Optional[List[EntityRecognizer]] = None,
) -> List[EntityRecognizer]:
"""
Return a list of recognizers which support the specified entities and language.
:param entities: the requested entities
:param language: the requested language
:param all_fields: a flag to return all fields of a requested language.
:param ad_hoc_recognizers: Additional recognizers provided by the user
as part of the request
:return: A list of the recognizers which support the supplied entities
and language
"""
if language is None:
raise ValueError("No language provided")
if entities is None and all_fields is False:
raise ValueError("No entities provided")
all_possible_recognizers = copy.copy(self.recognizers)
if ad_hoc_recognizers:
all_possible_recognizers.extend(ad_hoc_recognizers)
# filter out unwanted recognizers
to_return = set()
if all_fields:
to_return = [
rec
for rec in all_possible_recognizers
if language == rec.supported_language
]
else:
for entity in entities:
subset = [
rec
for rec in all_possible_recognizers
if entity in rec.supported_entities
and language == rec.supported_language
]
if not subset:
logger.warning(
"Entity %s doesn't have the corresponding"
" recognizer in language : %s",
entity,
language,
)
else:
to_return.update(set(subset))
logger.debug(
"Returning a total of %s recognizers",
str(len(to_return)),
)
if not to_return:
raise ValueError("No matching recognizers were found to serve the request.")
return list(to_return)
def add_recognizer(self, recognizer: EntityRecognizer) -> None:
"""
Add a new recognizer to the list of recognizers.
:param recognizer: Recognizer to add
"""
if not isinstance(recognizer, EntityRecognizer):
raise ValueError("Input is not of type EntityRecognizer")
self.recognizers.append(recognizer)
def remove_recognizer(self, recognizer_name: str) -> None:
"""
Remove a recognizer based on its name.
:param recognizer_name: Name of recognizer to remove
"""
new_recognizers = [
rec for rec in self.recognizers if rec.name != recognizer_name
]
logger.info(
"Removed %s recognizers which had the name %s",
str(len(self.recognizers) - len(new_recognizers)),
recognizer_name,
)
self.recognizers = new_recognizers
def add_pattern_recognizer_from_dict(self, recognizer_dict: Dict) -> None:
"""
Load a pattern recognizer from a Dict into the recognizer registry.
:param recognizer_dict: Dict holding a serialization of a PatternRecognizer
:example:
>>> registry = RecognizerRegistry()
>>> recognizer = { "name": "Titles Recognizer", "supported_language": "de","supported_entity": "TITLE", "deny_list": ["Mr.","Mrs."]} # noqa: E501
>>> registry.add_pattern_recognizer_from_dict(recognizer)
"""
recognizer = PatternRecognizer.from_dict(recognizer_dict)
self.add_recognizer(recognizer)
def add_recognizers_from_yaml(self, yml_path: Union[str, Path]) -> None:
r"""
Read YAML file and load recognizers into the recognizer registry.
See example yaml file here:
https://github.com/microsoft/presidio/blob/main/presidio-analyzer/conf/example_recognizers.yaml
:example:
>>> yaml_file = "recognizers.yaml"
>>> registry = RecognizerRegistry()
>>> registry.add_recognizers_from_yaml(yaml_file)
"""
try:
with open(yml_path, "r") as stream:
yaml_recognizers = yaml.safe_load(stream)
for yaml_recognizer in yaml_recognizers["recognizers"]:
self.add_pattern_recognizer_from_dict(yaml_recognizer)
except IOError as io_error:
print(f"Error reading file {yml_path}")
raise io_error
except yaml.YAMLError as yaml_error:
print(f"Failed to parse file {yml_path}")
raise yaml_error
except TypeError as yaml_error:
print(f"Failed to parse file {yml_path}")
raise yaml_error
def __instantiate_recognizer(
self, recognizer_class: Type[EntityRecognizer], supported_language: str
):
"""
Instantiate a recognizer class given type and input.
:param recognizer_class: Class object of the recognizer
:param supported_language: Language this recognizer should support
"""
inst = recognizer_class(supported_language=supported_language)
if isinstance(inst, PatternRecognizer):
inst.global_regex_flags = self.global_regex_flags
return inst
def _get_supported_languages(self) -> List[str]:
languages = []
for rec in self.recognizers:
languages.append(rec.supported_language)
return list(set(languages))
def get_supported_entities(
self, languages: Optional[List[str]] = None
) -> List[str]:
"""
Return the supported entities by the set of recognizers loaded.
:param languages: The languages to get the supported entities for.
If languages=None, returns all entities for all languages.
"""
if not languages:
languages = self._get_supported_languages()
supported_entities = []
for language in languages:
recognizers = self.get_recognizers(language=language, all_fields=True)
for recognizer in recognizers:
supported_entities.extend(recognizer.get_supported_entities())
return list(set(supported_entities))
| (recognizers: Optional[Iterable[presidio_analyzer.entity_recognizer.EntityRecognizer]] = None, global_regex_flags: Optional[int] = regex.I|regex.M|regex.S) |
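A typical flow, sketched under the assumption that the default spaCy-based NLP recognizer is acceptable: load the predefined recognizers, optionally add a custom one, and hand the registry to an AnalyzerEngine (the custom entity and sample text are illustrative):
from presidio_analyzer import AnalyzerEngine, PatternRecognizer, RecognizerRegistry

registry = RecognizerRegistry()
registry.load_predefined_recognizers(languages=["en"])

# Register an extra deny-list recognizer alongside the predefined ones.
registry.add_recognizer(
    PatternRecognizer(supported_entity="TITLE", deny_list=["Mr.", "Mrs."])
)

# The registry is then consumed by the analyzer engine.
analyzer = AnalyzerEngine(registry=registry)
results = analyzer.analyze(text="Mrs. Doe lives in Seattle", language="en")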
16,405 | presidio_analyzer.recognizer_registry | __instantiate_recognizer |
Instantiate a recognizer class given type and input.
:param recognizer_class: Class object of the recognizer
:param supported_language: Language this recognizer should support
| def __instantiate_recognizer(
self, recognizer_class: Type[EntityRecognizer], supported_language: str
):
"""
Instantiate a recognizer class given type and input.
:param recognizer_class: Class object of the recognizer
:param supported_language: Language this recognizer should support
"""
inst = recognizer_class(supported_language=supported_language)
if isinstance(inst, PatternRecognizer):
inst.global_regex_flags = self.global_regex_flags
return inst
| (self, recognizer_class: Type[presidio_analyzer.entity_recognizer.EntityRecognizer], supported_language: str) |
16,406 | presidio_analyzer.recognizer_registry | __init__ | null | def __init__(
self,
recognizers: Optional[Iterable[EntityRecognizer]] = None,
global_regex_flags: Optional[int] = re.DOTALL | re.MULTILINE | re.IGNORECASE,
):
if recognizers:
self.recognizers = recognizers
else:
self.recognizers = []
self.global_regex_flags = global_regex_flags
| (self, recognizers: Optional[Iterable[presidio_analyzer.entity_recognizer.EntityRecognizer]] = None, global_regex_flags: Optional[int] = regex.I|regex.M|regex.S) |
16,407 | presidio_analyzer.recognizer_registry | _get_nlp_recognizer | Return the recognizer leveraging the selected NLP Engine. | @staticmethod
def _get_nlp_recognizer(
nlp_engine: NlpEngine,
) -> Type[SpacyRecognizer]:
"""Return the recognizer leveraging the selected NLP Engine."""
if isinstance(nlp_engine, StanzaNlpEngine):
return StanzaRecognizer
if isinstance(nlp_engine, TransformersNlpEngine):
return TransformersRecognizer
if not nlp_engine or isinstance(nlp_engine, SpacyNlpEngine):
return SpacyRecognizer
else:
logger.warning(
"nlp engine should be either SpacyNlpEngine,"
"StanzaNlpEngine or TransformersNlpEngine"
)
# Returning default
return SpacyRecognizer
| (nlp_engine: presidio_analyzer.nlp_engine.nlp_engine.NlpEngine) -> Type[presidio_analyzer.predefined_recognizers.spacy_recognizer.SpacyRecognizer] |
16,408 | presidio_analyzer.recognizer_registry | _get_supported_languages | null | def _get_supported_languages(self) -> List[str]:
languages = []
for rec in self.recognizers:
languages.append(rec.supported_language)
return list(set(languages))
| (self) -> List[str] |
16,409 | presidio_analyzer.recognizer_registry | add_pattern_recognizer_from_dict |
Load a pattern recognizer from a Dict into the recognizer registry.
:param recognizer_dict: Dict holding a serialization of a PatternRecognizer
:example:
>>> registry = RecognizerRegistry()
>>> recognizer = { "name": "Titles Recognizer", "supported_language": "de","supported_entity": "TITLE", "deny_list": ["Mr.","Mrs."]} # noqa: E501
>>> registry.add_pattern_recognizer_from_dict(recognizer)
| def add_pattern_recognizer_from_dict(self, recognizer_dict: Dict) -> None:
"""
Load a pattern recognizer from a Dict into the recognizer registry.
:param recognizer_dict: Dict holding a serialization of a PatternRecognizer
:example:
>>> registry = RecognizerRegistry()
>>> recognizer = { "name": "Titles Recognizer", "supported_language": "de","supported_entity": "TITLE", "deny_list": ["Mr.","Mrs."]} # noqa: E501
>>> registry.add_pattern_recognizer_from_dict(recognizer)
"""
recognizer = PatternRecognizer.from_dict(recognizer_dict)
self.add_recognizer(recognizer)
| (self, recognizer_dict: Dict) -> NoneType |
16,410 | presidio_analyzer.recognizer_registry | add_recognizer |
Add a new recognizer to the list of recognizers.
:param recognizer: Recognizer to add
| def add_recognizer(self, recognizer: EntityRecognizer) -> None:
"""
Add a new recognizer to the list of recognizers.
:param recognizer: Recognizer to add
"""
if not isinstance(recognizer, EntityRecognizer):
raise ValueError("Input is not of type EntityRecognizer")
self.recognizers.append(recognizer)
| (self, recognizer: presidio_analyzer.entity_recognizer.EntityRecognizer) -> NoneType |
16,411 | presidio_analyzer.recognizer_registry | add_recognizers_from_yaml |
Read YAML file and load recognizers into the recognizer registry.
See example yaml file here:
https://github.com/microsoft/presidio/blob/main/presidio-analyzer/conf/example_recognizers.yaml
:example:
>>> yaml_file = "recognizers.yaml"
>>> registry = RecognizerRegistry()
>>> registry.add_recognizers_from_yaml(yaml_file)
| def add_recognizers_from_yaml(self, yml_path: Union[str, Path]) -> None:
r"""
Read YAML file and load recognizers into the recognizer registry.
See example yaml file here:
https://github.com/microsoft/presidio/blob/main/presidio-analyzer/conf/example_recognizers.yaml
:example:
>>> yaml_file = "recognizers.yaml"
>>> registry = RecognizerRegistry()
>>> registry.add_recognizers_from_yaml(yaml_file)
"""
try:
with open(yml_path, "r") as stream:
yaml_recognizers = yaml.safe_load(stream)
for yaml_recognizer in yaml_recognizers["recognizers"]:
self.add_pattern_recognizer_from_dict(yaml_recognizer)
except IOError as io_error:
print(f"Error reading file {yml_path}")
raise io_error
except yaml.YAMLError as yaml_error:
print(f"Failed to parse file {yml_path}")
raise yaml_error
except TypeError as yaml_error:
print(f"Failed to parse file {yml_path}")
raise yaml_error
| (self, yml_path: Union[str, pathlib.Path]) -> NoneType |
16,412 | presidio_analyzer.recognizer_registry | get_recognizers |
Return a list of recognizers which support the specified entities and language.
:param entities: the requested entities
:param language: the requested language
:param all_fields: a flag to return all fields of a requested language.
:param ad_hoc_recognizers: Additional recognizers provided by the user
as part of the request
:return: A list of the recognizers which support the supplied entities
and language
| def get_recognizers(
self,
language: str,
entities: Optional[List[str]] = None,
all_fields: bool = False,
ad_hoc_recognizers: Optional[List[EntityRecognizer]] = None,
) -> List[EntityRecognizer]:
"""
Return a list of recognizers which support the specified entities and language.
:param entities: the requested entities
:param language: the requested language
:param all_fields: a flag to return all fields of a requested language.
:param ad_hoc_recognizers: Additional recognizers provided by the user
as part of the request
:return: A list of the recognizers which support the supplied entities
and language
"""
if language is None:
raise ValueError("No language provided")
if entities is None and all_fields is False:
raise ValueError("No entities provided")
all_possible_recognizers = copy.copy(self.recognizers)
if ad_hoc_recognizers:
all_possible_recognizers.extend(ad_hoc_recognizers)
# filter out unwanted recognizers
to_return = set()
if all_fields:
to_return = [
rec
for rec in all_possible_recognizers
if language == rec.supported_language
]
else:
for entity in entities:
subset = [
rec
for rec in all_possible_recognizers
if entity in rec.supported_entities
and language == rec.supported_language
]
if not subset:
logger.warning(
"Entity %s doesn't have the corresponding"
" recognizer in language : %s",
entity,
language,
)
else:
to_return.update(set(subset))
logger.debug(
"Returning a total of %s recognizers",
str(len(to_return)),
)
if not to_return:
raise ValueError("No matching recognizers were found to serve the request.")
return list(to_return)
| (self, language: str, entities: Optional[List[str]] = None, all_fields: bool = False, ad_hoc_recognizers: Optional[List[presidio_analyzer.entity_recognizer.EntityRecognizer]] = None) -> List[presidio_analyzer.entity_recognizer.EntityRecognizer] |
16,413 | presidio_analyzer.recognizer_registry | get_supported_entities |
Return the supported entities by the set of recognizers loaded.
:param languages: The languages to get the supported entities for.
If languages=None, returns all entities for all languages.
| def get_supported_entities(
self, languages: Optional[List[str]] = None
) -> List[str]:
"""
Return the supported entities by the set of recognizers loaded.
:param languages: The languages to get the supported entities for.
If languages=None, returns all entities for all languages.
"""
if not languages:
languages = self._get_supported_languages()
supported_entities = []
for language in languages:
recognizers = self.get_recognizers(language=language, all_fields=True)
for recognizer in recognizers:
supported_entities.extend(recognizer.get_supported_entities())
return list(set(supported_entities))
| (self, languages: Optional[List[str]] = None) -> List[str] |
16,414 | presidio_analyzer.recognizer_registry | load_predefined_recognizers |
Load the existing recognizers into memory.
:param languages: List of languages for which to load recognizers
:param nlp_engine: The NLP engine to use.
:return: None
| def load_predefined_recognizers(
self, languages: Optional[List[str]] = None, nlp_engine: NlpEngine = None
) -> None:
"""
Load the existing recognizers into memory.
:param languages: List of languages for which to load recognizers
:param nlp_engine: The NLP engine to use.
:return: None
"""
if not languages:
languages = ["en"]
nlp_recognizer = self._get_nlp_recognizer(nlp_engine)
recognizers_map = {
"en": [
UsBankRecognizer,
UsLicenseRecognizer,
UsItinRecognizer,
UsPassportRecognizer,
UsSsnRecognizer,
NhsRecognizer,
SgFinRecognizer,
AuAbnRecognizer,
AuAcnRecognizer,
AuTfnRecognizer,
AuMedicareRecognizer,
InPanRecognizer,
InAadhaarRecognizer,
InVehicleRegistrationRecognizer,
],
"es": [EsNifRecognizer],
"it": [
ItDriverLicenseRecognizer,
ItFiscalCodeRecognizer,
ItVatCodeRecognizer,
ItIdentityCardRecognizer,
ItPassportRecognizer,
],
"pl": [PlPeselRecognizer],
"ALL": [
CreditCardRecognizer,
CryptoRecognizer,
DateRecognizer,
EmailRecognizer,
IbanRecognizer,
IpRecognizer,
MedicalLicenseRecognizer,
PhoneRecognizer,
UrlRecognizer,
],
}
for lang in languages:
lang_recognizers = [
self.__instantiate_recognizer(
recognizer_class=rc, supported_language=lang
)
for rc in recognizers_map.get(lang, [])
]
self.recognizers.extend(lang_recognizers)
all_recognizers = [
self.__instantiate_recognizer(
recognizer_class=rc, supported_language=lang
)
for rc in recognizers_map.get("ALL", [])
]
self.recognizers.extend(all_recognizers)
if nlp_engine:
nlp_recognizer_inst = nlp_recognizer(
supported_language=lang,
supported_entities=nlp_engine.get_supported_entities(),
)
else:
nlp_recognizer_inst = nlp_recognizer(supported_language=lang)
self.recognizers.append(nlp_recognizer_inst)
| (self, languages: Optional[List[str]] = None, nlp_engine: Optional[presidio_analyzer.nlp_engine.nlp_engine.NlpEngine] = None) -> NoneType |
16,415 | presidio_analyzer.recognizer_registry | remove_recognizer |
Remove a recognizer based on its name.
:param recognizer_name: Name of recognizer to remove
| def remove_recognizer(self, recognizer_name: str) -> None:
"""
Remove a recognizer based on its name.
:param recognizer_name: Name of recognizer to remove
"""
new_recognizers = [
rec for rec in self.recognizers if rec.name != recognizer_name
]
logger.info(
"Removed %s recognizers which had the name %s",
str(len(self.recognizers) - len(new_recognizers)),
recognizer_name,
)
self.recognizers = new_recognizers
| (self, recognizer_name: str) -> NoneType |
16,416 | presidio_analyzer.recognizer_result | RecognizerResult |
Recognizer Result represents the findings of the detected entity.
Result of a recognizer analyzing the text.
:param entity_type: the type of the entity
:param start: the start location of the detected entity
:param end: the end location of the detected entity
:param score: the score of the detection
:param analysis_explanation: contains the explanation of why this
entity was identified
:param recognition_metadata: a dictionary of metadata to be used in
recognizer specific cases, for example specific recognized context words
and recognizer name
| class RecognizerResult:
"""
Recognizer Result represents the findings of the detected entity.
Result of a recognizer analyzing the text.
:param entity_type: the type of the entity
:param start: the start location of the detected entity
:param end: the end location of the detected entity
:param score: the score of the detection
:param analysis_explanation: contains the explanation of why this
entity was identified
:param recognition_metadata: a dictionary of metadata to be used in
recognizer specific cases, for example specific recognized context words
and recognizer name
"""
# Keys for recognizer metadata
RECOGNIZER_NAME_KEY = "recognizer_name"
RECOGNIZER_IDENTIFIER_KEY = "recognizer_identifier"
# Key of a flag inside recognition_metadata dictionary
# which is set to true if the result enhanced by context
IS_SCORE_ENHANCED_BY_CONTEXT_KEY = "is_score_enhanced_by_context"
logger = logging.getLogger("presidio-analyzer")
def __init__(
self,
entity_type: str,
start: int,
end: int,
score: float,
analysis_explanation: AnalysisExplanation = None,
recognition_metadata: Dict = None,
):
self.entity_type = entity_type
self.start = start
self.end = end
self.score = score
self.analysis_explanation = analysis_explanation
if not recognition_metadata:
self.logger.debug(
"recognition_metadata should be passed, "
"containing a recognizer_name value"
)
self.recognition_metadata = recognition_metadata
def append_analysis_explanation_text(self, text: str) -> None:
"""Add text to the analysis explanation."""
if self.analysis_explanation:
self.analysis_explanation.append_textual_explanation_line(text)
def to_dict(self) -> Dict:
"""
Serialize self to dictionary.
:return: a dictionary
"""
return self.__dict__
@classmethod
def from_json(cls, data: Dict) -> "RecognizerResult":
"""
Create RecognizerResult from json.
:param data: e.g. {
"start": 24,
"end": 32,
"score": 0.8,
"entity_type": "NAME"
}
:return: RecognizerResult
"""
score = data.get("score")
entity_type = data.get("entity_type")
start = data.get("start")
end = data.get("end")
return cls(entity_type, start, end, score)
def __repr__(self) -> str:
"""Return a string representation of the instance."""
return self.__str__()
def intersects(self, other: "RecognizerResult") -> int:
"""
Check if self intersects with a different RecognizerResult.
:return: If intersecting, returns the number of
intersecting characters.
If not, returns 0
"""
# if they do not overlap the intersection is 0
if self.end < other.start or other.end < self.start:
return 0
# otherwise the intersection is min(end) - max(start)
return min(self.end, other.end) - max(self.start, other.start)
def contained_in(self, other: "RecognizerResult") -> bool:
"""
Check if self is contained in a different RecognizerResult.
:return: true if contained
"""
return self.start >= other.start and self.end <= other.end
def contains(self, other: "RecognizerResult") -> bool:
"""
Check if one result is contained or equal to another result.
:param other: another RecognizerResult
:return: bool
"""
return self.start <= other.start and self.end >= other.end
def equal_indices(self, other: "RecognizerResult") -> bool:
"""
Check if the indices are equal between two results.
:param other: another RecognizerResult
:return:
"""
return self.start == other.start and self.end == other.end
def __gt__(self, other: "RecognizerResult") -> bool:
"""
Check if one result is greater by using the results indices in the text.
:param other: another RecognizerResult
:return: bool
"""
if self.start == other.start:
return self.end > other.end
return self.start > other.start
def __eq__(self, other: "RecognizerResult") -> bool:
"""
Check two results are equal by using all class fields.
:param other: another RecognizerResult
:return: bool
"""
equal_type = self.entity_type == other.entity_type
equal_score = self.score == other.score
return self.equal_indices(other) and equal_type and equal_score
def __hash__(self):
"""
Hash the result data by using all class fields.
:return: int
"""
return hash(
f"{str(self.start)} {str(self.end)} {str(self.score)} {self.entity_type}"
)
def __str__(self) -> str:
"""Return a string representation of the instance."""
return (
f"type: {self.entity_type}, "
f"start: {self.start}, "
f"end: {self.end}, "
f"score: {self.score}"
)
def has_conflict(self, other: "RecognizerResult") -> bool:
"""
Check if two recognizer results are conflicted or not.
I have a conflict if:
1. My indices are the same as the other's and my score is lower or equal.
2. My indices are contained within the other's.
:param other: RecognizerResult
:return:
"""
if self.equal_indices(other):
return self.score <= other.score
return other.contains(self)
| (entity_type: str, start: int, end: int, score: float, analysis_explanation: presidio_analyzer.analysis_explanation.AnalysisExplanation = None, recognition_metadata: Dict = None) |
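The span-comparison helpers are easiest to read with a concrete pair of results (offsets and scores are illustrative):
>>> from presidio_analyzer import RecognizerResult
>>> a = RecognizerResult(entity_type="PERSON", start=0, end=10, score=0.85)
>>> b = RecognizerResult(entity_type="PERSON", start=5, end=10, score=0.6)
>>> a.intersects(b)
5
>>> a.contains(b)
True
>>> b.has_conflict(a)
True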
16,417 | presidio_analyzer.recognizer_result | __eq__ |
Check two results are equal by using all class fields.
:param other: another RecognizerResult
:return: bool
| def __eq__(self, other: "RecognizerResult") -> bool:
"""
Check two results are equal by using all class fields.
:param other: another RecognizerResult
:return: bool
"""
equal_type = self.entity_type == other.entity_type
equal_score = self.score == other.score
return self.equal_indices(other) and equal_type and equal_score
| (self, other: presidio_analyzer.recognizer_result.RecognizerResult) -> bool |
16,418 | presidio_analyzer.recognizer_result | __gt__ |
Check if one result is greater by using the results indices in the text.
:param other: another RecognizerResult
:return: bool
| def __gt__(self, other: "RecognizerResult") -> bool:
"""
Check if one result is greater by using the results indices in the text.
:param other: another RecognizerResult
:return: bool
"""
if self.start == other.start:
return self.end > other.end
return self.start > other.start
| (self, other: presidio_analyzer.recognizer_result.RecognizerResult) -> bool |
16,419 | presidio_analyzer.recognizer_result | __hash__ |
Hash the result data by using all class fields.
:return: int
| def __hash__(self):
"""
Hash the result data by using all class fields.
:return: int
"""
return hash(
f"{str(self.start)} {str(self.end)} {str(self.score)} {self.entity_type}"
)
| (self) |
16,420 | presidio_analyzer.recognizer_result | __init__ | null | def __init__(
self,
entity_type: str,
start: int,
end: int,
score: float,
analysis_explanation: AnalysisExplanation = None,
recognition_metadata: Dict = None,
):
self.entity_type = entity_type
self.start = start
self.end = end
self.score = score
self.analysis_explanation = analysis_explanation
if not recognition_metadata:
self.logger.debug(
"recognition_metadata should be passed, "
"containing a recognizer_name value"
)
self.recognition_metadata = recognition_metadata
| (self, entity_type: str, start: int, end: int, score: float, analysis_explanation: Optional[presidio_analyzer.analysis_explanation.AnalysisExplanation] = None, recognition_metadata: Optional[Dict] = None) |
16,421 | presidio_analyzer.recognizer_result | __repr__ | Return a string representation of the instance. | def __repr__(self) -> str:
"""Return a string representation of the instance."""
return self.__str__()
| (self) -> str |
16,422 | presidio_analyzer.recognizer_result | __str__ | Return a string representation of the instance. | def __str__(self) -> str:
"""Return a string representation of the instance."""
return (
f"type: {self.entity_type}, "
f"start: {self.start}, "
f"end: {self.end}, "
f"score: {self.score}"
)
| (self) -> str |
16,423 | presidio_analyzer.recognizer_result | append_analysis_explanation_text | Add text to the analysis explanation. | def append_analysis_explanation_text(self, text: str) -> None:
"""Add text to the analysis explanation."""
if self.analysis_explanation:
self.analysis_explanation.append_textual_explanation_line(text)
| (self, text: str) -> NoneType |
16,424 | presidio_analyzer.recognizer_result | contained_in |
Check if self is contained in a different RecognizerResult.
:return: true if contained
| def contained_in(self, other: "RecognizerResult") -> bool:
"""
Check if self is contained in a different RecognizerResult.
:return: true if contained
"""
return self.start >= other.start and self.end <= other.end
| (self, other: presidio_analyzer.recognizer_result.RecognizerResult) -> bool |
16,425 | presidio_analyzer.recognizer_result | contains |
Check if one result is contained or equal to another result.
:param other: another RecognizerResult
:return: bool
| def contains(self, other: "RecognizerResult") -> bool:
"""
Check if one result is contained or equal to another result.
:param other: another RecognizerResult
:return: bool
"""
return self.start <= other.start and self.end >= other.end
| (self, other: presidio_analyzer.recognizer_result.RecognizerResult) -> bool |
16,426 | presidio_analyzer.recognizer_result | equal_indices |
Check if the indices are equal between two results.
:param other: another RecognizerResult
:return:
| def equal_indices(self, other: "RecognizerResult") -> bool:
"""
Check if the indices are equal between two results.
:param other: another RecognizerResult
:return:
"""
return self.start == other.start and self.end == other.end
| (self, other: presidio_analyzer.recognizer_result.RecognizerResult) -> bool |
16,427 | presidio_analyzer.recognizer_result | has_conflict |
Check if two recognizer results are conflicted or not.
I have a conflict if:
1. My indices are the same as the other's and my score is lower or equal.
2. My indices are contained within the other's.
:param other: RecognizerResult
:return:
| def has_conflict(self, other: "RecognizerResult") -> bool:
"""
Check if two recognizer results are conflicted or not.
I have a conflict if:
1. My indices are the same as the other's and my score is lower or equal.
2. My indices are contained within the other's.
:param other: RecognizerResult
:return:
"""
if self.equal_indices(other):
return self.score <= other.score
return other.contains(self)
| (self, other: presidio_analyzer.recognizer_result.RecognizerResult) -> bool |
16,428 | presidio_analyzer.recognizer_result | intersects |
Check if self intersects with a different RecognizerResult.
:return: If intersecting, returns the number of
intersecting characters.
If not, returns 0
| def intersects(self, other: "RecognizerResult") -> int:
"""
Check if self intersects with a different RecognizerResult.
:return: If intersecting, returns the number of
intersecting characters.
If not, returns 0
"""
# if they do not overlap the intersection is 0
if self.end < other.start or other.end < self.start:
return 0
# otherwise the intersection is min(end) - max(start)
return min(self.end, other.end) - max(self.start, other.start)
| (self, other: presidio_analyzer.recognizer_result.RecognizerResult) -> int |
16,430 | presidio_analyzer.remote_recognizer | RemoteRecognizer |
A configuration for a recognizer that runs on a different process / remote machine.
:param supported_entities: A list of entities this recognizer can identify
:param name: name of recognizer
:param supported_language: The language this recognizer can detect entities in
:param version: Version of this recognizer
| class RemoteRecognizer(ABC, EntityRecognizer):
"""
A configuration for a recognizer that runs on a different process / remote machine.
:param supported_entities: A list of entities this recognizer can identify
:param name: name of recognizer
:param supported_language: The language this recognizer can detect entities in
:param version: Version of this recognizer
"""
def __init__(
self,
supported_entities: List[str],
name: Optional[str],
supported_language: str,
version: str,
context: Optional[List[str]] = None,
):
super().__init__(
supported_entities=supported_entities,
name=name,
supported_language=supported_language,
version=version,
context=context,
)
def load(self): # noqa D102
pass
@abstractmethod
def analyze(
self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
): # noqa ANN201
"""
Call an external service for PII detection.
:param text: text to be analyzed
:param entities: Entities that should be looked for
:param nlp_artifacts: Additional metadata from the NLP engine
:return: List of identified PII entities
"""
# 1. Call the external service.
# 2. Translate results into List[RecognizerResult]
pass
@abstractmethod
def get_supported_entities(self) -> List[str]: # noqa D102
pass
| (supported_entities: List[str], name: Optional[str], supported_language: str, version: str, context: Optional[List[str]] = None) |
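RemoteRecognizer is abstract and only useful through a subclass. A hedged sketch follows; the HTTP endpoint, response shape, and use of the requests library are assumptions made for illustration and are not part of presidio:
from typing import List

import requests  # assumed transport for this sketch

from presidio_analyzer import RecognizerResult, RemoteRecognizer
from presidio_analyzer.nlp_engine import NlpArtifacts


class ExampleRemoteRecognizer(RemoteRecognizer):
    """Sketch of a recognizer that delegates PII detection to a remote service."""

    def __init__(self):
        super().__init__(
            supported_entities=["PERSON"],
            name="ExampleRemoteRecognizer",
            supported_language="en",
            version="0.1",
        )

    def analyze(self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts):
        # 1. Call the external service (hypothetical URL and payload).
        response = requests.post(
            "https://pii-service.example.com/detect", json={"text": text}, timeout=10
        )
        # 2. Translate the service response into RecognizerResult objects.
        return [
            RecognizerResult(
                entity_type=hit["entity_type"],
                start=hit["start"],
                end=hit["end"],
                score=hit["score"],
            )
            for hit in response.json()
            if hit["entity_type"] in entities
        ]

    def get_supported_entities(self) -> List[str]:
        return self.supported_entities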
16,431 | presidio_analyzer.remote_recognizer | __init__ | null | def __init__(
self,
supported_entities: List[str],
name: Optional[str],
supported_language: str,
version: str,
context: Optional[List[str]] = None,
):
super().__init__(
supported_entities=supported_entities,
name=name,
supported_language=supported_language,
version=version,
context=context,
)
| (self, supported_entities: List[str], name: Optional[str], supported_language: str, version: str, context: Optional[List[str]] = None) |
16,432 | presidio_analyzer.remote_recognizer | analyze |
Call an external service for PII detection.
:param text: text to be analyzed
:param entities: Entities that should be looked for
:param nlp_artifacts: Additional metadata from the NLP engine
:return: List of identified PII entities
| @abstractmethod
def analyze(
self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
): # noqa ANN201
"""
Call an external service for PII detection.
:param text: text to be analyzed
:param entities: Entities that should be looked for
:param nlp_artifacts: Additional metadata from the NLP engine
:return: List of identified PII entities
"""
# 1. Call the external service.
# 2. Translate results into List[RecognizerResult]
pass
| (self, text: str, entities: List[str], nlp_artifacts: presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts) |
16,434 | presidio_analyzer.remote_recognizer | get_supported_entities | null | @abstractmethod
def get_supported_entities(self) -> List[str]: # noqa D102
pass
| (self) -> List[str] |
16,458 | sklearn.ensemble._weight_boosting | AdaBoostClassifier | An AdaBoost classifier.
An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm based on [2]_.
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
estimator : object, default=None
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`
initialized with `max_depth=1`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
Values must be in the range `[1, inf)`.
learning_rate : float, default=1.0
Weight applied to each classifier at each boosting iteration. A higher
learning rate increases the contribution of each classifier. There is
a trade-off between the `learning_rate` and `n_estimators` parameters.
Values must be in the range `(0.0, inf)`.
algorithm : {'SAMME', 'SAMME.R'}, default='SAMME.R'
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
.. deprecated:: 1.4
`"SAMME.R"` is deprecated and will be removed in version 1.6.
'"SAMME"' will become the default.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `estimator` at each
boosting iteration.
Thus, it is only used when `estimator` exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdaBoostRegressor : An AdaBoost regressor that begins by fitting a
regressor on the original dataset and then fits additional copies of
the regressor on the same dataset but where the weights of instances
are adjusted according to the error of the current prediction.
GradientBoostingClassifier : GB builds an additive model in a forward
stage-wise fashion. Regression trees are fit on the negative gradient
of the binomial or multinomial deviance loss function. Binary
classification is a special case where only a single regression tree is
induced.
sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning
method used for classification.
Creates a model that predicts the value of a target variable by
learning simple decision rules inferred from the data features.
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost."
Statistics and its Interface 2.3 (2009): 349-360.
<10.4310/SII.2009.v2.n3.a8>`
Examples
--------
>>> from sklearn.ensemble import AdaBoostClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = AdaBoostClassifier(n_estimators=100, algorithm="SAMME", random_state=0)
>>> clf.fit(X, y)
AdaBoostClassifier(algorithm='SAMME', n_estimators=100, random_state=0)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
>>> clf.score(X, y)
0.96...
| class AdaBoostClassifier(
_RoutingNotSupportedMixin, ClassifierMixin, BaseWeightBoosting
):
"""An AdaBoost classifier.
An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm based on [2]_.
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
estimator : object, default=None
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`
initialized with `max_depth=1`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
Values must be in the range `[1, inf)`.
learning_rate : float, default=1.0
Weight applied to each classifier at each boosting iteration. A higher
learning rate increases the contribution of each classifier. There is
a trade-off between the `learning_rate` and `n_estimators` parameters.
Values must be in the range `(0.0, inf)`.
algorithm : {'SAMME', 'SAMME.R'}, default='SAMME.R'
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
.. deprecated:: 1.4
`"SAMME.R"` is deprecated and will be removed in version 1.6.
'"SAMME"' will become the default.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `estimator` at each
boosting iteration.
Thus, it is only used when `estimator` exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdaBoostRegressor : An AdaBoost regressor that begins by fitting a
regressor on the original dataset and then fits additional copies of
the regressor on the same dataset but where the weights of instances
are adjusted according to the error of the current prediction.
GradientBoostingClassifier : GB builds an additive model in a forward
stage-wise fashion. Regression trees are fit on the negative gradient
of the binomial or multinomial deviance loss function. Binary
classification is a special case where only a single regression tree is
induced.
sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning
method used for classification.
Creates a model that predicts the value of a target variable by
learning simple decision rules inferred from the data features.
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost."
Statistics and its Interface 2.3 (2009): 349-360.
<10.4310/SII.2009.v2.n3.a8>`
Examples
--------
>>> from sklearn.ensemble import AdaBoostClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = AdaBoostClassifier(n_estimators=100, algorithm="SAMME", random_state=0)
>>> clf.fit(X, y)
AdaBoostClassifier(algorithm='SAMME', n_estimators=100, random_state=0)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
>>> clf.score(X, y)
0.96...
"""
# TODO(1.6): Modify _parameter_constraints for "algorithm" to only check
# for "SAMME"
_parameter_constraints: dict = {
**BaseWeightBoosting._parameter_constraints,
"algorithm": [
StrOptions({"SAMME", "SAMME.R"}),
],
}
# TODO(1.6): Change default "algorithm" value to "SAMME"
def __init__(
self,
estimator=None,
*,
n_estimators=50,
learning_rate=1.0,
algorithm="SAMME.R",
random_state=None,
):
super().__init__(
estimator=estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state,
)
self.algorithm = algorithm
def _validate_estimator(self):
"""Check the estimator and set the estimator_ attribute."""
super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1))
# TODO(1.6): Remove, as "SAMME.R" value for "algorithm" param will be
# removed in 1.6
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm != "SAMME":
warnings.warn(
(
"The SAMME.R algorithm (the default) is deprecated and will be"
" removed in 1.6. Use the SAMME algorithm to circumvent this"
" warning."
),
FutureWarning,
)
if not hasattr(self.estimator_, "predict_proba"):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead."
)
if not has_fit_parameter(self.estimator_, "sample_weight"):
raise ValueError(
f"{self.estimator.__class__.__name__} doesn't support sample_weight."
)
# TODO(1.6): Redefine the scope of the `_boost` and `_boost_discrete`
# functions to be the same since SAMME will be the default value for the
# "algorithm" parameter in version 1.6. Thus, a distinguishing function is
# no longer needed. (Or adjust the code here if another algorithm is to be
# used instead of SAMME.R.)
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState instance
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == "SAMME.R":
return self._boost_real(iboost, X, y, sample_weight, random_state)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight, random_state)
# TODO(1.6): Remove function. The `_boost_real` function won't be used any
# longer, because the SAMME.R algorithm will be deprecated in 1.6.
def _boost_real(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, "classes_", None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1.0, 0.0
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1.0 / (n_classes - 1), 1.0])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (
-1.0
* self.learning_rate
* ((n_classes - 1.0) / n_classes)
* xlogy(y_coding, y_predict_proba).sum(axis=1)
)
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(
estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))
)
return sample_weight, 1.0, estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, "classes_", None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1.0, 0.0
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1.0 - (1.0 / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError(
"BaseClassifier in AdaBoostClassifier "
"ensemble is worse than random, ensemble "
"can not be fit."
)
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
)
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight = np.exp(
np.log(sample_weight)
+ estimator_weight * incorrect * (sample_weight > 0)
)
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes.
"""
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
score : ndarray of shape of (n_samples, k)
The decision function of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self)
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
# TODO(1.6): Remove, because "algorithm" param will be deprecated in 1.6
if self.algorithm == "SAMME.R":
# The weights are all 1. for SAMME.R
pred = sum(
_samme_proba(estimator, n_classes, X) for estimator in self.estimators_
)
else: # self.algorithm == "SAMME"
pred = sum(
np.where(
(estimator.predict(X) == classes).T,
w,
-1 / (n_classes - 1) * w,
)
for estimator, w in zip(self.estimators_, self.estimator_weights_)
)
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
score : generator of ndarray of shape (n_samples, k)
The decision function of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self)
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.0
for weight, estimator in zip(self.estimator_weights_, self.estimators_):
norm += weight
# TODO(1.6): Remove, because "algorithm" param will be deprecated in
# 1.6
if self.algorithm == "SAMME.R":
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = np.where(
(estimator.predict(X) == classes).T,
weight,
-1 / (n_classes - 1) * weight,
)
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
@staticmethod
def _compute_proba_from_decision(decision, n_classes):
"""Compute probabilities from the decision function.
This is based on eq. (15) of [1] where:
p(y=c|X) = exp((1 / (K - 1)) f_c(X)) / sum_k(exp((1 / (K - 1)) f_k(X)))
= softmax((1 / (K - 1)) * f(X))
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
2009.
"""
if n_classes == 2:
decision = np.vstack([-decision, decision]).T / 2
else:
decision /= n_classes - 1
return softmax(decision, copy=False)
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
"""
check_is_fitted(self)
n_classes = self.n_classes_
if n_classes == 1:
return np.ones((_num_samples(X), 1))
decision = self.decision_function(X)
return self._compute_proba_from_decision(decision, n_classes)
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
p : generator of ndarray of shape (n_samples,)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
"""
n_classes = self.n_classes_
for decision in self.staged_decision_function(X):
yield self._compute_proba_from_decision(decision, n_classes)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
"""
return np.log(self.predict_proba(X))
| (estimator=None, *, n_estimators=50, learning_rate=1.0, algorithm='SAMME.R', random_state=None) |
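The record above already carries a doctest-style example; the sketch below is an additional, hedged illustration of swapping in a different weak learner together with the non-deprecated "SAMME" algorithm. The dataset, tree depth, and learning rate are illustrative assumptions, not values from the source.

# Hedged sketch: a depth-2 tree as the weak learner, SAMME algorithm.
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=500, n_features=10, random_state=0)
clf = AdaBoostClassifier(
    estimator=DecisionTreeClassifier(max_depth=2),  # must accept sample_weight in fit
    n_estimators=50,
    learning_rate=0.5,
    algorithm="SAMME",  # avoids the SAMME.R deprecation warning
    random_state=0,
)
clf.fit(X, y)
print(clf.score(X, y))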
16,459 | sklearn.ensemble._base | __getitem__ | Return the index'th estimator in the ensemble. | def __getitem__(self, index):
"""Return the index'th estimator in the ensemble."""
return self.estimators_[index]
| (self, index) |
16,461 | sklearn.ensemble._weight_boosting | __init__ | null | def __init__(
self,
estimator=None,
*,
n_estimators=50,
learning_rate=1.0,
algorithm="SAMME.R",
random_state=None,
):
super().__init__(
estimator=estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state,
)
self.algorithm = algorithm
| (self, estimator=None, *, n_estimators=50, learning_rate=1.0, algorithm='SAMME.R', random_state=None) |
16,462 | sklearn.ensemble._base | __iter__ | Return iterator over estimators in the ensemble. | def __iter__(self):
"""Return iterator over estimators in the ensemble."""
return iter(self.estimators_)
| (self) |
16,463 | sklearn.ensemble._base | __len__ | Return the number of estimators in the ensemble. | def __len__(self):
"""Return the number of estimators in the ensemble."""
return len(self.estimators_)
| (self) |
16,467 | sklearn.ensemble._weight_boosting | _boost | Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState instance
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
| def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState instance
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == "SAMME.R":
return self._boost_real(iboost, X, y, sample_weight, random_state)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight, random_state)
| (self, iboost, X, y, sample_weight, random_state) |
16,468 | sklearn.ensemble._weight_boosting | _boost_discrete | Implement a single boost using the SAMME discrete algorithm. | def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, "classes_", None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1.0, 0.0
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1.0 - (1.0 / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError(
"BaseClassifier in AdaBoostClassifier "
"ensemble is worse than random, ensemble "
"can not be fit."
)
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
)
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight = np.exp(
np.log(sample_weight)
+ estimator_weight * incorrect * (sample_weight > 0)
)
return sample_weight, estimator_weight, estimator_error
| (self, iboost, X, y, sample_weight, random_state) |
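A minimal numeric sketch of the SAMME weight formula used in `_boost_discrete` above, `estimator_weight = learning_rate * (log((1 - err) / err) + log(K - 1))`; the error value and class count below are made-up illustrative numbers.

import numpy as np

learning_rate, estimator_error, n_classes = 1.0, 0.25, 3  # illustrative values
estimator_weight = learning_rate * (
    np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
)
print(round(estimator_weight, 3))  # log(3) + log(2) ~= 1.792
# Misclassified samples are then up-weighted by exp(estimator_weight) for the next round.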
16,469 | sklearn.ensemble._weight_boosting | _boost_real | Implement a single boost using the SAMME.R real algorithm. | def _boost_real(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, "classes_", None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1.0, 0.0
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1.0 / (n_classes - 1), 1.0])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (
-1.0
* self.learning_rate
* ((n_classes - 1.0) / n_classes)
* xlogy(y_coding, y_predict_proba).sum(axis=1)
)
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(
estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))
)
return sample_weight, 1.0, estimator_error
| (self, iboost, X, y, sample_weight, random_state) |
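To make the coding step in `_boost_real` above concrete, a small sketch with toy labels shows how `y_codes.take(classes == y[:, np.newaxis])` produces the +1 / -1/(K-1) coding of Zhu et al.; the labels are assumptions for illustration.

import numpy as np

classes = np.array([0, 1, 2])                         # K = 3 classes
y = np.array([1, 2])                                  # two toy samples
y_codes = np.array([-1.0 / (len(classes) - 1), 1.0])
# The boolean mask is consumed by `take` as 0/1 indices into y_codes.
y_coding = y_codes.take(classes == y[:, np.newaxis])
print(y_coding)
# [[-0.5  1.  -0.5]
#  [-0.5 -0.5  1. ]]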
16,470 | sklearn.ensemble._weight_boosting | _check_X | null | def _check_X(self, X):
# Only called to validate X in non-fit methods, therefore reset=False
return self._validate_data(
X,
accept_sparse=["csr", "csc"],
ensure_2d=True,
allow_nd=True,
dtype=None,
reset=False,
)
| (self, X) |
16,473 | sklearn.ensemble._weight_boosting | _compute_proba_from_decision | Compute probabilities from the decision function.
This is based on eq. (15) of [1] where:
p(y=c|X) = exp((1 / (K - 1)) f_c(X)) / sum_k(exp((1 / (K - 1)) f_k(X)))
= softmax((1 / (K - 1)) * f(X))
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
2009.
| @staticmethod
def _compute_proba_from_decision(decision, n_classes):
"""Compute probabilities from the decision function.
This is based on eq. (15) of [1] where:
p(y=c|X) = exp((1 / (K - 1)) f_c(X)) / sum_k(exp((1 / (K - 1)) f_k(X)))
= softmax((1 / (K - 1)) * f(X))
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
2009.
"""
if n_classes == 2:
decision = np.vstack([-decision, decision]).T / 2
else:
decision /= n_classes - 1
return softmax(decision, copy=False)
| (decision, n_classes) |
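A toy-number sketch of `_compute_proba_from_decision` above: scale the decision values by 1/(K - 1) and apply a row-wise softmax (the binary branch stacks ±decision/2 first). The decision values are made up for illustration.

import numpy as np

def softmax_rows(z):
    z = z - z.max(axis=1, keepdims=True)  # subtract the row max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

n_classes = 3
decision = np.array([[2.0, 0.5, -2.5]])   # toy decision values for one sample
proba = softmax_rows(decision / (n_classes - 1))
print(proba, proba.sum())                 # row sums to 1; largest decision -> largest probability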
16,477 | sklearn.ensemble._base | _make_estimator | Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
| def _make_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.estimator_)
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
if random_state is not None:
_set_random_states(estimator, random_state)
if append:
self.estimators_.append(estimator)
return estimator
| (self, append=True, random_state=None) |
16,478 | sklearn.base | _more_tags | null | def _more_tags(self):
return {"requires_y": True}
| (self) |
16,482 | sklearn.ensemble._weight_boosting | _validate_estimator | Check the estimator and set the estimator_ attribute. | def _validate_estimator(self):
"""Check the estimator and set the estimator_ attribute."""
super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1))
# TODO(1.6): Remove, as "SAMME.R" value for "algorithm" param will be
# removed in 1.6
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm != "SAMME":
warnings.warn(
(
"The SAMME.R algorithm (the default) is deprecated and will be"
" removed in 1.6. Use the SAMME algorithm to circumvent this"
" warning."
),
FutureWarning,
)
if not hasattr(self.estimator_, "predict_proba"):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead."
)
if not has_fit_parameter(self.estimator_, "sample_weight"):
raise ValueError(
f"{self.estimator.__class__.__name__} doesn't support sample_weight."
)
| (self) |
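A hedged sketch of what the validation above requires of a custom weak learner: its `fit` must accept `sample_weight`, and (for the deprecated SAMME.R path) it must expose `predict_proba`. LogisticRegression satisfies both; the dataset and parameter values are illustrative assumptions.

from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=200, random_state=0)
clf = AdaBoostClassifier(
    estimator=LogisticRegression(max_iter=1000),  # supports sample_weight and predict_proba
    algorithm="SAMME",
    n_estimators=10,
    random_state=0,
).fit(X, y)
print(len(clf.estimators_))  # may be < n_estimators if a round fits perfectly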
16,484 | sklearn.ensemble._weight_boosting | decision_function | Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
score : ndarray of shape of (n_samples, k)
The decision function of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
| def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
score : ndarray of shape of (n_samples, k)
The decision function of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self)
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
# TODO(1.6): Remove, because "algorithm" param will be deprecated in 1.6
if self.algorithm == "SAMME.R":
# The weights are all 1. for SAMME.R
pred = sum(
_samme_proba(estimator, n_classes, X) for estimator in self.estimators_
)
else: # self.algorithm == "SAMME"
pred = sum(
np.where(
(estimator.predict(X) == classes).T,
w,
-1 / (n_classes - 1) * w,
)
for estimator, w in zip(self.estimators_, self.estimator_weights_)
)
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
| (self, X) |
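A hedged usage sketch of the shape conventions documented for `decision_function`: with two classes the result is 1-D and its sign picks the class (here `classes_` is [0, 1]); the data is illustrative.

from sklearn.ensemble import AdaBoostClassifier
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=300, random_state=0)
clf = AdaBoostClassifier(algorithm="SAMME", n_estimators=20, random_state=0).fit(X, y)
scores = clf.decision_function(X[:5])
print(scores.shape)                                     # (5,) for a binary problem
print((scores > 0).astype(int) == clf.predict(X[:5]))   # sign matches predict when classes_ == [0, 1]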
16,485 | sklearn.ensemble._weight_boosting | fit | Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Fitted estimator.
| null | (self, X, y, sample_weight=None) |
16,486 | sklearn.utils._metadata_requests | get_metadata_routing | Raise `NotImplementedError`.
This estimator does not support metadata routing yet. | def get_metadata_routing(self):
"""Raise `NotImplementedError`.
This estimator does not support metadata routing yet."""
raise NotImplementedError(
f"{self.__class__.__name__} has not implemented metadata routing yet."
)
| (self) |
16,488 | sklearn.ensemble._weight_boosting | predict | Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
| def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
| (self, X) |
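A minimal sketch (toy values) of the boolean `take` idiom used in the binary branch of `predict` above: `pred > 0` yields True/False, which index positions 1/0 of `classes_`.

import numpy as np

classes_ = np.array(["neg", "pos"])
pred = np.array([-1.3, 0.2, 0.7])       # toy decision values
print(classes_.take(pred > 0, axis=0))  # ['neg' 'pos' 'pos']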
16,489 | sklearn.ensemble._weight_boosting | predict_log_proba | Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
| def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
"""
return np.log(self.predict_proba(X))
| (self, X) |
16,490 | sklearn.ensemble._weight_boosting | predict_proba | Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
| def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
"""
check_is_fitted(self)
n_classes = self.n_classes_
if n_classes == 1:
return np.ones((_num_samples(X), 1))
decision = self.decision_function(X)
return self._compute_proba_from_decision(decision, n_classes)
| (self, X) |
16,491 | sklearn.base | score |
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` w.r.t. `y`.
| def score(self, X, y, sample_weight=None):
"""
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` w.r.t. `y`.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
| (self, X, y, sample_weight=None) |
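A hedged sketch making the relation in `score` explicit for classifiers: it is simply the accuracy of `predict` on the given data (illustrative dataset).

from sklearn.ensemble import AdaBoostClassifier
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score

X, y = make_classification(random_state=0)
clf = AdaBoostClassifier(algorithm="SAMME", random_state=0).fit(X, y)
assert clf.score(X, y) == accuracy_score(y, clf.predict(X))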
16,492 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: sklearn.ensemble._weight_boosting.AdaBoostClassifier, *, sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$') -> sklearn.ensemble._weight_boosting.AdaBoostClassifier |
16,494 | sklearn.utils._metadata_requests | set_score_request | Request metadata passed to the ``score`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``score`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``score``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``score``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: sklearn.ensemble._weight_boosting.AdaBoostClassifier, *, sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$') -> sklearn.ensemble._weight_boosting.AdaBoostClassifier |
16,495 | sklearn.ensemble._weight_boosting | staged_decision_function | Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
score : generator of ndarray of shape (n_samples, k)
The decision function of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
| def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
score : generator of ndarray of shape (n_samples, k)
The decision function of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self)
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.0
for weight, estimator in zip(self.estimator_weights_, self.estimators_):
norm += weight
# TODO(1.6): Remove, because "algorithm" param will be deprecated in
# 1.6
if self.algorithm == "SAMME.R":
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = np.where(
(estimator.predict(X) == classes).T,
weight,
-1 / (n_classes - 1) * weight,
)
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
| (self, X) |
16,496 | sklearn.ensemble._weight_boosting | staged_predict | Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes.
| def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes.
"""
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))
| (self, X) |
16,497 | sklearn.ensemble._weight_boosting | staged_predict_proba | Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
p : generator of ndarray of shape (n_samples,)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
| def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
p : generator of ndarray of shape (n_samples,)
The class probabilities of the input samples. The order of
outputs is the same as that of the :term:`classes_` attribute.
"""
n_classes = self.n_classes_
for decision in self.staged_decision_function(X):
yield self._compute_proba_from_decision(decision, n_classes)
| (self, X) |
16,498 | sklearn.ensemble._weight_boosting | staged_score | Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
Labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Yields
------
z : float
| def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
Labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Yields
------
z : float
"""
X = self._check_X(X)
for y_pred in self.staged_predict(X):
if is_classifier(self):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
| (self, X, y, sample_weight=None) |
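A hedged sketch of the monitoring use-case described for the staged_* generators above: evaluate held-out accuracy after every boosting round and pick the best round. The dataset and split are illustrative assumptions.

from sklearn.ensemble import AdaBoostClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=1000, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = AdaBoostClassifier(algorithm="SAMME", n_estimators=50, random_state=0).fit(X_tr, y_tr)
test_scores = list(clf.staged_score(X_te, y_te))       # one accuracy per boosting round
best_round = 1 + test_scores.index(max(test_scores))   # 1-based index of the best round
print(best_round, max(test_scores))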
16,499 | sklearn.ensemble._weight_boosting | AdaBoostRegressor | An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
estimator : object, default=None
The base estimator from which the boosted ensemble is built.
If ``None``, then the base estimator is
:class:`~sklearn.tree.DecisionTreeRegressor` initialized with
`max_depth=3`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
Values must be in the range `[1, inf)`.
learning_rate : float, default=1.0
Weight applied to each regressor at each boosting iteration. A higher
learning rate increases the contribution of each regressor. There is
a trade-off between the `learning_rate` and `n_estimators` parameters.
Values must be in the range `(0.0, inf)`.
loss : {'linear', 'square', 'exponential'}, default='linear'
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `estimator` at each
boosting iteration.
Thus, it is only used when `estimator` exposes a `random_state`.
In addition, it controls the bootstrap of the weights used to train the
`estimator` at each boosting iteration.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdaBoostClassifier : An AdaBoost classifier.
GradientBoostingRegressor : Gradient Boosting Regression Tree.
sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
Examples
--------
>>> from sklearn.ensemble import AdaBoostRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
>>> regr.fit(X, y)
AdaBoostRegressor(n_estimators=100, random_state=0)
>>> regr.predict([[0, 0, 0, 0]])
array([4.7972...])
>>> regr.score(X, y)
0.9771...
| class AdaBoostRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseWeightBoosting):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
estimator : object, default=None
The base estimator from which the boosted ensemble is built.
If ``None``, then the base estimator is
:class:`~sklearn.tree.DecisionTreeRegressor` initialized with
`max_depth=3`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
Values must be in the range `[1, inf)`.
learning_rate : float, default=1.0
Weight applied to each regressor at each boosting iteration. A higher
learning rate increases the contribution of each regressor. There is
a trade-off between the `learning_rate` and `n_estimators` parameters.
Values must be in the range `(0.0, inf)`.
loss : {'linear', 'square', 'exponential'}, default='linear'
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `estimator` at each
boosting iteration.
Thus, it is only used when `estimator` exposes a `random_state`.
In addition, it controls the bootstrap of the weights used to train the
`estimator` at each boosting iteration.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdaBoostClassifier : An AdaBoost classifier.
GradientBoostingRegressor : Gradient Boosting Regression Tree.
sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
Examples
--------
>>> from sklearn.ensemble import AdaBoostRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
>>> regr.fit(X, y)
AdaBoostRegressor(n_estimators=100, random_state=0)
>>> regr.predict([[0, 0, 0, 0]])
array([4.7972...])
>>> regr.score(X, y)
0.9771...
"""
_parameter_constraints: dict = {
**BaseWeightBoosting._parameter_constraints,
"loss": [StrOptions({"linear", "square", "exponential"})],
}
def __init__(
self,
estimator=None,
*,
n_estimators=50,
learning_rate=1.0,
loss="linear",
random_state=None,
):
super().__init__(
estimator=estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state,
)
self.loss = loss
self.random_state = random_state
def _validate_estimator(self):
"""Check the estimator and set the estimator_ attribute."""
super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Controls also the bootstrap of the weights used to train the weak
learner, which samples the training set with replacement.
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator(random_state=random_state)
# Weighted sampling of the training set with replacement
bootstrap_idx = random_state.choice(
np.arange(_num_samples(X)),
size=_num_samples(X),
replace=True,
p=sample_weight,
)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
X_ = _safe_indexing(X, bootstrap_idx)
y_ = _safe_indexing(y, bootstrap_idx)
estimator.fit(X_, y_)
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
sample_mask = sample_weight > 0
masked_sample_weight = sample_weight[sample_mask]
masked_error_vector = error_vect[sample_mask]
error_max = masked_error_vector.max()
if error_max != 0:
masked_error_vector /= error_max
if self.loss == "square":
masked_error_vector **= 2
elif self.loss == "exponential":
masked_error_vector = 1.0 - np.exp(-masked_error_vector)
# Calculate the average loss
estimator_error = (masked_sample_weight * masked_error_vector).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1.0, 0.0
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1.0 - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1.0 / beta)
if not iboost == self.n_estimators - 1:
sample_weight[sample_mask] *= np.power(
beta, (1.0 - masked_error_vector) * self.learning_rate
)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]
# Return median predictions
return predictions[np.arange(_num_samples(X)), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted regression values.
"""
check_is_fitted(self)
X = self._check_X(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted regression values.
"""
check_is_fitted(self)
X = self._check_X(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| (estimator=None, *, n_estimators=50, learning_rate=1.0, loss='linear', random_state=None) |
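A minimal usage sketch for the regressor defined above; the toy data below is illustrative and not part of the source. The loss argument selects the "linear", "square", or "exponential" branch seen in _boost.

import numpy as np
from sklearn.ensemble import AdaBoostRegressor

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(200, 1))
y = np.sin(X).ravel() + rng.normal(scale=0.1, size=200)

# Each boosting round fits a depth-3 decision tree by default (see _validate_estimator)
reg = AdaBoostRegressor(n_estimators=50, learning_rate=1.0, loss="linear", random_state=0)
reg.fit(X, y)
print(reg.predict(X[:5]))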
16,502 | sklearn.ensemble._weight_boosting | __init__ | null | def __init__(
self,
estimator=None,
*,
n_estimators=50,
learning_rate=1.0,
loss="linear",
random_state=None,
):
super().__init__(
estimator=estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state,
)
self.loss = loss
self.random_state = random_state
| (self, estimator=None, *, n_estimators=50, learning_rate=1.0, loss='linear', random_state=None) |
16,508 | sklearn.ensemble._weight_boosting | _boost | Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Controls also the bootstrap of the weights used to train the weak
learner (sampling with replacement).
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
| def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Controls also the bootstrap of the weights used to train the weak
learner (sampling with replacement).
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator(random_state=random_state)
# Weighted sampling of the training set with replacement
bootstrap_idx = random_state.choice(
np.arange(_num_samples(X)),
size=_num_samples(X),
replace=True,
p=sample_weight,
)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
X_ = _safe_indexing(X, bootstrap_idx)
y_ = _safe_indexing(y, bootstrap_idx)
estimator.fit(X_, y_)
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
sample_mask = sample_weight > 0
masked_sample_weight = sample_weight[sample_mask]
masked_error_vector = error_vect[sample_mask]
error_max = masked_error_vector.max()
if error_max != 0:
masked_error_vector /= error_max
if self.loss == "square":
masked_error_vector **= 2
elif self.loss == "exponential":
masked_error_vector = 1.0 - np.exp(-masked_error_vector)
# Calculate the average loss
estimator_error = (masked_sample_weight * masked_error_vector).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1.0, 0.0
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1.0 - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1.0 / beta)
if not iboost == self.n_estimators - 1:
sample_weight[sample_mask] *= np.power(
beta, (1.0 - masked_error_vector) * self.learning_rate
)
return sample_weight, estimator_weight, estimator_error
| (self, iboost, X, y, sample_weight, random_state) |
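A small NumPy sketch of the AdaBoost.R2 update performed in _boost above, using made-up per-sample errors to show how beta, the estimator weight, and the sample weights evolve; renormalization of the weights happens in the calling fit loop, not here.

import numpy as np

sample_weight = np.full(4, 0.25)
error_vect = np.array([0.0, 0.2, 0.4, 0.8])            # normalized errors, linear loss
estimator_error = (sample_weight * error_vect).sum()    # average loss, here 0.35
beta = estimator_error / (1.0 - estimator_error)         # ~0.54 since error < 0.5
estimator_weight = 1.0 * np.log(1.0 / beta)              # learning_rate = 1.0
sample_weight *= np.power(beta, 1.0 - error_vect)        # well-predicted samples shrink most
sample_weight /= sample_weight.sum()                      # renormalize for display
print(beta, estimator_weight, sample_weight)              # harder samples end up with more weight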
16,513 | sklearn.ensemble._weight_boosting | _get_median_predict | null | def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]
# Return median predictions
return predictions[np.arange(_num_samples(X)), median_estimators]
| (self, X, limit) |
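An illustrative NumPy walk-through of the weighted-median logic in _get_median_predict for a single sample; the values are invented, and np.cumsum stands in for sklearn's stable_cumsum.

import numpy as np

predictions = np.array([[2.0, 1.0, 3.0]])               # one sample, three estimators
estimator_weights = np.array([0.5, 0.2, 0.3])
sorted_idx = np.argsort(predictions, axis=1)              # order the predictions per sample
weight_cdf = np.cumsum(estimator_weights[sorted_idx], axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)               # first estimator past half the weight
median_estimators = sorted_idx[np.arange(1), median_idx]
print(predictions[np.arange(1), median_estimators])       # -> [2.0], the weighted median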
16,521 | sklearn.ensemble._weight_boosting | _validate_estimator | Check the estimator and set the estimator_ attribute. | def _validate_estimator(self):
"""Check the estimator and set the estimator_ attribute."""
super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3))
| (self) |
16,526 | sklearn.ensemble._weight_boosting | predict | Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted regression values.
| def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted regression values.
"""
check_is_fitted(self)
X = self._check_X(X)
return self._get_median_predict(X, len(self.estimators_))
| (self, X) |
16,527 | sklearn.base | score | Return the coefficient of determination of the prediction.
The coefficient of determination :math:`R^2` is defined as
:math:`(1 - \frac{u}{v})`, where :math:`u` is the residual
sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always predicts
the expected value of `y`, disregarding the input features, would get
a :math:`R^2` score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed
kernel matrix or a list of generic objects instead with shape
``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
is the number of samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
:math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
Notes
-----
The :math:`R^2` score used when calling ``score`` on a regressor uses
``multioutput='uniform_average'`` from version 0.23 to keep consistent
with default value of :func:`~sklearn.metrics.r2_score`.
This influences the ``score`` method of all the multioutput
regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`).
| def score(self, X, y, sample_weight=None):
"""Return the coefficient of determination of the prediction.
The coefficient of determination :math:`R^2` is defined as
:math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`
is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always predicts
the expected value of `y`, disregarding the input features, would get
a :math:`R^2` score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed
kernel matrix or a list of generic objects instead with shape
``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
is the number of samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
:math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
Notes
-----
The :math:`R^2` score used when calling ``score`` on a regressor uses
``multioutput='uniform_average'`` from version 0.23 to keep consistent
with default value of :func:`~sklearn.metrics.r2_score`.
This influences the ``score`` method of all the multioutput
regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`).
"""
from .metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred, sample_weight=sample_weight)
| (self, X, y, sample_weight=None) |
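The R^2 definition in the docstring above can be reproduced directly with NumPy; the numbers are toy values, and the result matches sklearn.metrics.r2_score.

import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
u = ((y_true - y_pred) ** 2).sum()            # residual sum of squares
v = ((y_true - y_true.mean()) ** 2).sum()     # total sum of squares
print(1 - u / v)                               # ~0.9486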
16,531 | sklearn.ensemble._weight_boosting | staged_predict | Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted regression values.
| def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted regression values.
"""
check_is_fitted(self)
X = self._check_X(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| (self, X) |
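A sketch of how the staged_predict generator above can be used to monitor test error after each boosting round; the data split and sizes are illustrative.

import numpy as np
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(300, 1))
y = np.sin(X).ravel() + rng.normal(scale=0.1, size=300)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

reg = AdaBoostRegressor(n_estimators=20, random_state=0).fit(X_train, y_train)
errors = [mean_squared_error(y_test, y_stage) for y_stage in reg.staged_predict(X_test)]
print(np.argmin(errors) + 1, "estimators give the lowest test MSE")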
16,533 | imodels.util.automl | AutoInterpretableClassifier | null | class AutoInterpretableClassifier(AutoInterpretableModel, ClassifierMixin):
...
| (param_grid=None, refit=True) |
16,535 | imodels.util.automl | __init__ | null | def __init__(self, param_grid=None, refit=True):
if param_grid is None:
if isinstance(self, ClassifierMixin):
self.param_grid = self.PARAM_GRID_DEFAULT_CLASSIFICATION
elif isinstance(self, RegressorMixin):
self.param_grid = self.PARAM_GRID_DEFAULT_REGRESSION
else:
self.param_grid = param_grid
self.refit = refit
| (self, param_grid=None, refit=True) |
16,549 | imodels.util.automl | fit | null | def fit(self, X, y, cv=5):
self.pipe_ = Pipeline([("est", BaseEstimator())]
) # Placeholder Estimator
if isinstance(self, ClassifierMixin):
scoring = "roc_auc"
elif isinstance(self, RegressorMixin):
scoring = "r2"
self.est_ = GridSearchCV(
self.pipe_, self.param_grid, scoring=scoring, cv=cv, refit=self.refit)
self.est_.fit(X, y)
return self
| (self, X, y, cv=5) |
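A standalone sketch of the grid-search-over-a-placeholder-pipeline pattern used in fit above; the param grid here is hypothetical and is not the imodels PARAM_GRID_DEFAULT, it only shows how the "est" step gets swapped for concrete estimators.

from sklearn.base import BaseEstimator
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)
pipe = Pipeline([("est", BaseEstimator())])   # placeholder step, replaced by each grid candidate
param_grid = [
    {"est": [DecisionTreeClassifier()], "est__max_depth": [2, 3]},
    {"est": [LogisticRegression(max_iter=1000)], "est__C": [0.1, 1.0]},
]
search = GridSearchCV(pipe, param_grid, scoring="roc_auc", cv=5)
search.fit(X, y)
print(search.best_params_)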
16,552 | imodels.util.automl | predict | null | def predict(self, X):
return self.est_.predict(X)
| (self, X) |
16,553 | imodels.util.automl | predict_proba | null | def predict_proba(self, X):
return self.est_.predict_proba(X)
| (self, X) |
16,554 | imodels.util.automl | score | null | def score(self, X, y):
return self.est_.score(X, y)
| (self, X, y) |
16,555 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
cv : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``cv`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.util.automl.AutoInterpretableClassifier, *, cv: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.util.automl.AutoInterpretableClassifier |
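A brief sketch of how a generated set_fit_request method is used in practice; it requires scikit-learn >= 1.3 with metadata routing enabled, and LogisticRegression stands in here for the estimator above (whose own request covers the cv parameter).

import sklearn
from sklearn.linear_model import LogisticRegression

sklearn.set_config(enable_metadata_routing=True)          # routing is opt-in
est = LogisticRegression().set_fit_request(sample_weight=True)
print(est.get_metadata_routing())                          # shows the recorded request
sklearn.set_config(enable_metadata_routing=False)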
16,557 | imodels.util.automl | AutoInterpretableRegressor | null | class AutoInterpretableRegressor(AutoInterpretableModel, RegressorMixin):
...
| (param_grid=None, refit=True) |
16,581 | imodels.experimental.bartpy.sklearnmodel | BART | null | class BART(SklearnModel):
@staticmethod
def _get_n_nodes(trees):
nodes = 0
for tree in trees:
nodes += len(tree.decision_nodes)
return nodes
@property
def sample_complexity(self):
# samples = self._model_samples
# trees = [s.trees for s in samples]
complexities = [self._get_n_nodes(t) for t in self.trees]
return np.sum(complexities)
@staticmethod
def sub_forest(trees, n_nodes):
nodes = 0
for i, tree in enumerate(trees):
nodes += len(tree.decision_nodes)
if nodes >= n_nodes:
return trees[0:i + 1]
@property
def trees(self):
trs = [s.trees for s in self._model_samples]
return trs
def update_complexity(self, i):
samples_complexity = [self._get_n_nodes(t) for t in self.trees]
# complexity_sum = 0
arg_sort_complexity = np.argsort(samples_complexity)
self._model_samples = self._model_samples[arg_sort_complexity[:i + 1]]
return self
| (n_trees: int = 200, n_chains: int = 4, sigma_a: float = 0.001, sigma_b: float = 0.001, n_samples: int = 200, n_burn: int = 200, thin: float = 0.1, alpha: float = 0.95, beta: float = 2.0, store_in_sample_predictions: bool = False, store_acceptance_trace: bool = False, tree_sampler: imodels.experimental.bartpy.samplers.treemutation.TreeMutationSampler = <imodels.experimental.bartpy.samplers.unconstrainedtree.treemutation.UnconstrainedTreeMutationSampler object at 0x7f16638312a0>, initializer: Optional[imodels.experimental.bartpy.initializers.initializer.Initializer] = None, n_jobs=-1, classification: bool = False, max_rules=None) |
16,583 | imodels.experimental.bartpy.sklearnmodel | __init__ | null | def __init__(self,
n_trees: int = 200,
n_chains: int = 4,
sigma_a: float = 0.001,
sigma_b: float = 0.001,
n_samples: int = 200,
n_burn: int = 200,
thin: float = 0.1,
alpha: float = 0.95,
beta: float = 2.,
store_in_sample_predictions: bool = False,
store_acceptance_trace: bool = False,
tree_sampler: TreeMutationSampler = get_tree_sampler(0.5, 0.5),
initializer: Optional[Initializer] = None,
n_jobs=-1,
classification: bool = False,
max_rules=None):
self.n_trees = n_trees
self.n_chains = n_chains
self.sigma_a = sigma_a
self.sigma_b = sigma_b
self.n_burn = n_burn
self.n_samples = n_samples
self.p_grow = 0.5
self.p_prune = 0.5
self.alpha = alpha
self.beta = beta
self.thin = thin
self.n_jobs = n_jobs
self.store_in_sample_predictions = store_in_sample_predictions
self.store_acceptance_trace = store_acceptance_trace
self.columns = None
self.tree_sampler = tree_sampler
self.initializer = initializer
self.schedule = SampleSchedule(self.tree_sampler, LeafNodeSampler(), SigmaSampler())
self.sampler = ModelSampler(self.schedule)
self.classification = classification
self.max_rules = max_rules
self.sigma, self.data, self.model, self._prediction_samples, self._model_samples, self.extract = [None] * 6
| (self, n_trees: int = 200, n_chains: int = 4, sigma_a: float = 0.001, sigma_b: float = 0.001, n_samples: int = 200, n_burn: int = 200, thin: float = 0.1, alpha: float = 0.95, beta: float = 2.0, store_in_sample_predictions: bool = False, store_acceptance_trace: bool = False, tree_sampler: imodels.experimental.bartpy.samplers.treemutation.TreeMutationSampler = <imodels.experimental.bartpy.samplers.unconstrainedtree.treemutation.UnconstrainedTreeMutationSampler object at 0x7f16638312a0>, initializer: Optional[imodels.experimental.bartpy.initializers.initializer.Initializer] = None, n_jobs=-1, classification: bool = False, max_rules=None) |
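A minimal fit/predict sketch for the BART wrapper above, assuming the experimental imodels bartpy dependencies are installed; the toy data and the small chain/sample counts are only there to keep the MCMC run short.

import numpy as np
from imodels.experimental.bartpy.sklearnmodel import BART

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 3))
y = X[:, 0] + 0.1 * rng.normal(size=100)

model = BART(n_trees=20, n_chains=2, n_samples=50, n_burn=50, n_jobs=1)
model.fit(X, y)
print(model.predict(X)[:5])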
16,587 | imodels.experimental.bartpy.sklearnmodel | _chain_pred_arr | null | def _chain_pred_arr(self, X, chain_number):
chain_len = int(self.n_samples)
samples_chain = self._model_samples[chain_number * chain_len: (chain_number + 1) * chain_len]
predictions_transformed = [x.predict(X) for x in samples_chain]
return predictions_transformed
| (self, X, chain_number) |
16,590 | imodels.experimental.bartpy.sklearnmodel | _combine_chains | null | @staticmethod
def _combine_chains(extract: List[Chain]) -> Chain:
keys = list(extract[0].keys())
combined = {}
for key in keys:
combined[key] = np.concatenate([chain[key] for chain in extract], axis=0)
return combined
| (extract: List[Mapping[str, Union[List[Any], numpy.ndarray]]]) -> Mapping[str, Union[List[Any], numpy.ndarray]] |
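The chain-combination step above is a per-key concatenation across chain dictionaries; a toy illustration with invented keys and values:

import numpy as np

chain_a = {"model": np.array([1, 2]), "acceptance": np.array([0.4, 0.6])}
chain_b = {"model": np.array([3, 4]), "acceptance": np.array([0.5, 0.7])}
combined = {k: np.concatenate([c[k] for c in (chain_a, chain_b)], axis=0) for k in chain_a}
print(combined["model"])   # [1 2 3 4]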
16,591 | imodels.experimental.bartpy.sklearnmodel | _construct_model | null | def _construct_model(self, X: np.ndarray, y: np.ndarray) -> Model:
if len(X) == 0 or X.shape[1] == 0:
raise ValueError("Empty covariate matrix passed")
self.data = self._convert_covariates_to_data(X, y)
self.sigma = Sigma(self.sigma_a, self.sigma_b, self.data.y.normalizing_scale, self.classification)
self.model = Model(self.data,
self.sigma,
n_trees=self.n_trees,
alpha=self.alpha,
beta=self.beta,
initializer=self.initializer,
classification=self.classification)
n_trees = self.n_trees if self.initializer is None else self.initializer.n_trees
self.n_trees = n_trees
return self.model
| (self, X: numpy.ndarray, y: numpy.ndarray) -> imodels.experimental.bartpy.model.Model |
16,592 | imodels.experimental.bartpy.sklearnmodel | _convert_covariates_to_data | null | @staticmethod
def _convert_covariates_to_data(X: np.ndarray, y: np.ndarray) -> Data:
from copy import deepcopy
if type(X) == pd.DataFrame:
X: pd.DataFrame = X
X = X.values
return Data(deepcopy(X), deepcopy(y), normalize=True)
| (X: numpy.ndarray, y: numpy.ndarray) -> imodels.experimental.bartpy.data.Data |
16,595 | imodels.experimental.bartpy.sklearnmodel | _get_n_nodes | null | @staticmethod
def _get_n_nodes(trees):
nodes = 0
for tree in trees:
nodes += len(tree.decision_nodes)
return nodes
| (trees) |
16,598 | imodels.experimental.bartpy.sklearnmodel | _out_of_sample_predict | null | def _out_of_sample_predict(self, X):
samples = self._model_samples
predictions_transformed = [x.predict(X) for x in samples]
predictions = self.data.y.unnormalize_y(np.mean(predictions_transformed, axis=0))
if self.classification:
predictions = scipy.stats.norm.cdf(predictions)
return predictions
| (self, X) |
16,603 | imodels.experimental.bartpy.sklearnmodel | between_chains_var | null | def between_chains_var(self, X):
all_predictions = np.stack([self.data.y.unnormalize_y(x.predict(X)) for x in self._model_samples], axis=1)
def _get_var(preds_arr):
mean_pred = preds_arr.mean(axis=1)
var = np.mean((preds_arr - np.expand_dims(mean_pred, 1)) ** 2)
return var
total_var = _get_var(all_predictions)
within_chain_var = 0
for c in range(self.n_chains):
chain_preds = self._chain_pred_arr(X, c)
within_chain_var += _get_var(np.stack(chain_preds, axis=1))
return total_var - within_chain_var
| (self, X) |
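A NumPy sketch of the quantity computed in between_chains_var above, on made-up posterior predictions; the nested helper mirrors the source's _get_var, and the result is total variance minus the summed per-chain variances.

import numpy as np

# Two chains of predictions, shaped (n_data_points, n_samples_per_chain)
chain_1 = np.random.RandomState(0).normal(loc=0.0, size=(4, 3))
chain_2 = np.random.RandomState(1).normal(loc=1.0, size=(4, 3))

def _get_var(preds_arr):
    mean_pred = preds_arr.mean(axis=1)
    return np.mean((preds_arr - np.expand_dims(mean_pred, 1)) ** 2)

total_var = _get_var(np.concatenate([chain_1, chain_2], axis=1))
within_chain_var = _get_var(chain_1) + _get_var(chain_2)
print(total_var - within_chain_var)   # the same quantity between_chains_var returns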
16,604 | imodels.experimental.bartpy.sklearnmodel | chain_mse_std | null | def chain_mse_std(self, X, y, chain_number):
predictions_transformed = self._chain_pred_arr(X, chain_number)
predictions_std = np.std(
[mean_squared_error(self.data.y.unnormalize_y(preds), y) for preds in predictions_transformed])
return predictions_std
| (self, X, y, chain_number) |
16,605 | imodels.experimental.bartpy.sklearnmodel | chain_predictions | null | def chain_predictions(self, X, chain_number):
predictions_transformed = self._chain_pred_arr(X, chain_number)
preds_arr = [self.data.y.unnormalize_y(preds) for preds in predictions_transformed]
return preds_arr
| (self, X, chain_number) |
16,606 | imodels.experimental.bartpy.sklearnmodel | f_chains |
List of methods to run MCMC chains
Useful for running multiple models in parallel
Returns
-------
List[Callable[[], Extract]]
List of method to run individual chains
Length of n_chains
| def f_chains(self) -> List[Callable[[], Chain]]:
"""
List of methods to run MCMC chains
Useful for running multiple models in parallel
Returns
-------
List[Callable[[], Extract]]
List of method to run individual chains
Length of n_chains
"""
return [delayed_run_chain() for _ in range(self.n_chains)]
| (self) -> List[Callable[[], Mapping[str, Union[List[Any], numpy.ndarray]]]] |
16,607 | imodels.experimental.bartpy.sklearnmodel | f_delayed_chains |
Access point for getting access to delayed methods for running chains
Useful for when you want to run multiple instances of the model in parallel
e.g. when calculating a null distribution for feature importance
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
List[Callable[[], ChainExtract]]
| def f_delayed_chains(self, X: np.ndarray, y: np.ndarray):
"""
Access point for getting access to delayed methods for running chains
Useful for when you want to run multiple instances of the model in parallel
e.g. when calculating a null distribution for feature importance
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
List[Callable[[], ChainExtract]]
"""
return [delayed(x)(self, X, y) for x in self.f_chains()]
| (self, X: numpy.ndarray, y: numpy.ndarray) |
16,608 | imodels.experimental.bartpy.sklearnmodel | fit |
Learn the model based on training data
Parameters
----------
X: pd.DataFrame
training covariates
y: np.ndarray
training targets
Returns
-------
SklearnModel
self with trained parameter values
| def fit(self, X: Union[np.ndarray, pd.DataFrame], y: np.ndarray) -> 'SklearnModel':
"""
Learn the model based on training data
Parameters
----------
X: pd.DataFrame
training covariates
y: np.ndarray
training targets
Returns
-------
SklearnModel
self with trained parameter values
"""
self.model = self._construct_model(X, y)
self.extract = Parallel(n_jobs=self.n_jobs)(self.f_delayed_chains(X, y))
self.combined_chains = self._combine_chains(self.extract)
self._model_samples, self._prediction_samples = self.combined_chains["model"], self.combined_chains[
"in_sample_predictions"]
self._acceptance_trace = self.combined_chains["acceptance"]
self._likelihood = self.combined_chains["likelihood"]
self._probs = self.combined_chains["probs"]
self.fitted_ = True
return self
| (self, X: Union[numpy.ndarray, pandas.core.frame.DataFrame], y: numpy.ndarray) -> imodels.experimental.bartpy.sklearnmodel.SklearnModel |
16,609 | imodels.experimental.bartpy.sklearnmodel | fit_predict | null | def fit_predict(self, X, y):
self.fit(X, y)
if self.store_in_sample_predictions:
return self.predict()
else:
return self.predict(X)
| (self, X, y) |
16,610 | imodels.experimental.bartpy.sklearnmodel | from_extract |
Create a copy of the model using an extract
Useful for doing operations on extracts created in external processes like feature selection
Parameters
----------
extract: Extract
samples produced by delayed chain methods
X: np.ndarray
Covariate matrix
y: np.ndarray
Target variable
Returns
-------
SklearnModel
Copy of the current model with samples
| def from_extract(self, extract: List[Chain], X: np.ndarray, y: np.ndarray) -> 'SklearnModel':
"""
Create a copy of the model using an extract
Useful for doing operations on extracts created in external processes like feature selection
Parameters
----------
extract: Extract
samples produced by delayed chain methods
X: np.ndarray
Covariate matrix
y: np.ndarray
Target variable
Returns
-------
SklearnModel
Copy of the current model with samples
"""
new_model = deepcopy(self)
combined_chain = self._combine_chains(extract)
self._model_samples, self._prediction_samples = combined_chain["model"], combined_chain["in_sample_predictions"]
self._acceptance_trace = combined_chain["acceptance"]
new_model.data = self._convert_covariates_to_data(X, y)
return new_model
| (self, extract: List[Mapping[str, Union[List[Any], numpy.ndarray]]], X: numpy.ndarray, y: numpy.ndarray) -> imodels.experimental.bartpy.sklearnmodel.SklearnModel |
16,613 | imodels.experimental.bartpy.sklearnmodel | l2_error |
Calculate the squared errors for each row in the covariate matrix
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
np.ndarray
Squared error for each observation
| def l2_error(self, X=None, y=None) -> np.ndarray:
"""
Calculate the squared errors for each row in the covariate matrix
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
np.ndarray
Squared error for each observation
"""
return np.square(self.residuals(X, y))
| (self, X=None, y=None) -> numpy.ndarray |
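l2_error above squares the model residuals; the equivalent computation by hand on toy values, assuming residuals are the difference between observed and predicted targets:

import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.1, 1.8, 3.5])
print(np.square(y_true - y_pred))   # per-observation squared error: [0.01 0.04 0.25]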