Dataset columns (name: type, observed range):

id: int32, values 0 to 252k
repo: string, length 7 to 55
path: string, length 4 to 127
func_name: string, length 1 to 88
original_string: string, length 75 to 19.8k
language: string, 1 distinct value
code: string, length 75 to 19.8k
code_tokens: sequence
docstring: string, length 3 to 17.3k
docstring_tokens: sequence
sha: string, length 40
url: string, length 87 to 242
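This column layout is what a HuggingFace-style dataset viewer exports for a corpus of (function, docstring) records. As a minimal sketch of how such records could be iterated with the `datasets` library — the dataset identifier below is hypothetical and would need to be replaced with the real name or a local path:

```python
from datasets import load_dataset

# Hypothetical identifier; substitute the actual dataset name or a local path.
ds = load_dataset("someuser/python-functions-with-docstrings", split="train")

# Each record exposes the columns listed above.
for record in ds.select(range(3)):
    print(record["id"], record["repo"], record["func_name"])
    print(record["url"])
    print(record["docstring"][:80])
```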
800
aegirhall/console-menu
consolemenu/validators/regex.py
RegexValidator.validate
def validate(self, input_string): """ Validate input_string against a regex pattern :return: True if match / False otherwise """ validation_result = False try: validation_result = bool(match(pattern=self.pattern, string=input_string)) except TypeError as e: self.log.error( 'Exception while validating Regex, pattern={}, input_string={} - exception: {}'.format(self.pattern, input_string, e)) return validation_result
python
def validate(self, input_string): """ Validate input_string against a regex pattern :return: True if match / False otherwise """ validation_result = False try: validation_result = bool(match(pattern=self.pattern, string=input_string)) except TypeError as e: self.log.error( 'Exception while validating Regex, pattern={}, input_string={} - exception: {}'.format(self.pattern, input_string, e)) return validation_result
[ "def", "validate", "(", "self", ",", "input_string", ")", ":", "validation_result", "=", "False", "try", ":", "validation_result", "=", "bool", "(", "match", "(", "pattern", "=", "self", ".", "pattern", ",", "string", "=", "input_string", ")", ")", "except", "TypeError", "as", "e", ":", "self", ".", "log", ".", "error", "(", "'Exception while validating Regex, pattern={}, input_string={} - exception: {}'", ".", "format", "(", "self", ".", "pattern", ",", "input_string", ",", "e", ")", ")", "return", "validation_result" ]
Validate input_string against a regex pattern :return: True if match / False otherwise
[ "Validate", "input_string", "against", "a", "regex", "pattern" ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/validators/regex.py#L16-L30
801
aegirhall/console-menu
consolemenu/multiselect_menu.py
MultiSelectMenu.process_user_input
def process_user_input(self): """ This overrides the method in ConsoleMenu to allow for comma-delimited and range inputs. Examples: All of the following inputs would have the same result: * 1,2,3,4 * 1-4 * 1-2,3-4 * 1 - 4 * 1, 2, 3, 4 Raises: ValueError: If the input cannot be correctly parsed. """ user_input = self.screen.input() try: indexes = self.__parse_range_list(user_input) # Subtract 1 from each number for its actual index number indexes[:] = [x - 1 for x in indexes if 0 < x < len(self.items) + 1] for index in indexes: self.current_option = index self.select() except Exception as e: return
python
def process_user_input(self): """ This overrides the method in ConsoleMenu to allow for comma-delimited and range inputs. Examples: All of the following inputs would have the same result: * 1,2,3,4 * 1-4 * 1-2,3-4 * 1 - 4 * 1, 2, 3, 4 Raises: ValueError: If the input cannot be correctly parsed. """ user_input = self.screen.input() try: indexes = self.__parse_range_list(user_input) # Subtract 1 from each number for its actual index number indexes[:] = [x - 1 for x in indexes if 0 < x < len(self.items) + 1] for index in indexes: self.current_option = index self.select() except Exception as e: return
[ "def", "process_user_input", "(", "self", ")", ":", "user_input", "=", "self", ".", "screen", ".", "input", "(", ")", "try", ":", "indexes", "=", "self", ".", "__parse_range_list", "(", "user_input", ")", "# Subtract 1 from each number for its actual index number", "indexes", "[", ":", "]", "=", "[", "x", "-", "1", "for", "x", "in", "indexes", "if", "0", "<", "x", "<", "len", "(", "self", ".", "items", ")", "+", "1", "]", "for", "index", "in", "indexes", ":", "self", ".", "current_option", "=", "index", "self", ".", "select", "(", ")", "except", "Exception", "as", "e", ":", "return" ]
This overrides the method in ConsoleMenu to allow for comma-delimited and range inputs. Examples: All of the following inputs would have the same result: * 1,2,3,4 * 1-4 * 1-2,3-4 * 1 - 4 * 1, 2, 3, 4 Raises: ValueError: If the input cannot be correctly parsed.
[ "This", "overrides", "the", "method", "in", "ConsoleMenu", "to", "allow", "for", "comma", "-", "delimited", "and", "range", "inputs", "." ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/multiselect_menu.py#L43-L67
802
aegirhall/console-menu
consolemenu/console_menu.py
ConsoleMenu.remove_item
def remove_item(self, item): """ Remove the specified item from the menu. Args: item (MenuItem): the item to be removed. Returns: bool: True if the item was removed; False otherwise. """ for idx, _item in enumerate(self.items): if item == _item: del self.items[idx] return True return False
python
def remove_item(self, item): """ Remove the specified item from the menu. Args: item (MenuItem): the item to be removed. Returns: bool: True if the item was removed; False otherwise. """ for idx, _item in enumerate(self.items): if item == _item: del self.items[idx] return True return False
[ "def", "remove_item", "(", "self", ",", "item", ")", ":", "for", "idx", ",", "_item", "in", "enumerate", "(", "self", ".", "items", ")", ":", "if", "item", "==", "_item", ":", "del", "self", ".", "items", "[", "idx", "]", "return", "True", "return", "False" ]
Remove the specified item from the menu. Args: item (MenuItem): the item to be removed. Returns: bool: True if the item was removed; False otherwise.
[ "Remove", "the", "specified", "item", "from", "the", "menu", "." ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L116-L130
803
aegirhall/console-menu
consolemenu/console_menu.py
ConsoleMenu.remove_exit
def remove_exit(self): """ Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else. Returns: bool: True if item needed to be removed, False otherwise. """ if self.items: if self.items[-1] is self.exit_item: del self.items[-1] return True return False
python
def remove_exit(self): """ Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else. Returns: bool: True if item needed to be removed, False otherwise. """ if self.items: if self.items[-1] is self.exit_item: del self.items[-1] return True return False
[ "def", "remove_exit", "(", "self", ")", ":", "if", "self", ".", "items", ":", "if", "self", ".", "items", "[", "-", "1", "]", "is", "self", ".", "exit_item", ":", "del", "self", ".", "items", "[", "-", "1", "]", "return", "True", "return", "False" ]
Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else. Returns: bool: True if item needed to be removed, False otherwise.
[ "Remove", "the", "exit", "item", "if", "necessary", ".", "Used", "to", "make", "sure", "we", "only", "remove", "the", "exit", "item", "not", "something", "else", "." ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L144-L155
804
aegirhall/console-menu
consolemenu/console_menu.py
ConsoleMenu.draw
def draw(self): """ Refresh the screen and redraw the menu. Should be called whenever something changes that needs to be redrawn. """ self.screen.printf(self.formatter.format(title=self.title, subtitle=self.subtitle, items=self.items, prologue_text=self.prologue_text, epilogue_text=self.epilogue_text))
python
def draw(self): """ Refresh the screen and redraw the menu. Should be called whenever something changes that needs to be redrawn. """ self.screen.printf(self.formatter.format(title=self.title, subtitle=self.subtitle, items=self.items, prologue_text=self.prologue_text, epilogue_text=self.epilogue_text))
[ "def", "draw", "(", "self", ")", ":", "self", ".", "screen", ".", "printf", "(", "self", ".", "formatter", ".", "format", "(", "title", "=", "self", ".", "title", ",", "subtitle", "=", "self", ".", "subtitle", ",", "items", "=", "self", ".", "items", ",", "prologue_text", "=", "self", ".", "prologue_text", ",", "epilogue_text", "=", "self", ".", "epilogue_text", ")", ")" ]
Refresh the screen and redraw the menu. Should be called whenever something changes that needs to be redrawn.
[ "Refresh", "the", "screen", "and", "redraw", "the", "menu", ".", "Should", "be", "called", "whenever", "something", "changes", "that", "needs", "to", "be", "redrawn", "." ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L226-L231
805
aegirhall/console-menu
consolemenu/console_menu.py
ConsoleMenu.process_user_input
def process_user_input(self): """ Gets the next single character and decides what to do with it """ user_input = self.get_input() try: num = int(user_input) except Exception: return if 0 < num < len(self.items) + 1: self.current_option = num - 1 self.select() return user_input
python
def process_user_input(self): """ Gets the next single character and decides what to do with it """ user_input = self.get_input() try: num = int(user_input) except Exception: return if 0 < num < len(self.items) + 1: self.current_option = num - 1 self.select() return user_input
[ "def", "process_user_input", "(", "self", ")", ":", "user_input", "=", "self", ".", "get_input", "(", ")", "try", ":", "num", "=", "int", "(", "user_input", ")", "except", "Exception", ":", "return", "if", "0", "<", "num", "<", "len", "(", "self", ".", "items", ")", "+", "1", ":", "self", ".", "current_option", "=", "num", "-", "1", "self", ".", "select", "(", ")", "return", "user_input" ]
Gets the next single character and decides what to do with it
[ "Gets", "the", "next", "single", "character", "and", "decides", "what", "to", "do", "with", "it" ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L297-L311
806
aegirhall/console-menu
consolemenu/console_menu.py
ConsoleMenu.go_down
def go_down(self): """ Go down one, wrap to beginning if necessary """ if self.current_option < len(self.items) - 1: self.current_option += 1 else: self.current_option = 0 self.draw()
python
def go_down(self): """ Go down one, wrap to beginning if necessary """ if self.current_option < len(self.items) - 1: self.current_option += 1 else: self.current_option = 0 self.draw()
[ "def", "go_down", "(", "self", ")", ":", "if", "self", ".", "current_option", "<", "len", "(", "self", ".", "items", ")", "-", "1", ":", "self", ".", "current_option", "+=", "1", "else", ":", "self", ".", "current_option", "=", "0", "self", ".", "draw", "(", ")" ]
Go down one, wrap to beginning if necessary
[ "Go", "down", "one", "wrap", "to", "beginning", "if", "necessary" ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L323-L331
807
aegirhall/console-menu
consolemenu/console_menu.py
ConsoleMenu.go_up
def go_up(self): """ Go up one, wrap to end if necessary """ if self.current_option > 0: self.current_option += -1 else: self.current_option = len(self.items) - 1 self.draw()
python
def go_up(self): """ Go up one, wrap to end if necessary """ if self.current_option > 0: self.current_option += -1 else: self.current_option = len(self.items) - 1 self.draw()
[ "def", "go_up", "(", "self", ")", ":", "if", "self", ".", "current_option", ">", "0", ":", "self", ".", "current_option", "+=", "-", "1", "else", ":", "self", ".", "current_option", "=", "len", "(", "self", ".", "items", ")", "-", "1", "self", ".", "draw", "(", ")" ]
Go up one, wrap to end if necessary
[ "Go", "up", "one", "wrap", "to", "end", "if", "necessary" ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L333-L341
808
aegirhall/console-menu
consolemenu/selection_menu.py
SelectionMenu.get_selection
def get_selection(cls, strings, title="Select an option", subtitle=None, exit_option=True, _menu=None): """ Single-method way of getting a selection out of a list of strings. Args: strings (:obj:`list` of :obj:`str`): The list of strings this menu should be built from. title (str): The title of the menu. subtitle (str): The subtitle of the menu. exit_option (bool): Specifies whether this menu should show an exit item by default. Defaults to True. _menu: Should probably only be used for testing, pass in a list and the created menu used internally by the method will be appended to it Returns: int: The index of the selected option. """ menu = cls(strings, title, subtitle, exit_option) if _menu is not None: _menu.append(menu) menu.show() menu.join() return menu.selected_option
python
def get_selection(cls, strings, title="Select an option", subtitle=None, exit_option=True, _menu=None): """ Single-method way of getting a selection out of a list of strings. Args: strings (:obj:`list` of :obj:`str`): The list of strings this menu should be built from. title (str): The title of the menu. subtitle (str): The subtitle of the menu. exit_option (bool): Specifies whether this menu should show an exit item by default. Defaults to True. _menu: Should probably only be used for testing, pass in a list and the created menu used internally by the method will be appended to it Returns: int: The index of the selected option. """ menu = cls(strings, title, subtitle, exit_option) if _menu is not None: _menu.append(menu) menu.show() menu.join() return menu.selected_option
[ "def", "get_selection", "(", "cls", ",", "strings", ",", "title", "=", "\"Select an option\"", ",", "subtitle", "=", "None", ",", "exit_option", "=", "True", ",", "_menu", "=", "None", ")", ":", "menu", "=", "cls", "(", "strings", ",", "title", ",", "subtitle", ",", "exit_option", ")", "if", "_menu", "is", "not", "None", ":", "_menu", ".", "append", "(", "menu", ")", "menu", ".", "show", "(", ")", "menu", ".", "join", "(", ")", "return", "menu", ".", "selected_option" ]
Single-method way of getting a selection out of a list of strings. Args: strings (:obj:`list` of :obj:`str`): The list of strings this menu should be built from. title (str): The title of the menu. subtitle (str): The subtitle of the menu. exit_option (bool): Specifies whether this menu should show an exit item by default. Defaults to True. _menu: Should probably only be used for testing, pass in a list and the created menu used internally by the method will be appended to it Returns: int: The index of the selected option.
[ "Single", "-", "method", "way", "of", "getting", "a", "selection", "out", "of", "a", "list", "of", "strings", "." ]
1a28959d6f1dd6ac79c87b11efd8529d05532422
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/selection_menu.py#L29-L50
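Record 808 describes `SelectionMenu.get_selection` as a single-call way to get a choice out of a list of strings. A hedged usage sketch, assuming `SelectionMenu` is importable from the top-level `consolemenu` package:

```python
from consolemenu import SelectionMenu  # assumes SelectionMenu is exported by the package

# Presents a menu built from the strings and returns the index of the selection.
index = SelectionMenu.get_selection(["red", "green", "blue"], title="Pick a color")
print("Selected index:", index)
```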
809
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_object_is_ordered_dict
def ensure_object_is_ordered_dict(item, title): """ Checks that the item is an OrderedDict. If not, raises ValueError. """ assert isinstance(title, str) if not isinstance(item, OrderedDict): msg = "{} must be an OrderedDict. {} passed instead." raise TypeError(msg.format(title, type(item))) return None
python
def ensure_object_is_ordered_dict(item, title): """ Checks that the item is an OrderedDict. If not, raises ValueError. """ assert isinstance(title, str) if not isinstance(item, OrderedDict): msg = "{} must be an OrderedDict. {} passed instead." raise TypeError(msg.format(title, type(item))) return None
[ "def", "ensure_object_is_ordered_dict", "(", "item", ",", "title", ")", ":", "assert", "isinstance", "(", "title", ",", "str", ")", "if", "not", "isinstance", "(", "item", ",", "OrderedDict", ")", ":", "msg", "=", "\"{} must be an OrderedDict. {} passed instead.\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "title", ",", "type", "(", "item", ")", ")", ")", "return", "None" ]
Checks that the item is an OrderedDict. If not, raises ValueError.
[ "Checks", "that", "the", "item", "is", "an", "OrderedDict", ".", "If", "not", "raises", "ValueError", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L73-L83
810
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_object_is_string
def ensure_object_is_string(item, title): """ Checks that the item is a string. If not, raises ValueError. """ assert isinstance(title, str) if not isinstance(item, str): msg = "{} must be a string. {} passed instead." raise TypeError(msg.format(title, type(item))) return None
python
def ensure_object_is_string(item, title): """ Checks that the item is a string. If not, raises ValueError. """ assert isinstance(title, str) if not isinstance(item, str): msg = "{} must be a string. {} passed instead." raise TypeError(msg.format(title, type(item))) return None
[ "def", "ensure_object_is_string", "(", "item", ",", "title", ")", ":", "assert", "isinstance", "(", "title", ",", "str", ")", "if", "not", "isinstance", "(", "item", ",", "str", ")", ":", "msg", "=", "\"{} must be a string. {} passed instead.\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "title", ",", "type", "(", "item", ")", ")", ")", "return", "None" ]
Checks that the item is a string. If not, raises ValueError.
[ "Checks", "that", "the", "item", "is", "a", "string", ".", "If", "not", "raises", "ValueError", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L86-L96
811
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_object_is_ndarray
def ensure_object_is_ndarray(item, title): """ Ensures that a given mapping matrix is a dense numpy array. Raises a helpful TypeError if otherwise. """ assert isinstance(title, str) if not isinstance(item, np.ndarray): msg = "{} must be a np.ndarray. {} passed instead." raise TypeError(msg.format(title, type(item))) return None
python
def ensure_object_is_ndarray(item, title): """ Ensures that a given mapping matrix is a dense numpy array. Raises a helpful TypeError if otherwise. """ assert isinstance(title, str) if not isinstance(item, np.ndarray): msg = "{} must be a np.ndarray. {} passed instead." raise TypeError(msg.format(title, type(item))) return None
[ "def", "ensure_object_is_ndarray", "(", "item", ",", "title", ")", ":", "assert", "isinstance", "(", "title", ",", "str", ")", "if", "not", "isinstance", "(", "item", ",", "np", ".", "ndarray", ")", ":", "msg", "=", "\"{} must be a np.ndarray. {} passed instead.\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "title", ",", "type", "(", "item", ")", ")", ")", "return", "None" ]
Ensures that a given mapping matrix is a dense numpy array. Raises a helpful TypeError if otherwise.
[ "Ensures", "that", "a", "given", "mapping", "matrix", "is", "a", "dense", "numpy", "array", ".", "Raises", "a", "helpful", "TypeError", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L99-L110
812
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_columns_are_in_dataframe
def ensure_columns_are_in_dataframe(columns, dataframe, col_title='', data_title='data'): """ Checks whether each column in `columns` is in `dataframe`. Raises ValueError if any of the columns are not in the dataframe. Parameters ---------- columns : list of strings. Each string should represent a column heading in dataframe. dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. col_title : str, optional. Denotes the title of the columns that were passed to the function. data_title : str, optional. Denotes the title of the dataframe that is being checked to see whether it contains the passed columns. Default == 'data' Returns ------- None. """ # Make sure columns is an iterable assert isinstance(columns, Iterable) # Make sure dataframe is a pandas dataframe assert isinstance(dataframe, pd.DataFrame) # Make sure title is a string assert isinstance(col_title, str) assert isinstance(data_title, str) problem_cols = [col for col in columns if col not in dataframe.columns] if problem_cols != []: if col_title == '': msg = "{} not in {}.columns" final_msg = msg.format(problem_cols, data_title) else: msg = "The following columns in {} are not in {}.columns: {}" final_msg = msg.format(col_title, data_title, problem_cols) raise ValueError(final_msg) return None
python
def ensure_columns_are_in_dataframe(columns, dataframe, col_title='', data_title='data'): """ Checks whether each column in `columns` is in `dataframe`. Raises ValueError if any of the columns are not in the dataframe. Parameters ---------- columns : list of strings. Each string should represent a column heading in dataframe. dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. col_title : str, optional. Denotes the title of the columns that were passed to the function. data_title : str, optional. Denotes the title of the dataframe that is being checked to see whether it contains the passed columns. Default == 'data' Returns ------- None. """ # Make sure columns is an iterable assert isinstance(columns, Iterable) # Make sure dataframe is a pandas dataframe assert isinstance(dataframe, pd.DataFrame) # Make sure title is a string assert isinstance(col_title, str) assert isinstance(data_title, str) problem_cols = [col for col in columns if col not in dataframe.columns] if problem_cols != []: if col_title == '': msg = "{} not in {}.columns" final_msg = msg.format(problem_cols, data_title) else: msg = "The following columns in {} are not in {}.columns: {}" final_msg = msg.format(col_title, data_title, problem_cols) raise ValueError(final_msg) return None
[ "def", "ensure_columns_are_in_dataframe", "(", "columns", ",", "dataframe", ",", "col_title", "=", "''", ",", "data_title", "=", "'data'", ")", ":", "# Make sure columns is an iterable", "assert", "isinstance", "(", "columns", ",", "Iterable", ")", "# Make sure dataframe is a pandas dataframe", "assert", "isinstance", "(", "dataframe", ",", "pd", ".", "DataFrame", ")", "# Make sure title is a string", "assert", "isinstance", "(", "col_title", ",", "str", ")", "assert", "isinstance", "(", "data_title", ",", "str", ")", "problem_cols", "=", "[", "col", "for", "col", "in", "columns", "if", "col", "not", "in", "dataframe", ".", "columns", "]", "if", "problem_cols", "!=", "[", "]", ":", "if", "col_title", "==", "''", ":", "msg", "=", "\"{} not in {}.columns\"", "final_msg", "=", "msg", ".", "format", "(", "problem_cols", ",", "data_title", ")", "else", ":", "msg", "=", "\"The following columns in {} are not in {}.columns: {}\"", "final_msg", "=", "msg", ".", "format", "(", "col_title", ",", "data_title", ",", "problem_cols", ")", "raise", "ValueError", "(", "final_msg", ")", "return", "None" ]
Checks whether each column in `columns` is in `dataframe`. Raises ValueError if any of the columns are not in the dataframe. Parameters ---------- columns : list of strings. Each string should represent a column heading in dataframe. dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. col_title : str, optional. Denotes the title of the columns that were passed to the function. data_title : str, optional. Denotes the title of the dataframe that is being checked to see whether it contains the passed columns. Default == 'data' Returns ------- None.
[ "Checks", "whether", "each", "column", "in", "columns", "is", "in", "dataframe", ".", "Raises", "ValueError", "if", "any", "of", "the", "columns", "are", "not", "in", "the", "dataframe", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L113-L156
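A small illustration of the column check in record 812, using a made-up DataFrame (assumes pylogit is installed so that `choice_tools` is importable):

```python
import pandas as pd
from pylogit.choice_tools import ensure_columns_are_in_dataframe

df = pd.DataFrame({"obs_id": [1, 1], "alt_id": [1, 2], "choice": [1, 0]})

# Passes silently: both requested columns exist in df.
ensure_columns_are_in_dataframe(["obs_id", "choice"], df)

# Raises ValueError, since 'cost' is not a column of df.
try:
    ensure_columns_are_in_dataframe(["cost"], df, col_title="ind_vars", data_title="df")
except ValueError as err:
    print(err)
```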
813
timothyb0912/pylogit
pylogit/choice_tools.py
check_argument_type
def check_argument_type(long_form, specification_dict): """ Ensures that long_form is a pandas dataframe and that specification_dict is an OrderedDict, raising a ValueError otherwise. Parameters ---------- long_form : pandas dataframe. Contains one row for each available alternative, for each observation. specification_dict : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `"all_diff"` or `"all_same"`. If a list, the elements should be: - single objects that are within the alternative ID column of `long_form_df` - lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). Returns ------- None. """ if not isinstance(long_form, pd.DataFrame): msg = "long_form should be a pandas dataframe. It is a {}" raise TypeError(msg.format(type(long_form))) ensure_object_is_ordered_dict(specification_dict, "specification_dict") return None
python
def check_argument_type(long_form, specification_dict): """ Ensures that long_form is a pandas dataframe and that specification_dict is an OrderedDict, raising a ValueError otherwise. Parameters ---------- long_form : pandas dataframe. Contains one row for each available alternative, for each observation. specification_dict : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `"all_diff"` or `"all_same"`. If a list, the elements should be: - single objects that are within the alternative ID column of `long_form_df` - lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). Returns ------- None. """ if not isinstance(long_form, pd.DataFrame): msg = "long_form should be a pandas dataframe. It is a {}" raise TypeError(msg.format(type(long_form))) ensure_object_is_ordered_dict(specification_dict, "specification_dict") return None
[ "def", "check_argument_type", "(", "long_form", ",", "specification_dict", ")", ":", "if", "not", "isinstance", "(", "long_form", ",", "pd", ".", "DataFrame", ")", ":", "msg", "=", "\"long_form should be a pandas dataframe. It is a {}\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "type", "(", "long_form", ")", ")", ")", "ensure_object_is_ordered_dict", "(", "specification_dict", ",", "\"specification_dict\"", ")", "return", "None" ]
Ensures that long_form is a pandas dataframe and that specification_dict is an OrderedDict, raising a ValueError otherwise. Parameters ---------- long_form : pandas dataframe. Contains one row for each available alternative, for each observation. specification_dict : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `"all_diff"` or `"all_same"`. If a list, the elements should be: - single objects that are within the alternative ID column of `long_form_df` - lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). Returns ------- None.
[ "Ensures", "that", "long_form", "is", "a", "pandas", "dataframe", "and", "that", "specification_dict", "is", "an", "OrderedDict", "raising", "a", "ValueError", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L159-L194
814
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_alt_id_in_long_form
def ensure_alt_id_in_long_form(alt_id_col, long_form): """ Ensures alt_id_col is in long_form, and raises a ValueError if not. Parameters ---------- alt_id_col : str. Column name which denotes the column in `long_form` that contains the alternative ID for each row in `long_form`. long_form : pandas dataframe. Contains one row for each available alternative, for each observation. Returns ------- None. """ if alt_id_col not in long_form.columns: msg = "alt_id_col == {} is not a column in long_form." raise ValueError(msg.format(alt_id_col)) return None
python
def ensure_alt_id_in_long_form(alt_id_col, long_form): """ Ensures alt_id_col is in long_form, and raises a ValueError if not. Parameters ---------- alt_id_col : str. Column name which denotes the column in `long_form` that contains the alternative ID for each row in `long_form`. long_form : pandas dataframe. Contains one row for each available alternative, for each observation. Returns ------- None. """ if alt_id_col not in long_form.columns: msg = "alt_id_col == {} is not a column in long_form." raise ValueError(msg.format(alt_id_col)) return None
[ "def", "ensure_alt_id_in_long_form", "(", "alt_id_col", ",", "long_form", ")", ":", "if", "alt_id_col", "not", "in", "long_form", ".", "columns", ":", "msg", "=", "\"alt_id_col == {} is not a column in long_form.\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "alt_id_col", ")", ")", "return", "None" ]
Ensures alt_id_col is in long_form, and raises a ValueError if not. Parameters ---------- alt_id_col : str. Column name which denotes the column in `long_form` that contains the alternative ID for each row in `long_form`. long_form : pandas dataframe. Contains one row for each available alternative, for each observation. Returns ------- None.
[ "Ensures", "alt_id_col", "is", "in", "long_form", "and", "raises", "a", "ValueError", "if", "not", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L197-L217
815
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_specification_cols_are_in_dataframe
def ensure_specification_cols_are_in_dataframe(specification, dataframe): """ Checks whether each column in `specification` is in `dataframe`. Raises ValueError if any of the columns are not in the dataframe. Parameters ---------- specification : OrderedDict. Keys are a proper subset of the columns in `data`. Values are either a list or a single string, "all_diff" or "all_same". If a list, the elements should be: - single objects that are in the alternative ID column of `data` - lists of objects that are within the alternative ID column of `data`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification` values, a single column will be created for all the alternatives within the iterable (i.e. there will be one common coefficient for the variables in the iterable). dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. Returns ------- None. """ # Make sure specification is an OrderedDict try: assert isinstance(specification, OrderedDict) except AssertionError: raise TypeError("`specification` must be an OrderedDict.") # Make sure dataframe is a pandas dataframe assert isinstance(dataframe, pd.DataFrame) problem_cols = [] dataframe_cols = dataframe.columns for key in specification: if key not in dataframe_cols: problem_cols.append(key) if problem_cols != []: msg = "The following keys in the specification are not in 'data':\n{}" raise ValueError(msg.format(problem_cols)) return None
python
def ensure_specification_cols_are_in_dataframe(specification, dataframe): """ Checks whether each column in `specification` is in `dataframe`. Raises ValueError if any of the columns are not in the dataframe. Parameters ---------- specification : OrderedDict. Keys are a proper subset of the columns in `data`. Values are either a list or a single string, "all_diff" or "all_same". If a list, the elements should be: - single objects that are in the alternative ID column of `data` - lists of objects that are within the alternative ID column of `data`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification` values, a single column will be created for all the alternatives within the iterable (i.e. there will be one common coefficient for the variables in the iterable). dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. Returns ------- None. """ # Make sure specification is an OrderedDict try: assert isinstance(specification, OrderedDict) except AssertionError: raise TypeError("`specification` must be an OrderedDict.") # Make sure dataframe is a pandas dataframe assert isinstance(dataframe, pd.DataFrame) problem_cols = [] dataframe_cols = dataframe.columns for key in specification: if key not in dataframe_cols: problem_cols.append(key) if problem_cols != []: msg = "The following keys in the specification are not in 'data':\n{}" raise ValueError(msg.format(problem_cols)) return None
[ "def", "ensure_specification_cols_are_in_dataframe", "(", "specification", ",", "dataframe", ")", ":", "# Make sure specification is an OrderedDict", "try", ":", "assert", "isinstance", "(", "specification", ",", "OrderedDict", ")", "except", "AssertionError", ":", "raise", "TypeError", "(", "\"`specification` must be an OrderedDict.\"", ")", "# Make sure dataframe is a pandas dataframe", "assert", "isinstance", "(", "dataframe", ",", "pd", ".", "DataFrame", ")", "problem_cols", "=", "[", "]", "dataframe_cols", "=", "dataframe", ".", "columns", "for", "key", "in", "specification", ":", "if", "key", "not", "in", "dataframe_cols", ":", "problem_cols", ".", "append", "(", "key", ")", "if", "problem_cols", "!=", "[", "]", ":", "msg", "=", "\"The following keys in the specification are not in 'data':\\n{}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "problem_cols", ")", ")", "return", "None" ]
Checks whether each column in `specification` is in `dataframe`. Raises ValueError if any of the columns are not in the dataframe. Parameters ---------- specification : OrderedDict. Keys are a proper subset of the columns in `data`. Values are either a list or a single string, "all_diff" or "all_same". If a list, the elements should be: - single objects that are in the alternative ID column of `data` - lists of objects that are within the alternative ID column of `data`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification` values, a single column will be created for all the alternatives within the iterable (i.e. there will be one common coefficient for the variables in the iterable). dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. Returns ------- None.
[ "Checks", "whether", "each", "column", "in", "specification", "is", "in", "dataframe", ".", "Raises", "ValueError", "if", "any", "of", "the", "columns", "are", "not", "in", "the", "dataframe", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L220-L264
816
timothyb0912/pylogit
pylogit/choice_tools.py
check_keys_and_values_of_name_dictionary
def check_keys_and_values_of_name_dictionary(names, specification_dict, num_alts): """ Check the validity of the keys and values in the names dictionary. Parameters ---------- names : OrderedDict, optional. Should have the same keys as `specification_dict`. For each key: - if the corresponding value in `specification_dict` is "all_same", then there should be a single string as the value in names. - if the corresponding value in `specification_dict` is "all_diff", then there should be a list of strings as the value in names. There should be one string in the value in names for each possible alternative. - if the corresponding value in `specification_dict` is a list, then there should be a list of strings as the value in names. There should be one string the value in names per item in the value in `specification_dict`. specification_dict : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `"all_diff"` or `"all_same"`. If a list, the elements should be: - single objects that are within the alternative ID column of `long_form_df` - lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). num_alts : int. The number of alternatives in this dataset's universal choice set. Returns ------- None. """ if names.keys() != specification_dict.keys(): msg = "names.keys() does not equal specification_dict.keys()" raise ValueError(msg) for key in names: specification = specification_dict[key] name_object = names[key] if isinstance(specification, list): try: assert isinstance(name_object, list) assert len(name_object) == len(specification) assert all([isinstance(x, str) for x in name_object]) except AssertionError: msg = "names[{}] must be a list AND it must have the same" msg_2 = " number of strings as there are elements of the" msg_3 = " corresponding list in specification_dict" raise ValueError(msg.format(key) + msg_2 + msg_3) else: if specification == "all_same": if not isinstance(name_object, str): msg = "names[{}] should be a string".format(key) raise TypeError(msg) else: # This means speciffication == 'all_diff' try: assert isinstance(name_object, list) assert len(name_object) == num_alts except AssertionError: msg_1 = "names[{}] should be a list with {} elements," msg_2 = " 1 element for each possible alternative" msg = (msg_1.format(key, num_alts) + msg_2) raise ValueError(msg) return None
python
def check_keys_and_values_of_name_dictionary(names, specification_dict, num_alts): """ Check the validity of the keys and values in the names dictionary. Parameters ---------- names : OrderedDict, optional. Should have the same keys as `specification_dict`. For each key: - if the corresponding value in `specification_dict` is "all_same", then there should be a single string as the value in names. - if the corresponding value in `specification_dict` is "all_diff", then there should be a list of strings as the value in names. There should be one string in the value in names for each possible alternative. - if the corresponding value in `specification_dict` is a list, then there should be a list of strings as the value in names. There should be one string the value in names per item in the value in `specification_dict`. specification_dict : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `"all_diff"` or `"all_same"`. If a list, the elements should be: - single objects that are within the alternative ID column of `long_form_df` - lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). num_alts : int. The number of alternatives in this dataset's universal choice set. Returns ------- None. """ if names.keys() != specification_dict.keys(): msg = "names.keys() does not equal specification_dict.keys()" raise ValueError(msg) for key in names: specification = specification_dict[key] name_object = names[key] if isinstance(specification, list): try: assert isinstance(name_object, list) assert len(name_object) == len(specification) assert all([isinstance(x, str) for x in name_object]) except AssertionError: msg = "names[{}] must be a list AND it must have the same" msg_2 = " number of strings as there are elements of the" msg_3 = " corresponding list in specification_dict" raise ValueError(msg.format(key) + msg_2 + msg_3) else: if specification == "all_same": if not isinstance(name_object, str): msg = "names[{}] should be a string".format(key) raise TypeError(msg) else: # This means speciffication == 'all_diff' try: assert isinstance(name_object, list) assert len(name_object) == num_alts except AssertionError: msg_1 = "names[{}] should be a list with {} elements," msg_2 = " 1 element for each possible alternative" msg = (msg_1.format(key, num_alts) + msg_2) raise ValueError(msg) return None
[ "def", "check_keys_and_values_of_name_dictionary", "(", "names", ",", "specification_dict", ",", "num_alts", ")", ":", "if", "names", ".", "keys", "(", ")", "!=", "specification_dict", ".", "keys", "(", ")", ":", "msg", "=", "\"names.keys() does not equal specification_dict.keys()\"", "raise", "ValueError", "(", "msg", ")", "for", "key", "in", "names", ":", "specification", "=", "specification_dict", "[", "key", "]", "name_object", "=", "names", "[", "key", "]", "if", "isinstance", "(", "specification", ",", "list", ")", ":", "try", ":", "assert", "isinstance", "(", "name_object", ",", "list", ")", "assert", "len", "(", "name_object", ")", "==", "len", "(", "specification", ")", "assert", "all", "(", "[", "isinstance", "(", "x", ",", "str", ")", "for", "x", "in", "name_object", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"names[{}] must be a list AND it must have the same\"", "msg_2", "=", "\" number of strings as there are elements of the\"", "msg_3", "=", "\" corresponding list in specification_dict\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "key", ")", "+", "msg_2", "+", "msg_3", ")", "else", ":", "if", "specification", "==", "\"all_same\"", ":", "if", "not", "isinstance", "(", "name_object", ",", "str", ")", ":", "msg", "=", "\"names[{}] should be a string\"", ".", "format", "(", "key", ")", "raise", "TypeError", "(", "msg", ")", "else", ":", "# This means speciffication == 'all_diff'", "try", ":", "assert", "isinstance", "(", "name_object", ",", "list", ")", "assert", "len", "(", "name_object", ")", "==", "num_alts", "except", "AssertionError", ":", "msg_1", "=", "\"names[{}] should be a list with {} elements,\"", "msg_2", "=", "\" 1 element for each possible alternative\"", "msg", "=", "(", "msg_1", ".", "format", "(", "key", ",", "num_alts", ")", "+", "msg_2", ")", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Check the validity of the keys and values in the names dictionary. Parameters ---------- names : OrderedDict, optional. Should have the same keys as `specification_dict`. For each key: - if the corresponding value in `specification_dict` is "all_same", then there should be a single string as the value in names. - if the corresponding value in `specification_dict` is "all_diff", then there should be a list of strings as the value in names. There should be one string in the value in names for each possible alternative. - if the corresponding value in `specification_dict` is a list, then there should be a list of strings as the value in names. There should be one string the value in names per item in the value in `specification_dict`. specification_dict : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `"all_diff"` or `"all_same"`. If a list, the elements should be: - single objects that are within the alternative ID column of `long_form_df` - lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). num_alts : int. The number of alternatives in this dataset's universal choice set. Returns ------- None.
[ "Check", "the", "validity", "of", "the", "keys", "and", "values", "in", "the", "names", "dictionary", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L340-L417
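The `specification_dict`/`names` structure documented in record 816 (and in record 813 above) can be made concrete with a small hypothetical example; the variable names and alternative IDs below are invented purely for illustration:

```python
from collections import OrderedDict

# Hypothetical 3-alternative choice set with alternative IDs 1, 2, 3.
specification = OrderedDict()
specification["intercept"] = [1, 2]        # a distinct coefficient for alternatives 1 and 2
specification["travel_time"] = "all_same"  # one coefficient shared across all alternatives
specification["income"] = "all_diff"       # one coefficient per alternative

names = OrderedDict()
names["intercept"] = ["ASC: alt 1", "ASC: alt 2"]
names["travel_time"] = "travel time"
names["income"] = ["income: alt 1", "income: alt 2", "income: alt 3"]

# With num_alts == 3, this pairing satisfies the checks in
# check_keys_and_values_of_name_dictionary(names, specification, 3).
```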
817
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_all_columns_are_used
def ensure_all_columns_are_used(num_vars_accounted_for, dataframe, data_title='long_data'): """ Ensure that all of the columns from dataframe are in the list of used_cols. Will raise a helpful UserWarning if otherwise. Parameters ---------- num_vars_accounted_for : int. Denotes the number of variables used in one's function. dataframe : pandas dataframe. Contains all of the data to be converted from one format to another. data_title : str, optional. Denotes the title by which `dataframe` should be referred in the UserWarning. Returns ------- None. """ dataframe_vars = set(dataframe.columns.tolist()) num_dataframe_vars = len(dataframe_vars) if num_vars_accounted_for == num_dataframe_vars: pass elif num_vars_accounted_for < num_dataframe_vars: msg = "Note, there are {:,} variables in {} but the inputs" msg_2 = " ind_vars, alt_specific_vars, and subset_specific_vars only" msg_3 = " account for {:,} variables." warnings.warn(msg.format(num_dataframe_vars, data_title) + msg_2 + msg_3.format(num_vars_accounted_for)) else: # This means num_vars_accounted_for > num_dataframe_vars msg = "There are more variable specified in ind_vars, " msg_2 = "alt_specific_vars, and subset_specific_vars ({:,}) than there" msg_3 = " are variables in {} ({:,})" warnings.warn(msg + msg_2.format(num_vars_accounted_for) + msg_3.format(data_title, num_dataframe_vars)) return None
python
def ensure_all_columns_are_used(num_vars_accounted_for, dataframe, data_title='long_data'): """ Ensure that all of the columns from dataframe are in the list of used_cols. Will raise a helpful UserWarning if otherwise. Parameters ---------- num_vars_accounted_for : int. Denotes the number of variables used in one's function. dataframe : pandas dataframe. Contains all of the data to be converted from one format to another. data_title : str, optional. Denotes the title by which `dataframe` should be referred in the UserWarning. Returns ------- None. """ dataframe_vars = set(dataframe.columns.tolist()) num_dataframe_vars = len(dataframe_vars) if num_vars_accounted_for == num_dataframe_vars: pass elif num_vars_accounted_for < num_dataframe_vars: msg = "Note, there are {:,} variables in {} but the inputs" msg_2 = " ind_vars, alt_specific_vars, and subset_specific_vars only" msg_3 = " account for {:,} variables." warnings.warn(msg.format(num_dataframe_vars, data_title) + msg_2 + msg_3.format(num_vars_accounted_for)) else: # This means num_vars_accounted_for > num_dataframe_vars msg = "There are more variable specified in ind_vars, " msg_2 = "alt_specific_vars, and subset_specific_vars ({:,}) than there" msg_3 = " are variables in {} ({:,})" warnings.warn(msg + msg_2.format(num_vars_accounted_for) + msg_3.format(data_title, num_dataframe_vars)) return None
[ "def", "ensure_all_columns_are_used", "(", "num_vars_accounted_for", ",", "dataframe", ",", "data_title", "=", "'long_data'", ")", ":", "dataframe_vars", "=", "set", "(", "dataframe", ".", "columns", ".", "tolist", "(", ")", ")", "num_dataframe_vars", "=", "len", "(", "dataframe_vars", ")", "if", "num_vars_accounted_for", "==", "num_dataframe_vars", ":", "pass", "elif", "num_vars_accounted_for", "<", "num_dataframe_vars", ":", "msg", "=", "\"Note, there are {:,} variables in {} but the inputs\"", "msg_2", "=", "\" ind_vars, alt_specific_vars, and subset_specific_vars only\"", "msg_3", "=", "\" account for {:,} variables.\"", "warnings", ".", "warn", "(", "msg", ".", "format", "(", "num_dataframe_vars", ",", "data_title", ")", "+", "msg_2", "+", "msg_3", ".", "format", "(", "num_vars_accounted_for", ")", ")", "else", ":", "# This means num_vars_accounted_for > num_dataframe_vars", "msg", "=", "\"There are more variable specified in ind_vars, \"", "msg_2", "=", "\"alt_specific_vars, and subset_specific_vars ({:,}) than there\"", "msg_3", "=", "\" are variables in {} ({:,})\"", "warnings", ".", "warn", "(", "msg", "+", "msg_2", ".", "format", "(", "num_vars_accounted_for", ")", "+", "msg_3", ".", "format", "(", "data_title", ",", "num_dataframe_vars", ")", ")", "return", "None" ]
Ensure that all of the columns from dataframe are in the list of used_cols. Will raise a helpful UserWarning if otherwise. Parameters ---------- num_vars_accounted_for : int. Denotes the number of variables used in one's function. dataframe : pandas dataframe. Contains all of the data to be converted from one format to another. data_title : str, optional. Denotes the title by which `dataframe` should be referred in the UserWarning. Returns ------- None.
[ "Ensure", "that", "all", "of", "the", "columns", "from", "dataframe", "are", "in", "the", "list", "of", "used_cols", ".", "Will", "raise", "a", "helpful", "UserWarning", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L420-L463
818
timothyb0912/pylogit
pylogit/choice_tools.py
check_dataframe_for_duplicate_records
def check_dataframe_for_duplicate_records(obs_id_col, alt_id_col, df): """ Checks a cross-sectional dataframe of long-format data for duplicate observations. Duplicate observations are defined as rows with the same observation id value and the same alternative id value. Parameters ---------- obs_id_col : str. Denotes the column in `df` that contains the observation ID values for each row. alt_id_col : str. Denotes the column in `df` that contains the alternative ID values for each row. df : pandas dataframe. The dataframe of long format data that is to be checked for duplicates. Returns ------- None. """ if df.duplicated(subset=[obs_id_col, alt_id_col]).any(): msg = "One or more observation-alternative_id pairs is not unique." raise ValueError(msg) return None
python
def check_dataframe_for_duplicate_records(obs_id_col, alt_id_col, df): """ Checks a cross-sectional dataframe of long-format data for duplicate observations. Duplicate observations are defined as rows with the same observation id value and the same alternative id value. Parameters ---------- obs_id_col : str. Denotes the column in `df` that contains the observation ID values for each row. alt_id_col : str. Denotes the column in `df` that contains the alternative ID values for each row. df : pandas dataframe. The dataframe of long format data that is to be checked for duplicates. Returns ------- None. """ if df.duplicated(subset=[obs_id_col, alt_id_col]).any(): msg = "One or more observation-alternative_id pairs is not unique." raise ValueError(msg) return None
[ "def", "check_dataframe_for_duplicate_records", "(", "obs_id_col", ",", "alt_id_col", ",", "df", ")", ":", "if", "df", ".", "duplicated", "(", "subset", "=", "[", "obs_id_col", ",", "alt_id_col", "]", ")", ".", "any", "(", ")", ":", "msg", "=", "\"One or more observation-alternative_id pairs is not unique.\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Checks a cross-sectional dataframe of long-format data for duplicate observations. Duplicate observations are defined as rows with the same observation id value and the same alternative id value. Parameters ---------- obs_id_col : str. Denotes the column in `df` that contains the observation ID values for each row. alt_id_col : str. Denotes the column in `df` that contains the alternative ID values for each row. df : pandas dataframe. The dataframe of long format data that is to be checked for duplicates. Returns ------- None.
[ "Checks", "a", "cross", "-", "sectional", "dataframe", "of", "long", "-", "format", "data", "for", "duplicate", "observations", ".", "Duplicate", "observations", "are", "defined", "as", "rows", "with", "the", "same", "observation", "id", "value", "and", "the", "same", "alternative", "id", "value", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L466-L491
819
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_num_chosen_alts_equals_num_obs
def ensure_num_chosen_alts_equals_num_obs(obs_id_col, choice_col, df): """ Checks that the total number of recorded choices equals the total number of observations. If this is not the case, raise helpful ValueError messages. Parameters ---------- obs_id_col : str. Denotes the column in `df` that contains the observation ID values for each row. choice_col : str. Denotes the column in `long_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. df : pandas dataframe. The dataframe whose choices and observations will be checked. Returns ------- None. """ num_obs = df[obs_id_col].unique().shape[0] num_choices = df[choice_col].sum() if num_choices < num_obs: msg = "One or more observations have not chosen one " msg_2 = "of the alternatives available to him/her" raise ValueError(msg + msg_2) if num_choices > num_obs: msg = "One or more observations has chosen multiple alternatives" raise ValueError(msg) return None
python
def ensure_num_chosen_alts_equals_num_obs(obs_id_col, choice_col, df): """ Checks that the total number of recorded choices equals the total number of observations. If this is not the case, raise helpful ValueError messages. Parameters ---------- obs_id_col : str. Denotes the column in `df` that contains the observation ID values for each row. choice_col : str. Denotes the column in `long_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. df : pandas dataframe. The dataframe whose choices and observations will be checked. Returns ------- None. """ num_obs = df[obs_id_col].unique().shape[0] num_choices = df[choice_col].sum() if num_choices < num_obs: msg = "One or more observations have not chosen one " msg_2 = "of the alternatives available to him/her" raise ValueError(msg + msg_2) if num_choices > num_obs: msg = "One or more observations has chosen multiple alternatives" raise ValueError(msg) return None
[ "def", "ensure_num_chosen_alts_equals_num_obs", "(", "obs_id_col", ",", "choice_col", ",", "df", ")", ":", "num_obs", "=", "df", "[", "obs_id_col", "]", ".", "unique", "(", ")", ".", "shape", "[", "0", "]", "num_choices", "=", "df", "[", "choice_col", "]", ".", "sum", "(", ")", "if", "num_choices", "<", "num_obs", ":", "msg", "=", "\"One or more observations have not chosen one \"", "msg_2", "=", "\"of the alternatives available to him/her\"", "raise", "ValueError", "(", "msg", "+", "msg_2", ")", "if", "num_choices", ">", "num_obs", ":", "msg", "=", "\"One or more observations has chosen multiple alternatives\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Checks that the total number of recorded choices equals the total number of observations. If this is not the case, raise helpful ValueError messages. Parameters ---------- obs_id_col : str. Denotes the column in `df` that contains the observation ID values for each row. choice_col : str. Denotes the column in `long_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. df : pandas dataframe. The dataframe whose choices and observations will be checked. Returns ------- None.
[ "Checks", "that", "the", "total", "number", "of", "recorded", "choices", "equals", "the", "total", "number", "of", "observations", ".", "If", "this", "is", "not", "the", "case", "raise", "helpful", "ValueError", "messages", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L494-L526
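A minimal usage sketch for the record above. It assumes pylogit is installed so the function can be imported from pylogit.choice_tools (matching the record's path field); the toy long-format dataframe and its column names are invented for illustration.

```python
import pandas as pd
from pylogit.choice_tools import ensure_num_chosen_alts_equals_num_obs

# Two observations, each facing two alternatives in long format.
long_df = pd.DataFrame({"obs_id": [1, 1, 2, 2],
                        "alt_id": [1, 2, 1, 2],
                        "choice": [1, 0, 0, 1]})

# Passes silently: exactly one recorded choice per observation.
ensure_num_chosen_alts_equals_num_obs("obs_id", "choice", long_df)

# Zeroing out observation 2's choice makes num_choices < num_obs,
# so the call below raises the "have not chosen" ValueError.
bad_df = long_df.assign(choice=[1, 0, 0, 0])
try:
    ensure_num_chosen_alts_equals_num_obs("obs_id", "choice", bad_df)
except ValueError as e:
    print(e)
```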
820
timothyb0912/pylogit
pylogit/choice_tools.py
check_type_and_values_of_alt_name_dict
def check_type_and_values_of_alt_name_dict(alt_name_dict, alt_id_col, df): """ Ensures that `alt_name_dict` is a dictionary and that its keys are in the alternative id column of `df`. Raises helpful errors if either condition is not met. Parameters ---------- alt_name_dict : dict. A dictionary whose keys are the possible values in `df[alt_id_col].unique()`. The values should be the name that one wants to associate with each alternative id. alt_id_col : str. Denotes the column in `df` that contains the alternative ID values for each row. df : pandas dataframe. The dataframe of long format data that contains the alternative IDs. Returns ------- None. """ if not isinstance(alt_name_dict, dict): msg = "alt_name_dict should be a dictionary. Passed value was a {}" raise TypeError(msg.format(type(alt_name_dict))) if not all([x in df[alt_id_col].values for x in alt_name_dict.keys()]): msg = "One or more of alt_name_dict's keys are not " msg_2 = "in long_data[alt_id_col]" raise ValueError(msg + msg_2) return None
python
def check_type_and_values_of_alt_name_dict(alt_name_dict, alt_id_col, df): """ Ensures that `alt_name_dict` is a dictionary and that its keys are in the alternative id column of `df`. Raises helpful errors if either condition is not met. Parameters ---------- alt_name_dict : dict. A dictionary whose keys are the possible values in `df[alt_id_col].unique()`. The values should be the name that one wants to associate with each alternative id. alt_id_col : str. Denotes the column in `df` that contains the alternative ID values for each row. df : pandas dataframe. The dataframe of long format data that contains the alternative IDs. Returns ------- None. """ if not isinstance(alt_name_dict, dict): msg = "alt_name_dict should be a dictionary. Passed value was a {}" raise TypeError(msg.format(type(alt_name_dict))) if not all([x in df[alt_id_col].values for x in alt_name_dict.keys()]): msg = "One or more of alt_name_dict's keys are not " msg_2 = "in long_data[alt_id_col]" raise ValueError(msg + msg_2) return None
[ "def", "check_type_and_values_of_alt_name_dict", "(", "alt_name_dict", ",", "alt_id_col", ",", "df", ")", ":", "if", "not", "isinstance", "(", "alt_name_dict", ",", "dict", ")", ":", "msg", "=", "\"alt_name_dict should be a dictionary. Passed value was a {}\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "type", "(", "alt_name_dict", ")", ")", ")", "if", "not", "all", "(", "[", "x", "in", "df", "[", "alt_id_col", "]", ".", "values", "for", "x", "in", "alt_name_dict", ".", "keys", "(", ")", "]", ")", ":", "msg", "=", "\"One or more of alt_name_dict's keys are not \"", "msg_2", "=", "\"in long_data[alt_id_col]\"", "raise", "ValueError", "(", "msg", "+", "msg_2", ")", "return", "None" ]
Ensures that `alt_name_dict` is a dictionary and that its keys are in the alternative id column of `df`. Raises helpful errors if either condition is not met. Parameters ---------- alt_name_dict : dict. A dictionary whose keys are the possible values in `df[alt_id_col].unique()`. The values should be the name that one wants to associate with each alternative id. alt_id_col : str. Denotes the column in `df` that contains the alternative ID values for each row. df : pandas dataframe. The dataframe of long format data that contains the alternative IDs. Returns ------- None.
[ "Ensures", "that", "alt_name_dict", "is", "a", "dictionary", "and", "that", "its", "keys", "are", "in", "the", "alternative", "id", "column", "of", "df", ".", "Raises", "helpful", "errors", "if", "either", "condition", "is", "not", "met", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L529-L560
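A short, hedged example of the alternative-name check above, under the same pylogit.choice_tools import assumption; the id-to-name mapping and toy dataframe are hypothetical.

```python
import pandas as pd
from pylogit.choice_tools import check_type_and_values_of_alt_name_dict

long_df = pd.DataFrame({"alt_id": [1, 2, 3, 1, 2, 3]})

# Keys 1-3 all appear in long_df["alt_id"], so this returns None.
check_type_and_values_of_alt_name_dict({1: "car", 2: "bus", 3: "bike"},
                                       "alt_id", long_df)

# A key that never appears in the alternative id column raises a ValueError
# (and a non-dict first argument would raise a TypeError instead).
try:
    check_type_and_values_of_alt_name_dict({9: "jetpack"}, "alt_id", long_df)
except ValueError as e:
    print(e)
```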
821
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_ridge_is_scalar_or_none
def ensure_ridge_is_scalar_or_none(ridge): """ Ensures that `ridge` is either None or a scalar value. Raises a helpful TypeError otherwise. Parameters ---------- ridge : int, float, long, or None. Scalar value or None, determining the L2-ridge regression penalty. Returns ------- None. """ if (ridge is not None) and not isinstance(ridge, Number): msg_1 = "ridge should be None or an int, float, or long." msg_2 = "The passed value of ridge had type: {}".format(type(ridge)) raise TypeError(msg_1 + msg_2) return None
python
def ensure_ridge_is_scalar_or_none(ridge): """ Ensures that `ridge` is either None or a scalar value. Raises a helpful TypeError otherwise. Parameters ---------- ridge : int, float, long, or None. Scalar value or None, determining the L2-ridge regression penalty. Returns ------- None. """ if (ridge is not None) and not isinstance(ridge, Number): msg_1 = "ridge should be None or an int, float, or long." msg_2 = "The passed value of ridge had type: {}".format(type(ridge)) raise TypeError(msg_1 + msg_2) return None
[ "def", "ensure_ridge_is_scalar_or_none", "(", "ridge", ")", ":", "if", "(", "ridge", "is", "not", "None", ")", "and", "not", "isinstance", "(", "ridge", ",", "Number", ")", ":", "msg_1", "=", "\"ridge should be None or an int, float, or long.\"", "msg_2", "=", "\"The passed value of ridge had type: {}\"", ".", "format", "(", "type", "(", "ridge", ")", ")", "raise", "TypeError", "(", "msg_1", "+", "msg_2", ")", "return", "None" ]
Ensures that `ridge` is either None or a scalar value. Raises a helpful TypeError otherwise. Parameters ---------- ridge : int, float, long, or None. Scalar value or None, determining the L2-ridge regression penalty. Returns ------- None.
[ "Ensures", "that", "ridge", "is", "either", "None", "or", "a", "scalar", "value", ".", "Raises", "a", "helpful", "TypeError", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L563-L582
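A quick sketch of how the ridge argument check behaves, assuming the same import path; the sample values are arbitrary.

```python
from pylogit.choice_tools import ensure_ridge_is_scalar_or_none

ensure_ridge_is_scalar_or_none(None)   # passes: None is allowed
ensure_ridge_is_scalar_or_none(0.5)    # passes: floats count as scalars

try:
    ensure_ridge_is_scalar_or_none("0.5")  # strings are rejected
except TypeError as e:
    print(e)
```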
822
timothyb0912/pylogit
pylogit/choice_tools.py
get_original_order_unique_ids
def get_original_order_unique_ids(id_array): """ Get the unique id's of id_array, in their original order of appearance. Parameters ---------- id_array : 1D ndarray. Should contain the ids that we want to extract the unique values from. Returns ------- original_order_unique_ids : 1D ndarray. Contains the unique ids from `id_array`, in their original order of appearance. """ assert isinstance(id_array, np.ndarray) assert len(id_array.shape) == 1 # Get the indices of the unique IDs in their order of appearance # Note the [1] is because the np.unique() call will return both the sorted # unique IDs and the indices original_unique_id_indices =\ np.sort(np.unique(id_array, return_index=True)[1]) # Get the unique ids, in their original order of appearance original_order_unique_ids = id_array[original_unique_id_indices] return original_order_unique_ids
python
def get_original_order_unique_ids(id_array): """ Get the unique id's of id_array, in their original order of appearance. Parameters ---------- id_array : 1D ndarray. Should contain the ids that we want to extract the unique values from. Returns ------- original_order_unique_ids : 1D ndarray. Contains the unique ids from `id_array`, in their original order of appearance. """ assert isinstance(id_array, np.ndarray) assert len(id_array.shape) == 1 # Get the indices of the unique IDs in their order of appearance # Note the [1] is because the np.unique() call will return both the sorted # unique IDs and the indices original_unique_id_indices =\ np.sort(np.unique(id_array, return_index=True)[1]) # Get the unique ids, in their original order of appearance original_order_unique_ids = id_array[original_unique_id_indices] return original_order_unique_ids
[ "def", "get_original_order_unique_ids", "(", "id_array", ")", ":", "assert", "isinstance", "(", "id_array", ",", "np", ".", "ndarray", ")", "assert", "len", "(", "id_array", ".", "shape", ")", "==", "1", "# Get the indices of the unique IDs in their order of appearance", "# Note the [1] is because the np.unique() call will return both the sorted", "# unique IDs and the indices", "original_unique_id_indices", "=", "np", ".", "sort", "(", "np", ".", "unique", "(", "id_array", ",", "return_index", "=", "True", ")", "[", "1", "]", ")", "# Get the unique ids, in their original order of appearance", "original_order_unique_ids", "=", "id_array", "[", "original_unique_id_indices", "]", "return", "original_order_unique_ids" ]
Get the unique id's of id_array, in their original order of appearance. Parameters ---------- id_array : 1D ndarray. Should contain the ids that we want to extract the unique values from. Returns ------- original_order_unique_ids : 1D ndarray. Contains the unique ids from `id_array`, in their original order of appearance.
[ "Get", "the", "unique", "id", "s", "of", "id_array", "in", "their", "original", "order", "of", "appearance", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L718-L745
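A small illustration of the order-preserving behaviour documented above, under the same import assumption; the id values are arbitrary.

```python
import numpy as np
from pylogit.choice_tools import get_original_order_unique_ids

ids = np.array([10, 10, 3, 3, 7, 7, 3])
print(get_original_order_unique_ids(ids))
# [10  3  7]  -- order of first appearance, not sorted order.
```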
823
timothyb0912/pylogit
pylogit/choice_tools.py
create_sparse_mapping
def create_sparse_mapping(id_array, unique_ids=None): """ Will create a scipy.sparse compressed-sparse-row matrix that maps each row represented by an element in id_array to the corresponding value of the unique ids in id_array. Parameters ---------- id_array : 1D ndarray of ints. Each element should represent some id related to the corresponding row. unique_ids : 1D ndarray of ints, or None, optional. If not None, each element should be present in `id_array`. The elements in `unique_ids` should be present in the order in which one wishes them to appear in the columns of the resulting sparse array. For the `row_to_obs` and `row_to_mixers` mappings, this should be the order of appearance in `id_array`. If None, then the unique_ids will be created from `id_array`, in the order of their appearance in `id_array`. Returns ------- mapping : 2D scipy.sparse CSR matrix. Will contain only zeros and ones. `mapping[i, j] == 1` where `id_array[i] == unique_ids[j]`. The id's corresponding to each column are given by `unique_ids`. The rows correspond to the elements of `id_array`. """ # Create unique_ids if necessary if unique_ids is None: unique_ids = get_original_order_unique_ids(id_array) # Check function arguments for validity assert isinstance(unique_ids, np.ndarray) assert isinstance(id_array, np.ndarray) assert unique_ids.ndim == 1 assert id_array.ndim == 1 # Figure out which ids in id_array are represented in unique_ids represented_ids = np.in1d(id_array, unique_ids) # Determine the number of rows in id_array that are in unique_ids num_non_zero_rows = represented_ids.sum() # Figure out the dimensions of the resulting sparse matrix num_rows = id_array.size num_cols = unique_ids.size # Specify the non-zero values that will be present in the sparse matrix. data = np.ones(num_non_zero_rows, dtype=int) # Specify which rows will have non-zero entries in the sparse matrix. row_indices = np.arange(num_rows)[represented_ids] # Map the unique id's to their respective columns unique_id_dict = dict(zip(unique_ids, np.arange(num_cols))) # Figure out the column indices of the non-zero entries, and do so in a way # that avoids a key error (i.e. only look up ids that are represented) col_indices =\ np.array([unique_id_dict[x] for x in id_array[represented_ids]]) # Create and return the sparse matrix return csr_matrix((data, (row_indices, col_indices)), shape=(num_rows, num_cols))
python
def create_sparse_mapping(id_array, unique_ids=None): """ Will create a scipy.sparse compressed-sparse-row matrix that maps each row represented by an element in id_array to the corresponding value of the unique ids in id_array. Parameters ---------- id_array : 1D ndarray of ints. Each element should represent some id related to the corresponding row. unique_ids : 1D ndarray of ints, or None, optional. If not None, each element should be present in `id_array`. The elements in `unique_ids` should be present in the order in which one wishes them to appear in the columns of the resulting sparse array. For the `row_to_obs` and `row_to_mixers` mappings, this should be the order of appearance in `id_array`. If None, then the unique_ids will be created from `id_array`, in the order of their appearance in `id_array`. Returns ------- mapping : 2D scipy.sparse CSR matrix. Will contain only zeros and ones. `mapping[i, j] == 1` where `id_array[i] == unique_ids[j]`. The id's corresponding to each column are given by `unique_ids`. The rows correspond to the elements of `id_array`. """ # Create unique_ids if necessary if unique_ids is None: unique_ids = get_original_order_unique_ids(id_array) # Check function arguments for validity assert isinstance(unique_ids, np.ndarray) assert isinstance(id_array, np.ndarray) assert unique_ids.ndim == 1 assert id_array.ndim == 1 # Figure out which ids in id_array are represented in unique_ids represented_ids = np.in1d(id_array, unique_ids) # Determine the number of rows in id_array that are in unique_ids num_non_zero_rows = represented_ids.sum() # Figure out the dimensions of the resulting sparse matrix num_rows = id_array.size num_cols = unique_ids.size # Specify the non-zero values that will be present in the sparse matrix. data = np.ones(num_non_zero_rows, dtype=int) # Specify which rows will have non-zero entries in the sparse matrix. row_indices = np.arange(num_rows)[represented_ids] # Map the unique id's to their respective columns unique_id_dict = dict(zip(unique_ids, np.arange(num_cols))) # Figure out the column indices of the non-zero entries, and do so in a way # that avoids a key error (i.e. only look up ids that are represented) col_indices =\ np.array([unique_id_dict[x] for x in id_array[represented_ids]]) # Create and return the sparse matrix return csr_matrix((data, (row_indices, col_indices)), shape=(num_rows, num_cols))
[ "def", "create_sparse_mapping", "(", "id_array", ",", "unique_ids", "=", "None", ")", ":", "# Create unique_ids if necessary", "if", "unique_ids", "is", "None", ":", "unique_ids", "=", "get_original_order_unique_ids", "(", "id_array", ")", "# Check function arguments for validity", "assert", "isinstance", "(", "unique_ids", ",", "np", ".", "ndarray", ")", "assert", "isinstance", "(", "id_array", ",", "np", ".", "ndarray", ")", "assert", "unique_ids", ".", "ndim", "==", "1", "assert", "id_array", ".", "ndim", "==", "1", "# Figure out which ids in id_array are represented in unique_ids", "represented_ids", "=", "np", ".", "in1d", "(", "id_array", ",", "unique_ids", ")", "# Determine the number of rows in id_array that are in unique_ids", "num_non_zero_rows", "=", "represented_ids", ".", "sum", "(", ")", "# Figure out the dimensions of the resulting sparse matrix", "num_rows", "=", "id_array", ".", "size", "num_cols", "=", "unique_ids", ".", "size", "# Specify the non-zero values that will be present in the sparse matrix.", "data", "=", "np", ".", "ones", "(", "num_non_zero_rows", ",", "dtype", "=", "int", ")", "# Specify which rows will have non-zero entries in the sparse matrix.", "row_indices", "=", "np", ".", "arange", "(", "num_rows", ")", "[", "represented_ids", "]", "# Map the unique id's to their respective columns", "unique_id_dict", "=", "dict", "(", "zip", "(", "unique_ids", ",", "np", ".", "arange", "(", "num_cols", ")", ")", ")", "# Figure out the column indices of the non-zero entries, and do so in a way", "# that avoids a key error (i.e. only look up ids that are represented)", "col_indices", "=", "np", ".", "array", "(", "[", "unique_id_dict", "[", "x", "]", "for", "x", "in", "id_array", "[", "represented_ids", "]", "]", ")", "# Create and return the sparse matrix", "return", "csr_matrix", "(", "(", "data", ",", "(", "row_indices", ",", "col_indices", ")", ")", ",", "shape", "=", "(", "num_rows", ",", "num_cols", ")", ")" ]
Will create a scipy.sparse compressed-sparse-row matrix that maps each row represented by an element in id_array to the corresponding value of the unique ids in id_array. Parameters ---------- id_array : 1D ndarray of ints. Each element should represent some id related to the corresponding row. unique_ids : 1D ndarray of ints, or None, optional. If not None, each element should be present in `id_array`. The elements in `unique_ids` should be present in the order in which one wishes them to appear in the columns of the resulting sparse array. For the `row_to_obs` and `row_to_mixers` mappings, this should be the order of appearance in `id_array`. If None, then the unique_ids will be created from `id_array`, in the order of their appearance in `id_array`. Returns ------- mapping : 2D scipy.sparse CSR matrix. Will contain only zeros and ones. `mapping[i, j] == 1` where `id_array[i] == unique_ids[j]`. The id's corresponding to each column are given by `unique_ids`. The rows correspond to the elements of `id_array`.
[ "Will", "create", "a", "scipy", ".", "sparse", "compressed", "-", "sparse", "-", "row", "matrix", "that", "maps", "each", "row", "represented", "by", "an", "element", "in", "id_array", "to", "the", "corresponding", "value", "of", "the", "unique", "ids", "in", "id_array", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L776-L832
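A sketch of the sparse row-to-id mapping for a toy id array, assuming the function is importable as above. The dense printout is only for inspection; on real data one would keep the CSR matrix sparse.

```python
import numpy as np
from pylogit.choice_tools import create_sparse_mapping

row_ids = np.array([1, 1, 2, 2, 2, 3])
mapping = create_sparse_mapping(row_ids)

print(mapping.shape)    # (6, 3): six rows, three unique ids
print(mapping.toarray())
# [[1 0 0]
#  [1 0 0]
#  [0 1 0]
#  [0 1 0]
#  [0 1 0]
#  [0 0 1]]
```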
824
timothyb0912/pylogit
pylogit/choice_tools.py
check_wide_data_for_blank_choices
def check_wide_data_for_blank_choices(choice_col, wide_data): """ Checks `wide_data` for null values in the choice column, and raises a helpful ValueError if null values are found. Parameters ---------- choice_col : str. Denotes the column in `wide_data` that is used to record each observation's choice. wide_data : pandas dataframe. Contains one row for each observation. Should contain `choice_col`. Returns ------- None. """ if wide_data[choice_col].isnull().any(): msg_1 = "One or more of the values in wide_data[choice_col] is null." msg_2 = " Remove null values in the choice column or fill them in." raise ValueError(msg_1 + msg_2) return None
python
def check_wide_data_for_blank_choices(choice_col, wide_data): """ Checks `wide_data` for null values in the choice column, and raises a helpful ValueError if null values are found. Parameters ---------- choice_col : str. Denotes the column in `wide_data` that is used to record each observation's choice. wide_data : pandas dataframe. Contains one row for each observation. Should contain `choice_col`. Returns ------- None. """ if wide_data[choice_col].isnull().any(): msg_1 = "One or more of the values in wide_data[choice_col] is null." msg_2 = " Remove null values in the choice column or fill them in." raise ValueError(msg_1 + msg_2) return None
[ "def", "check_wide_data_for_blank_choices", "(", "choice_col", ",", "wide_data", ")", ":", "if", "wide_data", "[", "choice_col", "]", ".", "isnull", "(", ")", ".", "any", "(", ")", ":", "msg_1", "=", "\"One or more of the values in wide_data[choice_col] is null.\"", "msg_2", "=", "\" Remove null values in the choice column or fill them in.\"", "raise", "ValueError", "(", "msg_1", "+", "msg_2", ")", "return", "None" ]
Checks `wide_data` for null values in the choice column, and raises a helpful ValueError if null values are found. Parameters ---------- choice_col : str. Denotes the column in `wide_data` that is used to record each observation's choice. wide_data : pandas dataframe. Contains one row for each observation. Should contain `choice_col`. Returns ------- None.
[ "Checks", "wide_data", "for", "null", "values", "in", "the", "choice", "column", "and", "raises", "a", "helpful", "ValueError", "if", "null", "values", "are", "found", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L1258-L1280
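A hedged example of the null-choice check, assuming the pylogit.choice_tools import path; the wide-format toy data is made up.

```python
import numpy as np
import pandas as pd
from pylogit.choice_tools import check_wide_data_for_blank_choices

wide_df = pd.DataFrame({"obs_id": [1, 2, 3],
                        "choice": [1, 2, np.nan]})

try:
    check_wide_data_for_blank_choices("choice", wide_df)
except ValueError as e:
    print(e)  # the null choice for obs_id 3 triggers the error

# After filling (or dropping) the null, the check passes quietly.
check_wide_data_for_blank_choices("choice", wide_df.fillna({"choice": 2}))
```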
825
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_unique_obs_ids_in_wide_data
def ensure_unique_obs_ids_in_wide_data(obs_id_col, wide_data): """ Ensures that there is one observation per row in wide_data. Raises a helpful ValueError if otherwise. Parameters ---------- obs_id_col : str. Denotes the column in `wide_data` that contains the observation ID values for each row. wide_data : pandas dataframe. Contains one row for each observation. Should contain the specified `obs_id_col` column. Returns ------- None. """ if len(wide_data[obs_id_col].unique()) != wide_data.shape[0]: msg = "The values in wide_data[obs_id_col] are not unique, " msg_2 = "but they need to be." raise ValueError(msg + msg_2) return None
python
def ensure_unique_obs_ids_in_wide_data(obs_id_col, wide_data): """ Ensures that there is one observation per row in wide_data. Raises a helpful ValueError if otherwise. Parameters ---------- obs_id_col : str. Denotes the column in `wide_data` that contains the observation ID values for each row. wide_data : pandas dataframe. Contains one row for each observation. Should contain the specified `obs_id_col` column. Returns ------- None. """ if len(wide_data[obs_id_col].unique()) != wide_data.shape[0]: msg = "The values in wide_data[obs_id_col] are not unique, " msg_2 = "but they need to be." raise ValueError(msg + msg_2) return None
[ "def", "ensure_unique_obs_ids_in_wide_data", "(", "obs_id_col", ",", "wide_data", ")", ":", "if", "len", "(", "wide_data", "[", "obs_id_col", "]", ".", "unique", "(", ")", ")", "!=", "wide_data", ".", "shape", "[", "0", "]", ":", "msg", "=", "\"The values in wide_data[obs_id_col] are not unique, \"", "msg_2", "=", "\"but they need to be.\"", "raise", "ValueError", "(", "msg", "+", "msg_2", ")", "return", "None" ]
Ensures that there is one observation per row in wide_data. Raises a helpful ValueError if otherwise. Parameters ---------- obs_id_col : str. Denotes the column in `wide_data` that contains the observation ID values for each row. wide_data : pandas dataframe. Contains one row for each observation. Should contain the specified `obs_id_col` column. Returns ------- None.
[ "Ensures", "that", "there", "is", "one", "observation", "per", "row", "in", "wide_data", ".", "Raises", "a", "helpful", "ValueError", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L1283-L1306
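A brief sketch of the duplicate-observation check above, under the same import assumption; the toy wide-format dataframe is hypothetical.

```python
import pandas as pd
from pylogit.choice_tools import ensure_unique_obs_ids_in_wide_data

wide_df = pd.DataFrame({"obs_id": [1, 2, 3], "choice": [1, 2, 1]})
ensure_unique_obs_ids_in_wide_data("obs_id", wide_df)  # passes

# Repeating obs_id 3 makes the ids non-unique, so the check raises.
duplicated = pd.concat([wide_df, wide_df.tail(1)], ignore_index=True)
try:
    ensure_unique_obs_ids_in_wide_data("obs_id", duplicated)
except ValueError as e:
    print(e)
```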
826
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_chosen_alternatives_are_in_user_alt_ids
def ensure_chosen_alternatives_are_in_user_alt_ids(choice_col, wide_data, availability_vars): """ Ensures that all chosen alternatives in `wide_df` are present in the `availability_vars` dict. Raises a helpful ValueError if not. Parameters ---------- choice_col : str. Denotes the column in `wide_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. wide_data : pandas dataframe. Contains one row for each observation. Should contain the specified `choice_col` column. availability_vars : dict. There should be one key value pair for each alternative that is observed in the dataset. Each key should be the alternative id for the alternative, and the value should be the column heading in `wide_data` that denotes (using ones and zeros) whether an alternative is available/unavailable, respectively, for a given observation. Alternative id's, i.e. the keys, must be integers. Returns ------- None. """ if not wide_data[choice_col].isin(availability_vars.keys()).all(): msg = "One or more values in wide_data[choice_col] is not in the user " msg_2 = "provided alternative ids in availability_vars.keys()" raise ValueError(msg + msg_2) return None
python
def ensure_chosen_alternatives_are_in_user_alt_ids(choice_col, wide_data, availability_vars): """ Ensures that all chosen alternatives in `wide_df` are present in the `availability_vars` dict. Raises a helpful ValueError if not. Parameters ---------- choice_col : str. Denotes the column in `wide_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. wide_data : pandas dataframe. Contains one row for each observation. Should contain the specified `choice_col` column. availability_vars : dict. There should be one key value pair for each alternative that is observed in the dataset. Each key should be the alternative id for the alternative, and the value should be the column heading in `wide_data` that denotes (using ones and zeros) whether an alternative is available/unavailable, respectively, for a given observation. Alternative id's, i.e. the keys, must be integers. Returns ------- None. """ if not wide_data[choice_col].isin(availability_vars.keys()).all(): msg = "One or more values in wide_data[choice_col] is not in the user " msg_2 = "provided alternative ids in availability_vars.keys()" raise ValueError(msg + msg_2) return None
[ "def", "ensure_chosen_alternatives_are_in_user_alt_ids", "(", "choice_col", ",", "wide_data", ",", "availability_vars", ")", ":", "if", "not", "wide_data", "[", "choice_col", "]", ".", "isin", "(", "availability_vars", ".", "keys", "(", ")", ")", ".", "all", "(", ")", ":", "msg", "=", "\"One or more values in wide_data[choice_col] is not in the user \"", "msg_2", "=", "\"provided alternative ids in availability_vars.keys()\"", "raise", "ValueError", "(", "msg", "+", "msg_2", ")", "return", "None" ]
Ensures that all chosen alternatives in `wide_df` are present in the `availability_vars` dict. Raises a helpful ValueError if not. Parameters ---------- choice_col : str. Denotes the column in `wide_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. wide_data : pandas dataframe. Contains one row for each observation. Should contain the specified `choice_col` column. availability_vars : dict. There should be one key value pair for each alternative that is observed in the dataset. Each key should be the alternative id for the alternative, and the value should be the column heading in `wide_data` that denotes (using ones and zeros) whether an alternative is available/unavailable, respectively, for a given observation. Alternative id's, i.e. the keys, must be integers. Returns ------- None.
[ "Ensures", "that", "all", "chosen", "alternatives", "in", "wide_df", "are", "present", "in", "the", "availability_vars", "dict", ".", "Raises", "a", "helpful", "ValueError", "if", "not", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L1309-L1342
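A usage sketch for the chosen-alternative check above, assuming the same import path; the availability column names are invented placeholders.

```python
import pandas as pd
from pylogit.choice_tools import ensure_chosen_alternatives_are_in_user_alt_ids

wide_df = pd.DataFrame({"obs_id": [1, 2], "choice": [1, 3]})
availability_vars = {1: "avail_1", 2: "avail_2", 3: "avail_3"}

# Both chosen ids (1 and 3) are keys of availability_vars, so this passes.
ensure_chosen_alternatives_are_in_user_alt_ids("choice", wide_df,
                                               availability_vars)

# Dropping alternative 3 from the dict makes the second choice unknown.
try:
    ensure_chosen_alternatives_are_in_user_alt_ids(
        "choice", wide_df, {1: "avail_1", 2: "avail_2"})
except ValueError as e:
    print(e)
```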
827
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_each_wide_obs_chose_an_available_alternative
def ensure_each_wide_obs_chose_an_available_alternative(obs_id_col, choice_col, availability_vars, wide_data): """ Checks whether or not each observation with a restricted choice set chose an alternative that was personally available to him or her. Will raise a helpful ValueError if this is not the case. Parameters ---------- obs_id_col : str. Denotes the column in `wide_data` that contains the observation ID values for each row. choice_col : str. Denotes the column in `wide_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. availability_vars : dict. There should be one key value pair for each alternative that is observed in the dataset. Each key should be the alternative id for the alternative, and the value should be the column heading in `wide_data` that denotes (using ones and zeros) whether an alternative is available/unavailable, respectively, for a given observation. Alternative id's, i.e. the keys, must be integers. wide_data : pandas dataframe. Contains one row for each observation. Should have the specified `[obs_id_col, choice_col] + availability_vars.values()` columns. Returns ------- None """ # Determine the various availability values for each observation wide_availability_values = wide_data[list( availability_vars.values())].values # Isolate observations for whom one or more alternatives are unavailable unavailable_condition = ((wide_availability_values == 0).sum(axis=1) .astype(bool)) # Iterate over the observations with one or more unavailable alternatives # Check that each such observation's chosen alternative was available problem_obs = [] for idx, row in wide_data.loc[unavailable_condition].iterrows(): if row.at[availability_vars[row.at[choice_col]]] != 1: problem_obs.append(row.at[obs_id_col]) if problem_obs != []: msg = "The following observations chose unavailable alternatives:\n{}" raise ValueError(msg.format(problem_obs)) return None
python
def ensure_each_wide_obs_chose_an_available_alternative(obs_id_col, choice_col, availability_vars, wide_data): """ Checks whether or not each observation with a restricted choice set chose an alternative that was personally available to him or her. Will raise a helpful ValueError if this is not the case. Parameters ---------- obs_id_col : str. Denotes the column in `wide_data` that contains the observation ID values for each row. choice_col : str. Denotes the column in `wide_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. availability_vars : dict. There should be one key value pair for each alternative that is observed in the dataset. Each key should be the alternative id for the alternative, and the value should be the column heading in `wide_data` that denotes (using ones and zeros) whether an alternative is available/unavailable, respectively, for a given observation. Alternative id's, i.e. the keys, must be integers. wide_data : pandas dataframe. Contains one row for each observation. Should have the specified `[obs_id_col, choice_col] + availability_vars.values()` columns. Returns ------- None """ # Determine the various availability values for each observation wide_availability_values = wide_data[list( availability_vars.values())].values # Isolate observations for whom one or more alternatives are unavailable unavailable_condition = ((wide_availability_values == 0).sum(axis=1) .astype(bool)) # Iterate over the observations with one or more unavailable alternatives # Check that each such observation's chosen alternative was available problem_obs = [] for idx, row in wide_data.loc[unavailable_condition].iterrows(): if row.at[availability_vars[row.at[choice_col]]] != 1: problem_obs.append(row.at[obs_id_col]) if problem_obs != []: msg = "The following observations chose unavailable alternatives:\n{}" raise ValueError(msg.format(problem_obs)) return None
[ "def", "ensure_each_wide_obs_chose_an_available_alternative", "(", "obs_id_col", ",", "choice_col", ",", "availability_vars", ",", "wide_data", ")", ":", "# Determine the various availability values for each observation", "wide_availability_values", "=", "wide_data", "[", "list", "(", "availability_vars", ".", "values", "(", ")", ")", "]", ".", "values", "# Isolate observations for whom one or more alternatives are unavailable", "unavailable_condition", "=", "(", "(", "wide_availability_values", "==", "0", ")", ".", "sum", "(", "axis", "=", "1", ")", ".", "astype", "(", "bool", ")", ")", "# Iterate over the observations with one or more unavailable alternatives", "# Check that each such observation's chosen alternative was available", "problem_obs", "=", "[", "]", "for", "idx", ",", "row", "in", "wide_data", ".", "loc", "[", "unavailable_condition", "]", ".", "iterrows", "(", ")", ":", "if", "row", ".", "at", "[", "availability_vars", "[", "row", ".", "at", "[", "choice_col", "]", "]", "]", "!=", "1", ":", "problem_obs", ".", "append", "(", "row", ".", "at", "[", "obs_id_col", "]", ")", "if", "problem_obs", "!=", "[", "]", ":", "msg", "=", "\"The following observations chose unavailable alternatives:\\n{}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "problem_obs", ")", ")", "return", "None" ]
Checks whether or not each observation with a restricted choice set chose an alternative that was personally available to him or her. Will raise a helpful ValueError if this is not the case. Parameters ---------- obs_id_col : str. Denotes the column in `wide_data` that contains the observation ID values for each row. choice_col : str. Denotes the column in `wide_data` that contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. availability_vars : dict. There should be one key value pair for each alternative that is observed in the dataset. Each key should be the alternative id for the alternative, and the value should be the column heading in `wide_data` that denotes (using ones and zeros) whether an alternative is available/unavailable, respectively, for a given observation. Alternative id's, i.e. the keys, must be integers. wide_data : pandas dataframe. Contains one row for each observation. Should have the specified `[obs_id_col, choice_col] + availability_vars.values()` columns. Returns ------- None
[ "Checks", "whether", "or", "not", "each", "observation", "with", "a", "restricted", "choice", "set", "chose", "an", "alternative", "that", "was", "personally", "available", "to", "him", "or", "her", ".", "Will", "raise", "a", "helpful", "ValueError", "if", "this", "is", "not", "the", "case", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L1345-L1397
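A hedged example of the availability check above, assuming the pylogit.choice_tools import path; the "avail_1"/"avail_2" column names and the toy data are invented.

```python
import pandas as pd
from pylogit.choice_tools import (
    ensure_each_wide_obs_chose_an_available_alternative)

availability_vars = {1: "avail_1", 2: "avail_2"}
wide_df = pd.DataFrame({"obs_id": [1, 2],
                        "choice": [1, 2],
                        "avail_1": [1, 1],
                        "avail_2": [0, 1]})

# Observation 1 chose alternative 1, which is available to it, so this passes.
ensure_each_wide_obs_chose_an_available_alternative(
    "obs_id", "choice", availability_vars, wide_df)

# Flipping observation 1's choice to the unavailable alternative 2
# makes the check raise and report the offending obs_id.
bad_df = wide_df.assign(choice=[2, 2])
try:
    ensure_each_wide_obs_chose_an_available_alternative(
        "obs_id", "choice", availability_vars, bad_df)
except ValueError as e:
    print(e)
```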
828
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_all_wide_alt_ids_are_chosen
def ensure_all_wide_alt_ids_are_chosen(choice_col, alt_specific_vars, availability_vars, wide_data): """ Checks to make sure all user-specified alternative id's, both in `alt_specific_vars` and `availability_vars` are observed in the choice column of `wide_data`. """ sorted_alt_ids = np.sort(wide_data[choice_col].unique()) try: problem_ids = [x for x in availability_vars if x not in sorted_alt_ids] problem_type = "availability_vars" assert problem_ids == [] problem_ids = [] for new_column in alt_specific_vars: for alt_id in alt_specific_vars[new_column]: if alt_id not in sorted_alt_ids and alt_id not in problem_ids: problem_ids.append(alt_id) problem_type = "alt_specific_vars" assert problem_ids == [] except AssertionError: msg = "The following alternative ids from {} are not " msg_2 = "observed in wide_data[choice_col]:\n{}" raise ValueError(msg.format(problem_type) + msg_2.format(problem_ids)) return None
python
def ensure_all_wide_alt_ids_are_chosen(choice_col, alt_specific_vars, availability_vars, wide_data): """ Checks to make sure all user-specified alternative id's, both in `alt_specific_vars` and `availability_vars` are observed in the choice column of `wide_data`. """ sorted_alt_ids = np.sort(wide_data[choice_col].unique()) try: problem_ids = [x for x in availability_vars if x not in sorted_alt_ids] problem_type = "availability_vars" assert problem_ids == [] problem_ids = [] for new_column in alt_specific_vars: for alt_id in alt_specific_vars[new_column]: if alt_id not in sorted_alt_ids and alt_id not in problem_ids: problem_ids.append(alt_id) problem_type = "alt_specific_vars" assert problem_ids == [] except AssertionError: msg = "The following alternative ids from {} are not " msg_2 = "observed in wide_data[choice_col]:\n{}" raise ValueError(msg.format(problem_type) + msg_2.format(problem_ids)) return None
[ "def", "ensure_all_wide_alt_ids_are_chosen", "(", "choice_col", ",", "alt_specific_vars", ",", "availability_vars", ",", "wide_data", ")", ":", "sorted_alt_ids", "=", "np", ".", "sort", "(", "wide_data", "[", "choice_col", "]", ".", "unique", "(", ")", ")", "try", ":", "problem_ids", "=", "[", "x", "for", "x", "in", "availability_vars", "if", "x", "not", "in", "sorted_alt_ids", "]", "problem_type", "=", "\"availability_vars\"", "assert", "problem_ids", "==", "[", "]", "problem_ids", "=", "[", "]", "for", "new_column", "in", "alt_specific_vars", ":", "for", "alt_id", "in", "alt_specific_vars", "[", "new_column", "]", ":", "if", "alt_id", "not", "in", "sorted_alt_ids", "and", "alt_id", "not", "in", "problem_ids", ":", "problem_ids", ".", "append", "(", "alt_id", ")", "problem_type", "=", "\"alt_specific_vars\"", "assert", "problem_ids", "==", "[", "]", "except", "AssertionError", ":", "msg", "=", "\"The following alternative ids from {} are not \"", "msg_2", "=", "\"observed in wide_data[choice_col]:\\n{}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "problem_type", ")", "+", "msg_2", ".", "format", "(", "problem_ids", ")", ")", "return", "None" ]
Checks to make sure all user-specified alternative id's, both in `alt_specific_vars` and `availability_vars` are observed in the choice column of `wide_data`.
[ "Checks", "to", "make", "sure", "all", "user", "-", "specified", "alternative", "id", "s", "both", "in", "alt_specific_vars", "and", "availability_vars", "are", "observed", "in", "the", "choice", "column", "of", "wide_data", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L1400-L1428
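A short sketch of the alternative-id coverage check, under the same import assumption; the "travel_time" specification and column names are hypothetical.

```python
import pandas as pd
from pylogit.choice_tools import ensure_all_wide_alt_ids_are_chosen

wide_df = pd.DataFrame({"obs_id": [1, 2, 3], "choice": [1, 2, 1]})
alt_specific_vars = {"travel_time": {1: "time_1", 2: "time_2"}}
availability_vars = {1: "avail_1", 2: "avail_2"}

# Every alternative id referenced above (1 and 2) is observed as a choice.
ensure_all_wide_alt_ids_are_chosen(
    "choice", alt_specific_vars, availability_vars, wide_df)

# Referencing an alternative (3) that nobody chose raises a ValueError.
try:
    ensure_all_wide_alt_ids_are_chosen(
        "choice", alt_specific_vars,
        {**availability_vars, 3: "avail_3"}, wide_df)
except ValueError as e:
    print(e)
```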
829
timothyb0912/pylogit
pylogit/choice_tools.py
ensure_contiguity_in_observation_rows
def ensure_contiguity_in_observation_rows(obs_id_vector): """ Ensures that all rows pertaining to a given choice situation are located next to one another. Raises a helpful ValueError otherwise. This check is needed because the hessian calculation function requires the design matrix to have contiguity in rows with the same observation id. Parameters ---------- rows_to_obs : 2D scipy sparse array. Should map each row of the long format dataframe to the unique observations in the dataset. obs_id_vector : 1D ndarray of ints. Should contain the id (i.e. a unique integer) that corresponds to each choice situation in the dataset. Returns ------- None. """ # Check that the choice situation id for each row is larger than or equal # to the choice situation id of the preceding row. contiguity_check_array = (obs_id_vector[1:] - obs_id_vector[:-1]) >= 0 if not contiguity_check_array.all(): problem_ids = obs_id_vector[np.where(~contiguity_check_array)] msg_1 = "All rows pertaining to a given choice situation must be " msg_2 = "contiguous. \nRows pertaining to the following observation " msg_3 = "id's are not contiguous: \n{}" raise ValueError(msg_1 + msg_2 + msg_3.format(problem_ids.tolist())) else: return None
python
def ensure_contiguity_in_observation_rows(obs_id_vector): """ Ensures that all rows pertaining to a given choice situation are located next to one another. Raises a helpful ValueError otherwise. This check is needed because the hessian calculation function requires the design matrix to have contiguity in rows with the same observation id. Parameters ---------- rows_to_obs : 2D scipy sparse array. Should map each row of the long format dataframe to the unique observations in the dataset. obs_id_vector : 1D ndarray of ints. Should contain the id (i.e. a unique integer) that corresponds to each choice situation in the dataset. Returns ------- None. """ # Check that the choice situation id for each row is larger than or equal # to the choice situation id of the preceding row. contiguity_check_array = (obs_id_vector[1:] - obs_id_vector[:-1]) >= 0 if not contiguity_check_array.all(): problem_ids = obs_id_vector[np.where(~contiguity_check_array)] msg_1 = "All rows pertaining to a given choice situation must be " msg_2 = "contiguous. \nRows pertaining to the following observation " msg_3 = "id's are not contiguous: \n{}" raise ValueError(msg_1 + msg_2 + msg_3.format(problem_ids.tolist())) else: return None
[ "def", "ensure_contiguity_in_observation_rows", "(", "obs_id_vector", ")", ":", "# Check that the choice situation id for each row is larger than or equal", "# to the choice situation id of the preceding row.", "contiguity_check_array", "=", "(", "obs_id_vector", "[", "1", ":", "]", "-", "obs_id_vector", "[", ":", "-", "1", "]", ")", ">=", "0", "if", "not", "contiguity_check_array", ".", "all", "(", ")", ":", "problem_ids", "=", "obs_id_vector", "[", "np", ".", "where", "(", "~", "contiguity_check_array", ")", "]", "msg_1", "=", "\"All rows pertaining to a given choice situation must be \"", "msg_2", "=", "\"contiguous. \\nRows pertaining to the following observation \"", "msg_3", "=", "\"id's are not contiguous: \\n{}\"", "raise", "ValueError", "(", "msg_1", "+", "msg_2", "+", "msg_3", ".", "format", "(", "problem_ids", ".", "tolist", "(", ")", ")", ")", "else", ":", "return", "None" ]
Ensures that all rows pertaining to a given choice situation are located next to one another. Raises a helpful ValueError otherwise. This check is needed because the hessian calculation function requires the design matrix to have contiguity in rows with the same observation id. Parameters ---------- rows_to_obs : 2D scipy sparse array. Should map each row of the long format dataframe to the unique observations in the dataset. obs_id_vector : 1D ndarray of ints. Should contain the id (i.e. a unique integer) that corresponds to each choice situation in the dataset. Returns ------- None.
[ "Ensures", "that", "all", "rows", "pertaining", "to", "a", "given", "choice", "situation", "are", "located", "next", "to", "one", "another", ".", "Raises", "a", "helpful", "ValueError", "otherwise", ".", "This", "check", "is", "needed", "because", "the", "hessian", "calculation", "function", "requires", "the", "design", "matrix", "to", "have", "contiguity", "in", "rows", "with", "the", "same", "observation", "id", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_tools.py#L1431-L1461
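A minimal illustration of the contiguity check above, assuming the same import path; the id vectors are arbitrary.

```python
import numpy as np
from pylogit.choice_tools import ensure_contiguity_in_observation_rows

# Rows for observation 1, then 2, then 3: contiguous, so this passes.
ensure_contiguity_in_observation_rows(np.array([1, 1, 2, 2, 3, 3]))

# Observation 1 reappears after observation 2, so the check raises.
try:
    ensure_contiguity_in_observation_rows(np.array([1, 2, 2, 1, 3, 3]))
except ValueError as e:
    print(e)
```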
830
timothyb0912/pylogit
pylogit/bootstrap_sampler.py
relate_obs_ids_to_chosen_alts
def relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array): """ Creates a dictionary that relates each unique alternative id to the set of observations ids that chose the given alternative. Parameters ---------- obs_id_array : 1D ndarray of ints. Should be a long-format array of observation ids. Each element should correspond to the unique id of the unit of observation that corresponds to the given row of the long-format data. Note that each unit of observation may have more than one associated choice situation. alt_id_array : 1D ndarray of ints. Should be a long-format array of alternative ids. Each element should denote the unique id of the alternative that corresponds to the given row of the long format data. choice_array : 1D ndarray of ints. Each element should be either a one or a zero, indicating whether the alternative on the given row of the long format data was chosen or not. Returns ------- chosen_alts_to_obs_ids : dict. Each key will be a unique value from `alt_id_array`. Each key's value will be a 1D ndarray that contains the sorted, unique observation ids of those observational units that chose the given alternative. """ # Figure out which units of observation chose each alternative. chosen_alts_to_obs_ids = {} for alt_id in np.sort(np.unique(alt_id_array)): # Determine which observations chose the current alternative. selection_condition =\ np.where((alt_id_array == alt_id) & (choice_array == 1)) # Store the sorted, unique ids that chose the current alternative. chosen_alts_to_obs_ids[alt_id] =\ np.sort(np.unique(obs_id_array[selection_condition])) # Return the desired dictionary. return chosen_alts_to_obs_ids
python
def relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array): """ Creates a dictionary that relates each unique alternative id to the set of observations ids that chose the given alternative. Parameters ---------- obs_id_array : 1D ndarray of ints. Should be a long-format array of observation ids. Each element should correspond to the unique id of the unit of observation that corresponds to the given row of the long-format data. Note that each unit of observation may have more than one associated choice situation. alt_id_array : 1D ndarray of ints. Should be a long-format array of alternative ids. Each element should denote the unique id of the alternative that corresponds to the given row of the long format data. choice_array : 1D ndarray of ints. Each element should be either a one or a zero, indicating whether the alternative on the given row of the long format data was chosen or not. Returns ------- chosen_alts_to_obs_ids : dict. Each key will be a unique value from `alt_id_array`. Each key's value will be a 1D ndarray that contains the sorted, unique observation ids of those observational units that chose the given alternative. """ # Figure out which units of observation chose each alternative. chosen_alts_to_obs_ids = {} for alt_id in np.sort(np.unique(alt_id_array)): # Determine which observations chose the current alternative. selection_condition =\ np.where((alt_id_array == alt_id) & (choice_array == 1)) # Store the sorted, unique ids that chose the current alternative. chosen_alts_to_obs_ids[alt_id] =\ np.sort(np.unique(obs_id_array[selection_condition])) # Return the desired dictionary. return chosen_alts_to_obs_ids
[ "def", "relate_obs_ids_to_chosen_alts", "(", "obs_id_array", ",", "alt_id_array", ",", "choice_array", ")", ":", "# Figure out which units of observation chose each alternative.", "chosen_alts_to_obs_ids", "=", "{", "}", "for", "alt_id", "in", "np", ".", "sort", "(", "np", ".", "unique", "(", "alt_id_array", ")", ")", ":", "# Determine which observations chose the current alternative.", "selection_condition", "=", "np", ".", "where", "(", "(", "alt_id_array", "==", "alt_id", ")", "&", "(", "choice_array", "==", "1", ")", ")", "# Store the sorted, unique ids that chose the current alternative.", "chosen_alts_to_obs_ids", "[", "alt_id", "]", "=", "np", ".", "sort", "(", "np", ".", "unique", "(", "obs_id_array", "[", "selection_condition", "]", ")", ")", "# Return the desired dictionary.", "return", "chosen_alts_to_obs_ids" ]
Creates a dictionary that relates each unique alternative id to the set of observations ids that chose the given alternative. Parameters ---------- obs_id_array : 1D ndarray of ints. Should be a long-format array of observation ids. Each element should correspond to the unique id of the unit of observation that corresponds to the given row of the long-format data. Note that each unit of observation may have more than one associated choice situation. alt_id_array : 1D ndarray of ints. Should be a long-format array of alternative ids. Each element should denote the unique id of the alternative that corresponds to the given row of the long format data. choice_array : 1D ndarray of ints. Each element should be either a one or a zero, indicating whether the alternative on the given row of the long format data was chosen or not. Returns ------- chosen_alts_to_obs_ids : dict. Each key will be a unique value from `alt_id_array`. Each key's value will be a 1D ndarray that contains the sorted, unique observation ids of those observational units that chose the given alternative.
[ "Creates", "a", "dictionary", "that", "relates", "each", "unique", "alternative", "id", "to", "the", "set", "of", "observations", "ids", "that", "chose", "the", "given", "alternative", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L13-L55
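A usage sketch for the record above, assuming pylogit exposes the function at pylogit.bootstrap_sampler as the path field suggests; the toy long-format arrays are invented.

```python
import numpy as np
from pylogit.bootstrap_sampler import relate_obs_ids_to_chosen_alts

obs_ids = np.array([1, 1, 2, 2, 3, 3])
alt_ids = np.array([1, 2, 1, 2, 1, 2])
choices = np.array([1, 0, 0, 1, 1, 0])

print(relate_obs_ids_to_chosen_alts(obs_ids, alt_ids, choices))
# {1: array([1, 3]), 2: array([2])}  -- observations 1 and 3 chose
# alternative 1, observation 2 chose alternative 2.
```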
831
timothyb0912/pylogit
pylogit/bootstrap_sampler.py
create_cross_sectional_bootstrap_samples
def create_cross_sectional_bootstrap_samples(obs_id_array, alt_id_array, choice_array, num_samples, seed=None): """ Determines the unique observations that will be present in each bootstrap sample. This function DOES NOT create the new design matrices or a new long-format dataframe for each bootstrap sample. Note that these will be correct bootstrap samples for cross-sectional datasets. This function will not work correctly for panel datasets. Parameters ---------- obs_id_array : 1D ndarray of ints. Each element should denote a unique observation id for the corresponding row of the long format array. alt_id_array : 1D ndarray of ints. Each element should denote a unique alternative id for the corresponding row of the long format array. choice_array : 1D ndarray of ints. Each element should be a one or a zero. The values should denote a whether or not the corresponding alternative in `alt_id_array` was chosen by the observational unit in the corresponding row of `obs_id_array.` num_samples : int. Denotes the number of bootstrap samples that need to be drawn. seed : non-negative int or None, optional. Denotes the random seed to be used in order to ensure reproducibility of the bootstrap sample generation. Default is None. If None, no seed will be used and the generation of the bootstrap samples will (in general) not be reproducible. Returns ------- ids_per_sample : 2D ndarray. Each row represents a complete bootstrap sample. Each column denotes a selected bootstrap observation that comprises the bootstrap sample. The elements of the array denote the observation ids of the chosen observational units. """ # Determine the units of observation that chose each alternative. chosen_alts_to_obs_ids =\ relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array) # Determine the number of unique units of observation per group and overall num_obs_per_group, tot_num_obs =\ get_num_obs_choosing_each_alternative(chosen_alts_to_obs_ids) # Initialize the array that will store the observation ids for each sample ids_per_sample = np.empty((num_samples, tot_num_obs), dtype=float) if seed is not None: # Check the validity of the seed argument. if not isinstance(seed, int): msg = "`boot_seed` MUST be an int." raise ValueError(msg) # If desiring reproducibility, set the random seed within numpy np.random.seed(seed) # Initialize a variable to keep track of what column we're on. col_idx = 0 for alt_id in num_obs_per_group: # Get the set of observations that chose the current alternative. relevant_ids = chosen_alts_to_obs_ids[alt_id] # Determine the number of needed resampled ids. resample_size = num_obs_per_group[alt_id] # Resample, with replacement, observations who chose this alternative. current_ids = (np.random.choice(relevant_ids, size=resample_size * num_samples, replace=True) .reshape((num_samples, resample_size))) # Determine the last column index to use when storing the resampled ids end_col = col_idx + resample_size # Assign the sampled ids to the correct columns of ids_per_sample ids_per_sample[:, col_idx:end_col] = current_ids # Update the column index col_idx += resample_size # Return the resampled observation ids. return ids_per_sample
python
def create_cross_sectional_bootstrap_samples(obs_id_array, alt_id_array, choice_array, num_samples, seed=None): """ Determines the unique observations that will be present in each bootstrap sample. This function DOES NOT create the new design matrices or a new long-format dataframe for each bootstrap sample. Note that these will be correct bootstrap samples for cross-sectional datasets. This function will not work correctly for panel datasets. Parameters ---------- obs_id_array : 1D ndarray of ints. Each element should denote a unique observation id for the corresponding row of the long format array. alt_id_array : 1D ndarray of ints. Each element should denote a unique alternative id for the corresponding row of the long format array. choice_array : 1D ndarray of ints. Each element should be a one or a zero. The values should denote a whether or not the corresponding alternative in `alt_id_array` was chosen by the observational unit in the corresponding row of `obs_id_array.` num_samples : int. Denotes the number of bootstrap samples that need to be drawn. seed : non-negative int or None, optional. Denotes the random seed to be used in order to ensure reproducibility of the bootstrap sample generation. Default is None. If None, no seed will be used and the generation of the bootstrap samples will (in general) not be reproducible. Returns ------- ids_per_sample : 2D ndarray. Each row represents a complete bootstrap sample. Each column denotes a selected bootstrap observation that comprises the bootstrap sample. The elements of the array denote the observation ids of the chosen observational units. """ # Determine the units of observation that chose each alternative. chosen_alts_to_obs_ids =\ relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array) # Determine the number of unique units of observation per group and overall num_obs_per_group, tot_num_obs =\ get_num_obs_choosing_each_alternative(chosen_alts_to_obs_ids) # Initialize the array that will store the observation ids for each sample ids_per_sample = np.empty((num_samples, tot_num_obs), dtype=float) if seed is not None: # Check the validity of the seed argument. if not isinstance(seed, int): msg = "`boot_seed` MUST be an int." raise ValueError(msg) # If desiring reproducibility, set the random seed within numpy np.random.seed(seed) # Initialize a variable to keep track of what column we're on. col_idx = 0 for alt_id in num_obs_per_group: # Get the set of observations that chose the current alternative. relevant_ids = chosen_alts_to_obs_ids[alt_id] # Determine the number of needed resampled ids. resample_size = num_obs_per_group[alt_id] # Resample, with replacement, observations who chose this alternative. current_ids = (np.random.choice(relevant_ids, size=resample_size * num_samples, replace=True) .reshape((num_samples, resample_size))) # Determine the last column index to use when storing the resampled ids end_col = col_idx + resample_size # Assign the sampled ids to the correct columns of ids_per_sample ids_per_sample[:, col_idx:end_col] = current_ids # Update the column index col_idx += resample_size # Return the resampled observation ids. return ids_per_sample
[ "def", "create_cross_sectional_bootstrap_samples", "(", "obs_id_array", ",", "alt_id_array", ",", "choice_array", ",", "num_samples", ",", "seed", "=", "None", ")", ":", "# Determine the units of observation that chose each alternative.", "chosen_alts_to_obs_ids", "=", "relate_obs_ids_to_chosen_alts", "(", "obs_id_array", ",", "alt_id_array", ",", "choice_array", ")", "# Determine the number of unique units of observation per group and overall", "num_obs_per_group", ",", "tot_num_obs", "=", "get_num_obs_choosing_each_alternative", "(", "chosen_alts_to_obs_ids", ")", "# Initialize the array that will store the observation ids for each sample", "ids_per_sample", "=", "np", ".", "empty", "(", "(", "num_samples", ",", "tot_num_obs", ")", ",", "dtype", "=", "float", ")", "if", "seed", "is", "not", "None", ":", "# Check the validity of the seed argument.", "if", "not", "isinstance", "(", "seed", ",", "int", ")", ":", "msg", "=", "\"`boot_seed` MUST be an int.\"", "raise", "ValueError", "(", "msg", ")", "# If desiring reproducibility, set the random seed within numpy", "np", ".", "random", ".", "seed", "(", "seed", ")", "# Initialize a variable to keep track of what column we're on.", "col_idx", "=", "0", "for", "alt_id", "in", "num_obs_per_group", ":", "# Get the set of observations that chose the current alternative.", "relevant_ids", "=", "chosen_alts_to_obs_ids", "[", "alt_id", "]", "# Determine the number of needed resampled ids.", "resample_size", "=", "num_obs_per_group", "[", "alt_id", "]", "# Resample, with replacement, observations who chose this alternative.", "current_ids", "=", "(", "np", ".", "random", ".", "choice", "(", "relevant_ids", ",", "size", "=", "resample_size", "*", "num_samples", ",", "replace", "=", "True", ")", ".", "reshape", "(", "(", "num_samples", ",", "resample_size", ")", ")", ")", "# Determine the last column index to use when storing the resampled ids", "end_col", "=", "col_idx", "+", "resample_size", "# Assign the sampled ids to the correct columns of ids_per_sample", "ids_per_sample", "[", ":", ",", "col_idx", ":", "end_col", "]", "=", "current_ids", "# Update the column index", "col_idx", "+=", "resample_size", "# Return the resampled observation ids.", "return", "ids_per_sample" ]
Determines the unique observations that will be present in each bootstrap sample. This function DOES NOT create the new design matrices or a new long-format dataframe for each bootstrap sample. Note that these will be correct bootstrap samples for cross-sectional datasets. This function will not work correctly for panel datasets. Parameters ---------- obs_id_array : 1D ndarray of ints. Each element should denote a unique observation id for the corresponding row of the long format array. alt_id_array : 1D ndarray of ints. Each element should denote a unique alternative id for the corresponding row of the long format array. choice_array : 1D ndarray of ints. Each element should be a one or a zero. The values should denote a whether or not the corresponding alternative in `alt_id_array` was chosen by the observational unit in the corresponding row of `obs_id_array.` num_samples : int. Denotes the number of bootstrap samples that need to be drawn. seed : non-negative int or None, optional. Denotes the random seed to be used in order to ensure reproducibility of the bootstrap sample generation. Default is None. If None, no seed will be used and the generation of the bootstrap samples will (in general) not be reproducible. Returns ------- ids_per_sample : 2D ndarray. Each row represents a complete bootstrap sample. Each column denotes a selected bootstrap observation that comprises the bootstrap sample. The elements of the array denote the observation ids of the chosen observational units.
[ "Determines", "the", "unique", "observations", "that", "will", "be", "present", "in", "each", "bootstrap", "sample", ".", "This", "function", "DOES", "NOT", "create", "the", "new", "design", "matrices", "or", "a", "new", "long", "-", "format", "dataframe", "for", "each", "bootstrap", "sample", ".", "Note", "that", "these", "will", "be", "correct", "bootstrap", "samples", "for", "cross", "-", "sectional", "datasets", ".", "This", "function", "will", "not", "work", "correctly", "for", "panel", "datasets", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L95-L177
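Illustrative usage sketch for the function above (not part of the dataset record): the toy long-format arrays and the number of samples are invented, and the import path simply mirrors the record's pylogit/bootstrap_sampler.py module.

import numpy as np
from pylogit.bootstrap_sampler import create_cross_sectional_bootstrap_samples

# Hypothetical long-format data: observations 1 and 2 each face alternatives 1 and 2.
obs_ids = np.array([1, 1, 2, 2])
alt_ids = np.array([1, 2, 1, 2])
choices = np.array([1, 0, 0, 1])  # observation 1 chose alternative 1, observation 2 chose alternative 2

# Draw 5 bootstrap samples of observation ids, resampling within each group of
# observations that chose the same alternative.
samples = create_cross_sectional_bootstrap_samples(obs_ids, alt_ids, choices, 5, seed=42)
print(samples.shape)  # (5, 2): one row per bootstrap sample, one column per resampled observation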
832
timothyb0912/pylogit
pylogit/bootstrap_sampler.py
create_bootstrap_id_array
def create_bootstrap_id_array(obs_id_per_sample): """ Creates a 2D ndarray that contains the 'bootstrap ids' for each replication of each unit of observation that is in the set of bootstrap samples. Parameters ---------- obs_id_per_sample : 2D ndarray of ints. Should have one row for each bootstrap sample. Should have one column for each observational unit that is serving as a new bootstrap observational unit. Returns ------- bootstrap_id_array : 2D ndarray of ints. Will have the same shape as `obs_id_per_sample`. Each element will denote the fake observational id in the new bootstrap dataset. """ # Determine the shape of the object to be returned. n_rows, n_cols = obs_id_per_sample.shape # Create the array of bootstrap ids. bootstrap_id_array =\ np.tile(np.arange(n_cols) + 1, n_rows).reshape((n_rows, n_cols)) # Return the desired object return bootstrap_id_array
python
def create_bootstrap_id_array(obs_id_per_sample): """ Creates a 2D ndarray that contains the 'bootstrap ids' for each replication of each unit of observation that is in the set of bootstrap samples. Parameters ---------- obs_id_per_sample : 2D ndarray of ints. Should have one row for each bootstrap sample. Should have one column for each observational unit that is serving as a new bootstrap observational unit. Returns ------- bootstrap_id_array : 2D ndarray of ints. Will have the same shape as `obs_id_per_sample`. Each element will denote the fake observational id in the new bootstrap dataset. """ # Determine the shape of the object to be returned. n_rows, n_cols = obs_id_per_sample.shape # Create the array of bootstrap ids. bootstrap_id_array =\ np.tile(np.arange(n_cols) + 1, n_rows).reshape((n_rows, n_cols)) # Return the desired object return bootstrap_id_array
[ "def", "create_bootstrap_id_array", "(", "obs_id_per_sample", ")", ":", "# Determine the shape of the object to be returned.", "n_rows", ",", "n_cols", "=", "obs_id_per_sample", ".", "shape", "# Create the array of bootstrap ids.", "bootstrap_id_array", "=", "np", ".", "tile", "(", "np", ".", "arange", "(", "n_cols", ")", "+", "1", ",", "n_rows", ")", ".", "reshape", "(", "(", "n_rows", ",", "n_cols", ")", ")", "# Return the desired object", "return", "bootstrap_id_array" ]
Creates a 2D ndarray that contains the 'bootstrap ids' for each replication of each unit of observation that is in the set of bootstrap samples. Parameters ---------- obs_id_per_sample : 2D ndarray of ints. Should have one row for each bootstrap sample. Should have one column for each observational unit that is serving as a new bootstrap observational unit. Returns ------- bootstrap_id_array : 2D ndarray of ints. Will have the same shape as `obs_id_per_sample`. Each element will denote the fake observational id in the new bootstrap dataset.
[ "Creates", "a", "2D", "ndarray", "that", "contains", "the", "bootstrap", "ids", "for", "each", "replication", "of", "each", "unit", "of", "observation", "that", "is", "an", "the", "set", "of", "bootstrap", "samples", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L180-L204
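Illustrative usage sketch (the toy input is invented to show the tiling behaviour described in the docstring):

import numpy as np
from pylogit.bootstrap_sampler import create_bootstrap_id_array

# Hypothetical sampler output: 2 bootstrap samples, each made of 3 resampled observations.
obs_id_per_sample = np.array([[10, 12, 10],
                              [11, 10, 12]])

# Every row gets the same fresh ids 1..n_cols, one per resampled observation.
print(create_bootstrap_id_array(obs_id_per_sample))
# [[1 2 3]
#  [1 2 3]]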
833
timothyb0912/pylogit
pylogit/bootstrap_sampler.py
check_column_existence
def check_column_existence(col_name, df, presence=True): """ Checks whether or not `col_name` is in `df` and raises a helpful error msg if the desired condition is not met. Parameters ---------- col_name : str. Should represent a column whose presence in `df` is to be checked. df : pandas DataFrame. The dataframe that will be checked for the presence of `col_name`. presence : bool, optional. If True, then this function checks for the PRESENCE of `col_name` from `df`. If False, then this function checks for the ABSENCE of `col_name` in `df`. Default == True. Returns ------- None. """ if presence: if col_name not in df.columns: msg = "Ensure that `{}` is in `df.columns`." raise ValueError(msg.format(col_name)) else: if col_name in df.columns: msg = "Ensure that `{}` is not in `df.columns`." raise ValueError(msg.format(col_name)) return None
python
def check_column_existence(col_name, df, presence=True): """ Checks whether or not `col_name` is in `df` and raises a helpful error msg if the desired condition is not met. Parameters ---------- col_name : str. Should represent a column whose presence in `df` is to be checked. df : pandas DataFrame. The dataframe that will be checked for the presence of `col_name`. presence : bool, optional. If True, then this function checks for the PRESENCE of `col_name` from `df`. If False, then this function checks for the ABSENCE of `col_name` in `df`. Default == True. Returns ------- None. """ if presence: if col_name not in df.columns: msg = "Ensure that `{}` is in `df.columns`." raise ValueError(msg.format(col_name)) else: if col_name in df.columns: msg = "Ensure that `{}` is not in `df.columns`." raise ValueError(msg.format(col_name)) return None
[ "def", "check_column_existence", "(", "col_name", ",", "df", ",", "presence", "=", "True", ")", ":", "if", "presence", ":", "if", "col_name", "not", "in", "df", ".", "columns", ":", "msg", "=", "\"Ensure that `{}` is in `df.columns`.\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "col_name", ")", ")", "else", ":", "if", "col_name", "in", "df", ".", "columns", ":", "msg", "=", "\"Ensure that `{}` is not in `df.columns`.\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "col_name", ")", ")", "return", "None" ]
Checks whether or not `col_name` is in `df` and raises a helpful error msg if the desired condition is not met. Parameters ---------- col_name : str. Should represent a column whose presence in `df` is to be checked. df : pandas DataFrame. The dataframe that will be checked for the presence of `col_name`. presence : bool, optional. If True, then this function checks for the PRESENCE of `col_name` from `df`. If False, then this function checks for the ABSENCE of `col_name` in `df`. Default == True. Returns ------- None.
[ "Checks", "whether", "or", "not", "col_name", "is", "in", "df", "and", "raises", "a", "helpful", "error", "msg", "if", "the", "desired", "condition", "is", "not", "met", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L245-L273
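Illustrative usage sketch (the small dataframe and column names are invented):

import pandas as pd
from pylogit.bootstrap_sampler import check_column_existence

# Hypothetical long-format dataframe with an observation id column.
df = pd.DataFrame({"obs_id": [1, 1, 2], "choice": [1, 0, 1]})

check_column_existence("obs_id", df, presence=True)         # passes silently
check_column_existence("bootstrap_id", df, presence=False)  # passes silently
# check_column_existence("weights", df, presence=True)      # would raise ValueError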
834
timothyb0912/pylogit
pylogit/bootstrap_sampler.py
ensure_resampled_obs_ids_in_df
def ensure_resampled_obs_ids_in_df(resampled_obs_ids, orig_obs_id_array): """ Checks whether all ids in `resampled_obs_ids` are in `orig_obs_id_array`. Raises a helpful ValueError if not. Parameters ---------- resampled_obs_ids : 1D ndarray of ints. Should contain the observation ids of the observational units that will be used in the current bootstrap sample. orig_obs_id_array : 1D ndarray of ints. Should contain the observation ids of the observational units in the original dataframe containing the data for this model. Returns ------- None. """ if not np.in1d(resampled_obs_ids, orig_obs_id_array).all(): msg =\ "All values in `resampled_obs_ids` MUST be in `orig_obs_id_array`." raise ValueError(msg) return None
python
def ensure_resampled_obs_ids_in_df(resampled_obs_ids, orig_obs_id_array): """ Checks whether all ids in `resampled_obs_ids` are in `orig_obs_id_array`. Raises a helpful ValueError if not. Parameters ---------- resampled_obs_ids : 1D ndarray of ints. Should contain the observation ids of the observational units that will be used in the current bootstrap sample. orig_obs_id_array : 1D ndarray of ints. Should contain the observation ids of the observational units in the original dataframe containing the data for this model. Returns ------- None. """ if not np.in1d(resampled_obs_ids, orig_obs_id_array).all(): msg =\ "All values in `resampled_obs_ids` MUST be in `orig_obs_id_array`." raise ValueError(msg) return None
[ "def", "ensure_resampled_obs_ids_in_df", "(", "resampled_obs_ids", ",", "orig_obs_id_array", ")", ":", "if", "not", "np", ".", "in1d", "(", "resampled_obs_ids", ",", "orig_obs_id_array", ")", ".", "all", "(", ")", ":", "msg", "=", "\"All values in `resampled_obs_ids` MUST be in `orig_obs_id_array`.\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Checks whether all ids in `resampled_obs_ids` are in `orig_obs_id_array`. Raises a helpful ValueError if not. Parameters ---------- resampled_obs_ids : 1D ndarray of ints. Should contain the observation ids of the observational units that will be used in the current bootstrap sample. orig_obs_id_array : 1D ndarray of ints. Should contain the observation ids of the observational units in the original dataframe containing the data for this model. Returns ------- None.
[ "Checks", "whether", "all", "ids", "in", "resampled_obs_ids", "are", "in", "orig_obs_id_array", ".", "Raises", "a", "helpful", "ValueError", "if", "not", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L276-L298
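Illustrative usage sketch (arrays invented):

import numpy as np
from pylogit.bootstrap_sampler import ensure_resampled_obs_ids_in_df

orig_ids = np.array([1, 1, 2, 2, 3, 3])  # hypothetical original observation ids
resampled = np.array([3, 1, 1])          # every value is present in orig_ids, so no error
ensure_resampled_obs_ids_in_df(resampled, orig_ids)

# ensure_resampled_obs_ids_in_df(np.array([4]), orig_ids)  # would raise ValueError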
835
timothyb0912/pylogit
pylogit/bootstrap_sampler.py
create_bootstrap_dataframe
def create_bootstrap_dataframe(orig_df, obs_id_col, resampled_obs_ids_1d, groupby_dict, boot_id_col="bootstrap_id"): """ Will create the altered dataframe of data needed to estimate a choice model with the particular observations that belong to the current bootstrap sample. Parameters ---------- orig_df : pandas DataFrame. Should be long-format dataframe containing the data used to estimate the desired choice model. obs_id_col : str. Should be a column name within `orig_df`. Should denote the original observation id column. resampled_obs_ids_1d : 1D ndarray of ints. Each value should represent the alternative id of a given bootstrap replicate. groupby_dict : dict. Each key will be a unique value in `orig_df[obs_id_col]` and each value will be the rows of `orig_df` where `orig_df[obs_id_col] == key`. boot_id_col : str, optional. Denotes the new column that will be created to specify the bootstrap observation ids for choice model estimation. Returns ------- bootstrap_df : pandas Dataframe. Will contain all the same columns as `orig_df` as well as the additional `boot_id_col`. For each value in `resampled_obs_ids_1d`, `bootstrap_df` will contain the long format rows from `orig_df` that have the given observation id. """ # Check the validity of the passed arguments. check_column_existence(obs_id_col, orig_df, presence=True) check_column_existence(boot_id_col, orig_df, presence=False) # Alias the observation id column obs_id_values = orig_df[obs_id_col].values # Check the validity of the resampled observation ids. ensure_resampled_obs_ids_in_df(resampled_obs_ids_1d, obs_id_values) # Initialize a list to store the component dataframes that will be # concatenated to form the final bootstrap_df component_dfs = [] # Populate component_dfs for boot_id, obs_id in enumerate(resampled_obs_ids_1d): # Extract the dataframe that we desire. extracted_df = groupby_dict[obs_id].copy() # Add the bootstrap id value. extracted_df[boot_id_col] = boot_id + 1 # Store the component dataframe component_dfs.append(extracted_df) # Create and return the desired dataframe. bootstrap_df = pd.concat(component_dfs, axis=0, ignore_index=True) return bootstrap_df
python
def create_bootstrap_dataframe(orig_df, obs_id_col, resampled_obs_ids_1d, groupby_dict, boot_id_col="bootstrap_id"): """ Will create the altered dataframe of data needed to estimate a choice model with the particular observations that belong to the current bootstrap sample. Parameters ---------- orig_df : pandas DataFrame. Should be long-format dataframe containing the data used to estimate the desired choice model. obs_id_col : str. Should be a column name within `orig_df`. Should denote the original observation id column. resampled_obs_ids_1d : 1D ndarray of ints. Each value should represent the alternative id of a given bootstrap replicate. groupby_dict : dict. Each key will be a unique value in `orig_df[obs_id_col]` and each value will be the rows of `orig_df` where `orig_df[obs_id_col] == key`. boot_id_col : str, optional. Denotes the new column that will be created to specify the bootstrap observation ids for choice model estimation. Returns ------- bootstrap_df : pandas Dataframe. Will contain all the same columns as `orig_df` as well as the additional `boot_id_col`. For each value in `resampled_obs_ids_1d`, `bootstrap_df` will contain the long format rows from `orig_df` that have the given observation id. """ # Check the validity of the passed arguments. check_column_existence(obs_id_col, orig_df, presence=True) check_column_existence(boot_id_col, orig_df, presence=False) # Alias the observation id column obs_id_values = orig_df[obs_id_col].values # Check the validity of the resampled observation ids. ensure_resampled_obs_ids_in_df(resampled_obs_ids_1d, obs_id_values) # Initialize a list to store the component dataframes that will be # concatenated to form the final bootstrap_df component_dfs = [] # Populate component_dfs for boot_id, obs_id in enumerate(resampled_obs_ids_1d): # Extract the dataframe that we desire. extracted_df = groupby_dict[obs_id].copy() # Add the bootstrap id value. extracted_df[boot_id_col] = boot_id + 1 # Store the component dataframe component_dfs.append(extracted_df) # Create and return the desired dataframe. bootstrap_df = pd.concat(component_dfs, axis=0, ignore_index=True) return bootstrap_df
[ "def", "create_bootstrap_dataframe", "(", "orig_df", ",", "obs_id_col", ",", "resampled_obs_ids_1d", ",", "groupby_dict", ",", "boot_id_col", "=", "\"bootstrap_id\"", ")", ":", "# Check the validity of the passed arguments.", "check_column_existence", "(", "obs_id_col", ",", "orig_df", ",", "presence", "=", "True", ")", "check_column_existence", "(", "boot_id_col", ",", "orig_df", ",", "presence", "=", "False", ")", "# Alias the observation id column", "obs_id_values", "=", "orig_df", "[", "obs_id_col", "]", ".", "values", "# Check the validity of the resampled observation ids.", "ensure_resampled_obs_ids_in_df", "(", "resampled_obs_ids_1d", ",", "obs_id_values", ")", "# Initialize a list to store the component dataframes that will be", "# concatenated to form the final bootstrap_df", "component_dfs", "=", "[", "]", "# Populate component_dfs", "for", "boot_id", ",", "obs_id", "in", "enumerate", "(", "resampled_obs_ids_1d", ")", ":", "# Extract the dataframe that we desire.", "extracted_df", "=", "groupby_dict", "[", "obs_id", "]", ".", "copy", "(", ")", "# Add the bootstrap id value.", "extracted_df", "[", "boot_id_col", "]", "=", "boot_id", "+", "1", "# Store the component dataframe", "component_dfs", ".", "append", "(", "extracted_df", ")", "# Create and return the desired dataframe.", "bootstrap_df", "=", "pd", ".", "concat", "(", "component_dfs", ",", "axis", "=", "0", ",", "ignore_index", "=", "True", ")", "return", "bootstrap_df" ]
Will create the altered dataframe of data needed to estimate a choice model with the particular observations that belong to the current bootstrap sample. Parameters ---------- orig_df : pandas DataFrame. Should be long-format dataframe containing the data used to estimate the desired choice model. obs_id_col : str. Should be a column name within `orig_df`. Should denote the original observation id column. resampled_obs_ids_1d : 1D ndarray of ints. Each value should represent the alternative id of a given bootstrap replicate. groupby_dict : dict. Each key will be a unique value in `orig_df[obs_id_col]` and each value will be the rows of `orig_df` where `orig_df[obs_id_col] == key`. boot_id_col : str, optional. Denotes the new column that will be created to specify the bootstrap observation ids for choice model estimation. Returns ------- bootstrap_df : pandas Dataframe. Will contain all the same columns as `orig_df` as well as the additional `boot_id_col`. For each value in `resampled_obs_ids_1d`, `bootstrap_df` will contain the long format rows from `orig_df` that have the given observation id.
[ "Will", "create", "the", "altered", "dataframe", "of", "data", "needed", "to", "estimate", "a", "choice", "model", "with", "the", "particular", "observations", "that", "belong", "to", "the", "current", "bootstrap", "sample", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L301-L360
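Illustrative usage sketch (the toy dataframe and resampled ids are invented; the repo's own create_deepcopied_groupby_dict would normally build `groupby_dict`, but a plain dict comprehension is used here to keep the example self-contained):

import numpy as np
import pandas as pd
from pylogit.bootstrap_sampler import create_bootstrap_dataframe

# Hypothetical long-format data: observations 1 and 2, two alternatives each.
df = pd.DataFrame({"obs_id": [1, 1, 2, 2],
                   "alt_id": [1, 2, 1, 2],
                   "choice": [1, 0, 0, 1]})

# Map each observation id to a copy of its rows.
groupby_dict = {obs_id: sub_df.copy() for obs_id, sub_df in df.groupby("obs_id")}

# One bootstrap sample that drew observation 2 twice and observation 1 once.
resampled_ids = np.array([2, 2, 1])
boot_df = create_bootstrap_dataframe(df, "obs_id", resampled_ids, groupby_dict)
print(boot_df[["obs_id", "bootstrap_id"]])  # bootstrap ids 1, 2, 3 label the three replicates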
836
timothyb0912/pylogit
pylogit/bootstrap.py
get_param_names
def get_param_names(model_obj): """ Extracts all the names to be displayed for the estimated parameters. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. Returns ------- all_names : list of strings. There will be one element for each estimated parameter. The order of the parameter names will be `['nest_parameters', 'shape_parameters', 'outside_intercepts', 'index_coefficients']`. """ # Get the index coefficient names all_names = deepcopy(model_obj.ind_var_names) # Add the intercept names if any exist if model_obj.intercept_names is not None: all_names = model_obj.intercept_names + all_names # Add the shape names if any exist if model_obj.shape_names is not None: all_names = model_obj.shape_names + all_names # Add the nest names if any exist if model_obj.nest_names is not None: all_names = model_obj.nest_names + all_names return all_names
python
def get_param_names(model_obj): """ Extracts all the names to be displayed for the estimated parameters. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. Returns ------- all_names : list of strings. There will be one element for each estimated parameter. The order of the parameter names will be `['nest_parameters', 'shape_parameters', 'outside_intercepts', 'index_coefficients']`. """ # Get the index coefficient names all_names = deepcopy(model_obj.ind_var_names) # Add the intercept names if any exist if model_obj.intercept_names is not None: all_names = model_obj.intercept_names + all_names # Add the shape names if any exist if model_obj.shape_names is not None: all_names = model_obj.shape_names + all_names # Add the nest names if any exist if model_obj.nest_names is not None: all_names = model_obj.nest_names + all_names return all_names
[ "def", "get_param_names", "(", "model_obj", ")", ":", "# Get the index coefficient names", "all_names", "=", "deepcopy", "(", "model_obj", ".", "ind_var_names", ")", "# Add the intercept names if any exist", "if", "model_obj", ".", "intercept_names", "is", "not", "None", ":", "all_names", "=", "model_obj", ".", "intercept_names", "+", "all_names", "# Add the shape names if any exist", "if", "model_obj", ".", "shape_names", "is", "not", "None", ":", "all_names", "=", "model_obj", ".", "shape_names", "+", "all_names", "# Add the nest names if any exist", "if", "model_obj", ".", "nest_names", "is", "not", "None", ":", "all_names", "=", "model_obj", ".", "nest_names", "+", "all_names", "return", "all_names" ]
Extracts all the names to be displayed for the estimated parameters. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. Returns ------- all_names : list of strings. There will be one element for each estimated parameter. The order of the parameter names will be `['nest_parameters', 'shape_parameters', 'outside_intercepts', 'index_coefficients']`.
[ "Extracts", "all", "the", "names", "to", "be", "displayed", "for", "the", "estimated", "parameters", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L46-L75
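Illustrative usage sketch (a SimpleNamespace stands in for an estimated model object; the attribute names come from the docstring, while the coefficient names are invented):

from types import SimpleNamespace
from pylogit.bootstrap import get_param_names

# Stand-in for an MNDC model with two index coefficients and one outside intercept.
fake_model = SimpleNamespace(ind_var_names=["travel_time", "travel_cost"],
                             intercept_names=["ASC_alt_2"],
                             shape_names=None,
                             nest_names=None)

print(get_param_names(fake_model))
# ['ASC_alt_2', 'travel_time', 'travel_cost']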
837
timothyb0912/pylogit
pylogit/bootstrap.py
get_param_list_for_prediction
def get_param_list_for_prediction(model_obj, replicates): """ Create the `param_list` argument for use with `model_obj.predict`. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. This model should have already undergone a complete estimation process. I.e. its `fit_mle` method should have been called without `just_point=True`. replicates : 2D ndarray. Should represent the set of parameter values that we now wish to partition for use with the `model_obj.predict` method. Returns ------- param_list : list. Contains four elements, each being a numpy array. Either all of the arrays should be 1D or all of the arrays should be 2D. If 2D, the arrays should have the same number of columns. Each column being a particular set of parameter values that one wants to predict with. The first element in the list should be the index coefficients. The second element should contain the 'outside' intercept parameters if there are any, or None otherwise. The third element should contain the shape parameters if there are any or None otherwise. The fourth element should contain the nest coefficients if there are any or None otherwise. Default == None. """ # Check the validity of the passed arguments ensure_samples_is_ndim_ndarray(replicates, ndim=2, name='replicates') # Determine the number of index coefficients, outside intercepts, # shape parameters, and nest parameters num_idx_coefs = len(model_obj.ind_var_names) intercept_names = model_obj.intercept_names num_outside_intercepts =\ 0 if intercept_names is None else len(intercept_names) shape_names = model_obj.shape_names num_shapes = 0 if shape_names is None else len(shape_names) nest_names = model_obj.nest_names num_nests = 0 if nest_names is None else len(nest_names) parameter_numbers =\ [num_nests, num_shapes, num_outside_intercepts, num_idx_coefs] current_idx = 0 param_list = [] for param_num in parameter_numbers: if param_num == 0: param_list.insert(0, None) continue upper_idx = current_idx + param_num param_list.insert(0, replicates[:, current_idx:upper_idx].T) current_idx += param_num return param_list
python
def get_param_list_for_prediction(model_obj, replicates): """ Create the `param_list` argument for use with `model_obj.predict`. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. This model should have already undergone a complete estimation process. I.e. its `fit_mle` method should have been called without `just_point=True`. replicates : 2D ndarray. Should represent the set of parameter values that we now wish to partition for use with the `model_obj.predict` method. Returns ------- param_list : list. Contains four elements, each being a numpy array. Either all of the arrays should be 1D or all of the arrays should be 2D. If 2D, the arrays should have the same number of columns. Each column being a particular set of parameter values that one wants to predict with. The first element in the list should be the index coefficients. The second element should contain the 'outside' intercept parameters if there are any, or None otherwise. The third element should contain the shape parameters if there are any or None otherwise. The fourth element should contain the nest coefficients if there are any or None otherwise. Default == None. """ # Check the validity of the passed arguments ensure_samples_is_ndim_ndarray(replicates, ndim=2, name='replicates') # Determine the number of index coefficients, outside intercepts, # shape parameters, and nest parameters num_idx_coefs = len(model_obj.ind_var_names) intercept_names = model_obj.intercept_names num_outside_intercepts =\ 0 if intercept_names is None else len(intercept_names) shape_names = model_obj.shape_names num_shapes = 0 if shape_names is None else len(shape_names) nest_names = model_obj.nest_names num_nests = 0 if nest_names is None else len(nest_names) parameter_numbers =\ [num_nests, num_shapes, num_outside_intercepts, num_idx_coefs] current_idx = 0 param_list = [] for param_num in parameter_numbers: if param_num == 0: param_list.insert(0, None) continue upper_idx = current_idx + param_num param_list.insert(0, replicates[:, current_idx:upper_idx].T) current_idx += param_num return param_list
[ "def", "get_param_list_for_prediction", "(", "model_obj", ",", "replicates", ")", ":", "# Check the validity of the passed arguments", "ensure_samples_is_ndim_ndarray", "(", "replicates", ",", "ndim", "=", "2", ",", "name", "=", "'replicates'", ")", "# Determine the number of index coefficients, outside intercepts,", "# shape parameters, and nest parameters", "num_idx_coefs", "=", "len", "(", "model_obj", ".", "ind_var_names", ")", "intercept_names", "=", "model_obj", ".", "intercept_names", "num_outside_intercepts", "=", "0", "if", "intercept_names", "is", "None", "else", "len", "(", "intercept_names", ")", "shape_names", "=", "model_obj", ".", "shape_names", "num_shapes", "=", "0", "if", "shape_names", "is", "None", "else", "len", "(", "shape_names", ")", "nest_names", "=", "model_obj", ".", "nest_names", "num_nests", "=", "0", "if", "nest_names", "is", "None", "else", "len", "(", "nest_names", ")", "parameter_numbers", "=", "[", "num_nests", ",", "num_shapes", ",", "num_outside_intercepts", ",", "num_idx_coefs", "]", "current_idx", "=", "0", "param_list", "=", "[", "]", "for", "param_num", "in", "parameter_numbers", ":", "if", "param_num", "==", "0", ":", "param_list", ".", "insert", "(", "0", ",", "None", ")", "continue", "upper_idx", "=", "current_idx", "+", "param_num", "param_list", ".", "insert", "(", "0", ",", "replicates", "[", ":", ",", "current_idx", ":", "upper_idx", "]", ".", "T", ")", "current_idx", "+=", "param_num", "return", "param_list" ]
Create the `param_list` argument for use with `model_obj.predict`. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. This model should have already undergone a complete estimation process. I.e. its `fit_mle` method should have been called without `just_point=True`. replicates : 2D ndarray. Should represent the set of parameter values that we now wish to partition for use with the `model_obj.predict` method. Returns ------- param_list : list. Contains four elements, each being a numpy array. Either all of the arrays should be 1D or all of the arrays should be 2D. If 2D, the arrays should have the same number of columns. Each column being a particular set of parameter values that one wants to predict with. The first element in the list should be the index coefficients. The second element should contain the 'outside' intercept parameters if there are any, or None otherwise. The third element should contain the shape parameters if there are any or None otherwise. The fourth element should contain the nest coefficients if there are any or None otherwise. Default == None.
[ "Create", "the", "param_list", "argument", "for", "use", "with", "model_obj", ".", "predict", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L78-L135
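Illustrative usage sketch (the stand-in model object and the replicate values are invented; the replicate columns follow the nest / shape / intercept / index ordering noted in the docstrings above):

import numpy as np
from types import SimpleNamespace
from pylogit.bootstrap import get_param_list_for_prediction

# Stand-in model: two index coefficients, one outside intercept, no shape or nest parameters.
fake_model = SimpleNamespace(ind_var_names=["travel_time", "travel_cost"],
                             intercept_names=["ASC_alt_2"],
                             shape_names=None,
                             nest_names=None)

# Three bootstrap replicates: column 0 is the intercept, columns 1-2 are the index coefficients.
replicates = np.arange(9, dtype=float).reshape((3, 3))

param_list = get_param_list_for_prediction(fake_model, replicates)
print([None if p is None else p.shape for p in param_list])
# [(2, 3), (1, 3), None, None] -> index coefficients, intercepts, shapes, nests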
838
timothyb0912/pylogit
pylogit/bootstrap.py
Boot.generate_bootstrap_replicates
def generate_bootstrap_replicates(self, num_samples, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None, boot_seed=None, weights=None): """ Generates the bootstrap replicates for one's given model and dataset. Parameters ---------- num_samples : positive int. Specifies the number of bootstrap samples that are to be drawn. mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_val` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. boot_seed = non-negative int or None, optional. Denotes the random seed to be used when generating the bootstrap samples. If None, the sample generation process will generally be non-reproducible. Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. 
In latent class models, the weights may be the probability of being a particular class. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute. """ print("Generating Bootstrap Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Check the passed arguments for validity. # Create an array of the observation ids obs_id_array = self.model_obj.data[self.model_obj.obs_id_col].values # Alias the alternative IDs and the Choice Array alt_id_array = self.model_obj.alt_IDs choice_array = self.model_obj.choices # Determine how many parameters are being estimated. num_params = self.mle_params.shape[0] # Figure out which observations are in each bootstrap sample. obs_id_per_sample =\ bs.create_cross_sectional_bootstrap_samples(obs_id_array, alt_id_array, choice_array, num_samples, seed=boot_seed) # Get the dictionary of sub-dataframes for each observation id dfs_by_obs_id =\ bs.create_deepcopied_groupby_dict(self.model_obj.data, self.model_obj.obs_id_col) # Create a column name for the bootstrap id columns. boot_id_col = "bootstrap_id" # Initialize an array to store the bootstrapped point estimates. point_estimates = np.empty((num_samples, num_params), dtype=float) # Get keyword arguments for final model estimation with new data. fit_kwargs = {"print_res": print_res, "method": method, "loss_tol": loss_tol, "gradient_tol": gradient_tol, "maxiter": maxiter, "ridge": ridge, "constrained_pos": constrained_pos, "just_point": True} # Get the specification and name dictionary of the MNL model. mnl_spec = None if mnl_obj is None else mnl_obj.specification mnl_names = None if mnl_obj is None else mnl_obj.name_spec # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(num_samples), desc="Creating Bootstrap Replicates", total=num_samples) # Iterate through the bootstrap samples and perform the MLE for row in iterable_for_iteration: # Get the bootstrapped dataframe bootstrap_df =\ bs.create_bootstrap_dataframe(self.model_obj.data, self.model_obj.obs_id_col, obs_id_per_sample[row, :], dfs_by_obs_id, boot_id_col=boot_id_col) # Go through the necessary estimation routine to bootstrap the MLE. current_results =\ retrieve_point_est(self.model_obj, bootstrap_df, boot_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs) # Store the bootstrapped point estimate. point_estimates[row] = current_results["x"] # Store the point estimates as a pandas dataframe self.bootstrap_replicates =\ pd.DataFrame(point_estimates, columns=self.mle_params.index) # Print a 'finished' message for users print("Finished Generating Bootstrap Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) return None
python
def generate_bootstrap_replicates(self, num_samples, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None, boot_seed=None, weights=None): """ Generates the bootstrap replicates for one's given model and dataset. Parameters ---------- num_samples : positive int. Specifies the number of bootstrap samples that are to be drawn. mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_val` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. boot_seed = non-negative int or None, optional. Denotes the random seed to be used when generating the bootstrap samples. If None, the sample generation process will generally be non-reproducible. Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. 
In latent class models, the weights may be the probability of being a particular class. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute. """ print("Generating Bootstrap Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Check the passed arguments for validity. # Create an array of the observation ids obs_id_array = self.model_obj.data[self.model_obj.obs_id_col].values # Alias the alternative IDs and the Choice Array alt_id_array = self.model_obj.alt_IDs choice_array = self.model_obj.choices # Determine how many parameters are being estimated. num_params = self.mle_params.shape[0] # Figure out which observations are in each bootstrap sample. obs_id_per_sample =\ bs.create_cross_sectional_bootstrap_samples(obs_id_array, alt_id_array, choice_array, num_samples, seed=boot_seed) # Get the dictionary of sub-dataframes for each observation id dfs_by_obs_id =\ bs.create_deepcopied_groupby_dict(self.model_obj.data, self.model_obj.obs_id_col) # Create a column name for the bootstrap id columns. boot_id_col = "bootstrap_id" # Initialize an array to store the bootstrapped point estimates. point_estimates = np.empty((num_samples, num_params), dtype=float) # Get keyword arguments for final model estimation with new data. fit_kwargs = {"print_res": print_res, "method": method, "loss_tol": loss_tol, "gradient_tol": gradient_tol, "maxiter": maxiter, "ridge": ridge, "constrained_pos": constrained_pos, "just_point": True} # Get the specification and name dictionary of the MNL model. mnl_spec = None if mnl_obj is None else mnl_obj.specification mnl_names = None if mnl_obj is None else mnl_obj.name_spec # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(num_samples), desc="Creating Bootstrap Replicates", total=num_samples) # Iterate through the bootstrap samples and perform the MLE for row in iterable_for_iteration: # Get the bootstrapped dataframe bootstrap_df =\ bs.create_bootstrap_dataframe(self.model_obj.data, self.model_obj.obs_id_col, obs_id_per_sample[row, :], dfs_by_obs_id, boot_id_col=boot_id_col) # Go through the necessary estimation routine to bootstrap the MLE. current_results =\ retrieve_point_est(self.model_obj, bootstrap_df, boot_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs) # Store the bootstrapped point estimate. point_estimates[row] = current_results["x"] # Store the point estimates as a pandas dataframe self.bootstrap_replicates =\ pd.DataFrame(point_estimates, columns=self.mle_params.index) # Print a 'finished' message for users print("Finished Generating Bootstrap Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) return None
[ "def", "generate_bootstrap_replicates", "(", "self", ",", "num_samples", ",", "mnl_obj", "=", "None", ",", "mnl_init_vals", "=", "None", ",", "mnl_fit_kwargs", "=", "None", ",", "extract_init_vals", "=", "None", ",", "print_res", "=", "False", ",", "method", "=", "\"BFGS\"", ",", "loss_tol", "=", "1e-06", ",", "gradient_tol", "=", "1e-06", ",", "maxiter", "=", "1000", ",", "ridge", "=", "None", ",", "constrained_pos", "=", "None", ",", "boot_seed", "=", "None", ",", "weights", "=", "None", ")", ":", "print", "(", "\"Generating Bootstrap Replicates\"", ")", "print", "(", "time", ".", "strftime", "(", "\"%a %m-%d-%Y %I:%M%p\"", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Check the passed arguments for validity.", "# Create an array of the observation ids", "obs_id_array", "=", "self", ".", "model_obj", ".", "data", "[", "self", ".", "model_obj", ".", "obs_id_col", "]", ".", "values", "# Alias the alternative IDs and the Choice Array", "alt_id_array", "=", "self", ".", "model_obj", ".", "alt_IDs", "choice_array", "=", "self", ".", "model_obj", ".", "choices", "# Determine how many parameters are being estimated.", "num_params", "=", "self", ".", "mle_params", ".", "shape", "[", "0", "]", "# Figure out which observations are in each bootstrap sample.", "obs_id_per_sample", "=", "bs", ".", "create_cross_sectional_bootstrap_samples", "(", "obs_id_array", ",", "alt_id_array", ",", "choice_array", ",", "num_samples", ",", "seed", "=", "boot_seed", ")", "# Get the dictionary of sub-dataframes for each observation id", "dfs_by_obs_id", "=", "bs", ".", "create_deepcopied_groupby_dict", "(", "self", ".", "model_obj", ".", "data", ",", "self", ".", "model_obj", ".", "obs_id_col", ")", "# Create a column name for the bootstrap id columns.", "boot_id_col", "=", "\"bootstrap_id\"", "# Initialize an array to store the bootstrapped point estimates.", "point_estimates", "=", "np", ".", "empty", "(", "(", "num_samples", ",", "num_params", ")", ",", "dtype", "=", "float", ")", "# Get keyword arguments for final model estimation with new data.", "fit_kwargs", "=", "{", "\"print_res\"", ":", "print_res", ",", "\"method\"", ":", "method", ",", "\"loss_tol\"", ":", "loss_tol", ",", "\"gradient_tol\"", ":", "gradient_tol", ",", "\"maxiter\"", ":", "maxiter", ",", "\"ridge\"", ":", "ridge", ",", "\"constrained_pos\"", ":", "constrained_pos", ",", "\"just_point\"", ":", "True", "}", "# Get the specification and name dictionary of the MNL model.", "mnl_spec", "=", "None", "if", "mnl_obj", "is", "None", "else", "mnl_obj", ".", "specification", "mnl_names", "=", "None", "if", "mnl_obj", "is", "None", "else", "mnl_obj", ".", "name_spec", "# Create an iterable for iteration", "iterable_for_iteration", "=", "PROGRESS", "(", "xrange", "(", "num_samples", ")", ",", "desc", "=", "\"Creating Bootstrap Replicates\"", ",", "total", "=", "num_samples", ")", "# Iterate through the bootstrap samples and perform the MLE", "for", "row", "in", "iterable_for_iteration", ":", "# Get the bootstrapped dataframe", "bootstrap_df", "=", "bs", ".", "create_bootstrap_dataframe", "(", "self", ".", "model_obj", ".", "data", ",", "self", ".", "model_obj", ".", "obs_id_col", ",", "obs_id_per_sample", "[", "row", ",", ":", "]", ",", "dfs_by_obs_id", ",", "boot_id_col", "=", "boot_id_col", ")", "# Go through the necessary estimation routine to bootstrap the MLE.", "current_results", "=", "retrieve_point_est", "(", "self", ".", "model_obj", ",", "bootstrap_df", ",", "boot_id_col", ",", "num_params", ",", "mnl_spec", ",", "mnl_names", 
",", "mnl_init_vals", ",", "mnl_fit_kwargs", ",", "extract_init_vals", "=", "extract_init_vals", ",", "*", "*", "fit_kwargs", ")", "# Store the bootstrapped point estimate.", "point_estimates", "[", "row", "]", "=", "current_results", "[", "\"x\"", "]", "# Store the point estimates as a pandas dataframe", "self", ".", "bootstrap_replicates", "=", "pd", ".", "DataFrame", "(", "point_estimates", ",", "columns", "=", "self", ".", "mle_params", ".", "index", ")", "# Print a 'finished' message for users", "print", "(", "\"Finished Generating Bootstrap Replicates\"", ")", "print", "(", "time", ".", "strftime", "(", "\"%a %m-%d-%Y %I:%M%p\"", ")", ")", "return", "None" ]
Generates the bootstrap replicates for one's given model and dataset. Parameters ---------- num_samples : positive int. Specifies the number of bootstrap samples that are to be drawn. mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_val` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. boot_seed = non-negative int or None, optional. Denotes the random seed to be used when generating the bootstrap samples. If None, the sample generation process will generally be non-reproducible. Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute.
[ "Generates", "the", "bootstrap", "replicates", "for", "one", "s", "given", "model", "and", "dataset", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L183-L356
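Hypothetical call pattern (not self-contained: constructing the Boot object and estimating the underlying model happen elsewhere in pylogit, so `boot` below is assumed to be an already-built instance wrapping a fitted MNL model):

# With an MNL as the final model, no warm-start MNL object or extra kwargs are required.
boot.generate_bootstrap_replicates(200, boot_seed=601)

# The replicates are stored on the object as a DataFrame with one row per bootstrap
# sample and one column per estimated parameter.
print(boot.bootstrap_replicates.shape)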
839
timothyb0912/pylogit
pylogit/bootstrap.py
Boot.generate_jackknife_replicates
def generate_jackknife_replicates(self, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None): """ Generates the jackknife replicates for one's given model and dataset. Parameters ---------- mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_val` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute. """ print("Generating Jackknife Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Take note of the observation id column that is to be used obs_id_col = self.model_obj.obs_id_col # Get the array of original observation ids orig_obs_id_array =\ self.model_obj.data[obs_id_col].values # Get an array of the unique observation ids. unique_obs_ids = np.sort(np.unique(orig_obs_id_array)) # Determine how many observations are in one's dataset. num_obs = unique_obs_ids.size # Determine how many parameters are being estimated. 
num_params = self.mle_params.size # Get keyword arguments for final model estimation with new data. fit_kwargs = {"print_res": print_res, "method": method, "loss_tol": loss_tol, "gradient_tol": gradient_tol, "maxiter": maxiter, "ridge": ridge, "constrained_pos": constrained_pos, "just_point": True} # Get the specification and name dictionary of the MNL model. mnl_spec = None if mnl_obj is None else mnl_obj.specification mnl_names = None if mnl_obj is None else mnl_obj.name_spec # Initialize the array of jackknife replicates point_replicates = np.empty((num_obs, num_params), dtype=float) # Create an iterable for iteration iterable_for_iteration = PROGRESS(enumerate(unique_obs_ids), desc="Creating Jackknife Replicates", total=unique_obs_ids.size) # Populate the array of jackknife replicates for pos, obs_id in iterable_for_iteration: # Create the dataframe without the current observation new_df = self.model_obj.data.loc[orig_obs_id_array != obs_id] # Get the point estimate for this new dataset current_results =\ retrieve_point_est(self.model_obj, new_df, obs_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs) # Store the estimated parameters point_replicates[pos] = current_results['x'] # Store the jackknife replicates as a pandas dataframe self.jackknife_replicates =\ pd.DataFrame(point_replicates, columns=self.mle_params.index) # Print a 'finished' message for users print("Finished Generating Jackknife Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) return None
python
def generate_jackknife_replicates(self, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None): """ Generates the jackknife replicates for one's given model and dataset. Parameters ---------- mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_val` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute. """ print("Generating Jackknife Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Take note of the observation id column that is to be used obs_id_col = self.model_obj.obs_id_col # Get the array of original observation ids orig_obs_id_array =\ self.model_obj.data[obs_id_col].values # Get an array of the unique observation ids. unique_obs_ids = np.sort(np.unique(orig_obs_id_array)) # Determine how many observations are in one's dataset. num_obs = unique_obs_ids.size # Determine how many parameters are being estimated. 
num_params = self.mle_params.size # Get keyword arguments for final model estimation with new data. fit_kwargs = {"print_res": print_res, "method": method, "loss_tol": loss_tol, "gradient_tol": gradient_tol, "maxiter": maxiter, "ridge": ridge, "constrained_pos": constrained_pos, "just_point": True} # Get the specification and name dictionary of the MNL model. mnl_spec = None if mnl_obj is None else mnl_obj.specification mnl_names = None if mnl_obj is None else mnl_obj.name_spec # Initialize the array of jackknife replicates point_replicates = np.empty((num_obs, num_params), dtype=float) # Create an iterable for iteration iterable_for_iteration = PROGRESS(enumerate(unique_obs_ids), desc="Creating Jackknife Replicates", total=unique_obs_ids.size) # Populate the array of jackknife replicates for pos, obs_id in iterable_for_iteration: # Create the dataframe without the current observation new_df = self.model_obj.data.loc[orig_obs_id_array != obs_id] # Get the point estimate for this new dataset current_results =\ retrieve_point_est(self.model_obj, new_df, obs_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs) # Store the estimated parameters point_replicates[pos] = current_results['x'] # Store the jackknife replicates as a pandas dataframe self.jackknife_replicates =\ pd.DataFrame(point_replicates, columns=self.mle_params.index) # Print a 'finished' message for users print("Finished Generating Jackknife Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) return None
[ "def", "generate_jackknife_replicates", "(", "self", ",", "mnl_obj", "=", "None", ",", "mnl_init_vals", "=", "None", ",", "mnl_fit_kwargs", "=", "None", ",", "extract_init_vals", "=", "None", ",", "print_res", "=", "False", ",", "method", "=", "\"BFGS\"", ",", "loss_tol", "=", "1e-06", ",", "gradient_tol", "=", "1e-06", ",", "maxiter", "=", "1000", ",", "ridge", "=", "None", ",", "constrained_pos", "=", "None", ")", ":", "print", "(", "\"Generating Jackknife Replicates\"", ")", "print", "(", "time", ".", "strftime", "(", "\"%a %m-%d-%Y %I:%M%p\"", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Take note of the observation id column that is to be used", "obs_id_col", "=", "self", ".", "model_obj", ".", "obs_id_col", "# Get the array of original observation ids", "orig_obs_id_array", "=", "self", ".", "model_obj", ".", "data", "[", "obs_id_col", "]", ".", "values", "# Get an array of the unique observation ids.", "unique_obs_ids", "=", "np", ".", "sort", "(", "np", ".", "unique", "(", "orig_obs_id_array", ")", ")", "# Determine how many observations are in one's dataset.", "num_obs", "=", "unique_obs_ids", ".", "size", "# Determine how many parameters are being estimated.", "num_params", "=", "self", ".", "mle_params", ".", "size", "# Get keyword arguments for final model estimation with new data.", "fit_kwargs", "=", "{", "\"print_res\"", ":", "print_res", ",", "\"method\"", ":", "method", ",", "\"loss_tol\"", ":", "loss_tol", ",", "\"gradient_tol\"", ":", "gradient_tol", ",", "\"maxiter\"", ":", "maxiter", ",", "\"ridge\"", ":", "ridge", ",", "\"constrained_pos\"", ":", "constrained_pos", ",", "\"just_point\"", ":", "True", "}", "# Get the specification and name dictionary of the MNL model.", "mnl_spec", "=", "None", "if", "mnl_obj", "is", "None", "else", "mnl_obj", ".", "specification", "mnl_names", "=", "None", "if", "mnl_obj", "is", "None", "else", "mnl_obj", ".", "name_spec", "# Initialize the array of jackknife replicates", "point_replicates", "=", "np", ".", "empty", "(", "(", "num_obs", ",", "num_params", ")", ",", "dtype", "=", "float", ")", "# Create an iterable for iteration", "iterable_for_iteration", "=", "PROGRESS", "(", "enumerate", "(", "unique_obs_ids", ")", ",", "desc", "=", "\"Creating Jackknife Replicates\"", ",", "total", "=", "unique_obs_ids", ".", "size", ")", "# Populate the array of jackknife replicates", "for", "pos", ",", "obs_id", "in", "iterable_for_iteration", ":", "# Create the dataframe without the current observation", "new_df", "=", "self", ".", "model_obj", ".", "data", ".", "loc", "[", "orig_obs_id_array", "!=", "obs_id", "]", "# Get the point estimate for this new dataset", "current_results", "=", "retrieve_point_est", "(", "self", ".", "model_obj", ",", "new_df", ",", "obs_id_col", ",", "num_params", ",", "mnl_spec", ",", "mnl_names", ",", "mnl_init_vals", ",", "mnl_fit_kwargs", ",", "extract_init_vals", "=", "extract_init_vals", ",", "*", "*", "fit_kwargs", ")", "# Store the estimated parameters", "point_replicates", "[", "pos", "]", "=", "current_results", "[", "'x'", "]", "# Store the jackknife replicates as a pandas dataframe", "self", ".", "jackknife_replicates", "=", "pd", ".", "DataFrame", "(", "point_replicates", ",", "columns", "=", "self", ".", "mle_params", ".", "index", ")", "# Print a 'finished' message for users", "print", "(", "\"Finished Generating Jackknife Replicates\"", ")", "print", "(", "time", ".", "strftime", "(", "\"%a %m-%d-%Y %I:%M%p\"", ")", ")", "return", "None" ]
Generates the jackknife replicates for one's given model and dataset. Parameters ---------- mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_vals` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they are determined. Default `== False`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'BFGS'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size`. Default == None. Returns ------- None. Will store the jackknife replicates on the `self.jackknife_replicates` attribute.
[ "Generates", "the", "jackknife", "replicates", "for", "one", "s", "given", "model", "and", "dataset", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L358-L495
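The record above documents `Boot.generate_jackknife_replicates`, which re-estimates the model once per unique observation with that observation removed. Below is a minimal sketch of the same leave-one-out idea; it does not use the pylogit API, and the dataframe, column names, and least-squares estimator are made up purely for illustration.

```python
# Minimal leave-one-out (jackknife) replicate loop, mirroring the idea of
# generate_jackknife_replicates with an ordinary least-squares "model".
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({"obs_id": np.arange(20),
                   "x": rng.normal(size=20)})
df["y"] = 2.0 * df["x"] + rng.normal(scale=0.1, size=20)

def fit(data):
    # Point estimate: intercept and slope from least squares.
    X = np.column_stack([np.ones(len(data)), data["x"].values])
    coefs, *_ = np.linalg.lstsq(X, data["y"].values, rcond=None)
    return coefs

unique_ids = np.sort(df["obs_id"].unique())
replicates = np.empty((unique_ids.size, 2))
for pos, obs_id in enumerate(unique_ids):
    # Drop one observation and re-estimate, exactly as the jackknife requires.
    replicates[pos] = fit(df.loc[df["obs_id"] != obs_id])

jackknife_replicates = pd.DataFrame(replicates, columns=["intercept", "slope"])
print(jackknife_replicates.describe())
```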
840
timothyb0912/pylogit
pylogit/bootstrap.py
Boot.calc_log_likes_for_replicates
def calc_log_likes_for_replicates(self, replicates='bootstrap', num_draws=None, seed=None): """ Calculate the log-likelihood value of one's replicates, given one's dataset. Parameters ---------- replicates : str in {'bootstrap', 'jackknife'}. Denotes which set of replicates should have their log-likelihoods calculated. num_draws : int greater than zero or None, optional. Denotes the number of random draws for mixed logit estimation. If None, then no random draws will be made. Default == None. seed : int greater than zero or None, optional. Denotes the random seed to be used for mixed logit estimation. If None, then no random seed will be set. Default == None. Returns ------- log_likelihoods : 1D ndarray. Each element stores the log-likelihood of the associated parameter values on the model object's dataset. The log-likelihoods are also stored on the `replicates + '_log_likelihoods'` attribute. """ # Check the validity of the kwargs ensure_replicates_kwarg_validity(replicates) # Get the desired type of replicates replicate_vec = getattr(self, replicates + "_replicates").values # Determine the choice column choice_col = self.model_obj.choice_col # Split the control flow based on whether we're using a Nested Logit current_model_type = self.model_obj.model_type non_2d_predictions =\ [model_type_to_display_name["Nested Logit"], model_type_to_display_name["Mixed Logit"]] if current_model_type not in non_2d_predictions: # Get the param list for this set of replicates param_list =\ get_param_list_for_prediction(self.model_obj, replicate_vec) # Get the 'chosen_probs' using the desired set of replicates chosen_probs =\ self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col) else: # Initialize a list of chosen probs chosen_probs_list = [] # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(replicate_vec.shape[0]), desc="Calculate Gradient Norms", total=replicate_vec.shape[0]) # Populate the list of chosen probabilities for each vector of # parameter values for idx in iterable_for_iteration: # Get the param list for this set of replicates param_list =\ get_param_list_for_prediction(self.model_obj, replicate_vec[idx][None, :]) # Use 1D parameters in the prediction function param_list =\ [x.ravel() if x is not None else x for x in param_list] # Get the 'chosen_probs' using the desired set of replicates chosen_probs =\ self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col, num_draws=num_draws, seed=seed) # store those chosen prob_results chosen_probs_list.append(chosen_probs[:, None]) # Get the final array of chosen probs chosen_probs = np.concatenate(chosen_probs_list, axis=1) # Calculate the log_likelihood log_likelihoods = np.log(chosen_probs).sum(axis=0) # Store the log-likelihood values attribute_name = replicates + "_log_likelihoods" log_like_series = pd.Series(log_likelihoods, name=attribute_name) setattr(self, attribute_name, log_like_series) return log_likelihoods
python
def calc_log_likes_for_replicates(self, replicates='bootstrap', num_draws=None, seed=None): """ Calculate the log-likelihood value of one's replicates, given one's dataset. Parameters ---------- replicates : str in {'bootstrap', 'jackknife'}. Denotes which set of replicates should have their log-likelihoods calculated. num_draws : int greater than zero or None, optional. Denotes the number of random draws for mixed logit estimation. If None, then no random draws will be made. Default == None. seed : int greater than zero or None, optional. Denotes the random seed to be used for mixed logit estimation. If None, then no random seed will be set. Default == None. Returns ------- log_likelihoods : 1D ndarray. Each element stores the log-likelihood of the associated parameter values on the model object's dataset. The log-likelihoods are also stored on the `replicates + '_log_likelihoods'` attribute. """ # Check the validity of the kwargs ensure_replicates_kwarg_validity(replicates) # Get the desired type of replicates replicate_vec = getattr(self, replicates + "_replicates").values # Determine the choice column choice_col = self.model_obj.choice_col # Split the control flow based on whether we're using a Nested Logit current_model_type = self.model_obj.model_type non_2d_predictions =\ [model_type_to_display_name["Nested Logit"], model_type_to_display_name["Mixed Logit"]] if current_model_type not in non_2d_predictions: # Get the param list for this set of replicates param_list =\ get_param_list_for_prediction(self.model_obj, replicate_vec) # Get the 'chosen_probs' using the desired set of replicates chosen_probs =\ self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col) else: # Initialize a list of chosen probs chosen_probs_list = [] # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(replicate_vec.shape[0]), desc="Calculate Gradient Norms", total=replicate_vec.shape[0]) # Populate the list of chosen probabilities for each vector of # parameter values for idx in iterable_for_iteration: # Get the param list for this set of replicates param_list =\ get_param_list_for_prediction(self.model_obj, replicate_vec[idx][None, :]) # Use 1D parameters in the prediction function param_list =\ [x.ravel() if x is not None else x for x in param_list] # Get the 'chosen_probs' using the desired set of replicates chosen_probs =\ self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col, num_draws=num_draws, seed=seed) # store those chosen prob_results chosen_probs_list.append(chosen_probs[:, None]) # Get the final array of chosen probs chosen_probs = np.concatenate(chosen_probs_list, axis=1) # Calculate the log_likelihood log_likelihoods = np.log(chosen_probs).sum(axis=0) # Store the log-likelihood values attribute_name = replicates + "_log_likelihoods" log_like_series = pd.Series(log_likelihoods, name=attribute_name) setattr(self, attribute_name, log_like_series) return log_likelihoods
[ "def", "calc_log_likes_for_replicates", "(", "self", ",", "replicates", "=", "'bootstrap'", ",", "num_draws", "=", "None", ",", "seed", "=", "None", ")", ":", "# Check the validity of the kwargs", "ensure_replicates_kwarg_validity", "(", "replicates", ")", "# Get the desired type of replicates", "replicate_vec", "=", "getattr", "(", "self", ",", "replicates", "+", "\"_replicates\"", ")", ".", "values", "# Determine the choice column", "choice_col", "=", "self", ".", "model_obj", ".", "choice_col", "# Split the control flow based on whether we're using a Nested Logit", "current_model_type", "=", "self", ".", "model_obj", ".", "model_type", "non_2d_predictions", "=", "[", "model_type_to_display_name", "[", "\"Nested Logit\"", "]", ",", "model_type_to_display_name", "[", "\"Mixed Logit\"", "]", "]", "if", "current_model_type", "not", "in", "non_2d_predictions", ":", "# Get the param list for this set of replicates", "param_list", "=", "get_param_list_for_prediction", "(", "self", ".", "model_obj", ",", "replicate_vec", ")", "# Get the 'chosen_probs' using the desired set of replicates", "chosen_probs", "=", "self", ".", "model_obj", ".", "predict", "(", "self", ".", "model_obj", ".", "data", ",", "param_list", "=", "param_list", ",", "return_long_probs", "=", "False", ",", "choice_col", "=", "choice_col", ")", "else", ":", "# Initialize a list of chosen probs", "chosen_probs_list", "=", "[", "]", "# Create an iterable for iteration", "iterable_for_iteration", "=", "PROGRESS", "(", "xrange", "(", "replicate_vec", ".", "shape", "[", "0", "]", ")", ",", "desc", "=", "\"Calculate Gradient Norms\"", ",", "total", "=", "replicate_vec", ".", "shape", "[", "0", "]", ")", "# Populate the list of chosen probabilities for each vector of", "# parameter values", "for", "idx", "in", "iterable_for_iteration", ":", "# Get the param list for this set of replicates", "param_list", "=", "get_param_list_for_prediction", "(", "self", ".", "model_obj", ",", "replicate_vec", "[", "idx", "]", "[", "None", ",", ":", "]", ")", "# Use 1D parameters in the prediction function", "param_list", "=", "[", "x", ".", "ravel", "(", ")", "if", "x", "is", "not", "None", "else", "x", "for", "x", "in", "param_list", "]", "# Get the 'chosen_probs' using the desired set of replicates", "chosen_probs", "=", "self", ".", "model_obj", ".", "predict", "(", "self", ".", "model_obj", ".", "data", ",", "param_list", "=", "param_list", ",", "return_long_probs", "=", "False", ",", "choice_col", "=", "choice_col", ",", "num_draws", "=", "num_draws", ",", "seed", "=", "seed", ")", "# store those chosen prob_results", "chosen_probs_list", ".", "append", "(", "chosen_probs", "[", ":", ",", "None", "]", ")", "# Get the final array of chosen probs", "chosen_probs", "=", "np", ".", "concatenate", "(", "chosen_probs_list", ",", "axis", "=", "1", ")", "# Calculate the log_likelihood", "log_likelihoods", "=", "np", ".", "log", "(", "chosen_probs", ")", ".", "sum", "(", "axis", "=", "0", ")", "# Store the log-likelihood values", "attribute_name", "=", "replicates", "+", "\"_log_likelihoods\"", "log_like_series", "=", "pd", ".", "Series", "(", "log_likelihoods", ",", "name", "=", "attribute_name", ")", "setattr", "(", "self", ",", "attribute_name", ",", "log_like_series", ")", "return", "log_likelihoods" ]
Calculate the log-likelihood value of one's replicates, given one's dataset. Parameters ---------- replicates : str in {'bootstrap', 'jackknife'}. Denotes which set of replicates should have their log-likelihoods calculated. num_draws : int greater than zero or None, optional. Denotes the number of random draws for mixed logit estimation. If None, then no random draws will be made. Default == None. seed : int greater than zero or None, optional. Denotes the random seed to be used for mixed logit estimation. If None, then no random seed will be set. Default == None. Returns ------- log_likelihoods : 1D ndarray. Each element stores the log-likelihood of the associated parameter values on the model object's dataset. The log-likelihoods are also stored on the `replicates + '_log_likelihoods'` attribute.
[ "Calculate", "the", "log", "-", "likelihood", "value", "of", "one", "s", "replicates", "given", "one", "s", "dataset", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L497-L591
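`calc_log_likes_for_replicates` ends by summing the log of each chosen alternative's predicted probability over choice situations, one column per replicate. A tiny self-contained sketch of that final step follows; the probability matrix is made up for illustration.

```python
import numpy as np

# Hypothetical matrix of predicted probabilities for the chosen alternative:
# one row per choice situation, one column per parameter replicate.
rng = np.random.default_rng(1)
chosen_probs = rng.uniform(0.05, 0.95, size=(100, 5))

# The log-likelihood of each replicate is the column-wise sum of log-probabilities,
# matching `np.log(chosen_probs).sum(axis=0)` in the method above.
log_likelihoods = np.log(chosen_probs).sum(axis=0)
print(log_likelihoods.shape)  # (5,) -- one value per replicate
```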
841
timothyb0912/pylogit
pylogit/bootstrap.py
Boot.calc_gradient_norm_for_replicates
def calc_gradient_norm_for_replicates(self, replicates='bootstrap', ridge=None, constrained_pos=None, weights=None): """ Calculate the Euclidean-norm of the gradient of one's replicates, given one's dataset. Parameters ---------- replicates : str in {'bootstrap', 'jackknife'}. Denotes which set of replicates should have their log-likelihoods calculated. ridge : float or None, optional. Denotes the ridge penalty used when estimating the replicates, and to be used when calculating the gradient. If None, no ridge penalty is used. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `self.mle_params` Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- log_likelihoods : 1D ndarray. Each element stores the log-likelihood of the associated parameter values on the model object's dataset. The log-likelihoods are also stored on the `replicates + '_log_likelihoods'` attribute. """ # Check the validity of the kwargs ensure_replicates_kwarg_validity(replicates) # Create the estimation object estimation_obj =\ create_estimation_obj(self.model_obj, self.mle_params.values, ridge=ridge, constrained_pos=constrained_pos, weights=weights) # Prepare the estimation object to calculate the gradients if hasattr(estimation_obj, "set_derivatives"): estimation_obj.set_derivatives() # Get the array of parameter replicates replicate_array = getattr(self, replicates + "_replicates").values # Determine the number of replicates num_reps = replicate_array.shape[0] # Initialize an empty array to store the gradient norms gradient_norms = np.empty((num_reps,), dtype=float) # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(num_reps), desc="Calculating Gradient Norms", total=num_reps) # Iterate through the rows of the replicates and calculate and store # the gradient norm for each replicated parameter vector. for row in iterable_for_iteration: current_params = replicate_array[row] gradient = estimation_obj.convenience_calc_gradient(current_params) gradient_norms[row] = np.linalg.norm(gradient) return gradient_norms
python
def calc_gradient_norm_for_replicates(self, replicates='bootstrap', ridge=None, constrained_pos=None, weights=None): """ Calculate the Euclidean-norm of the gradient of one's replicates, given one's dataset. Parameters ---------- replicates : str in {'bootstrap', 'jackknife'}. Denotes which set of replicates should have their log-likelihoods calculated. ridge : float or None, optional. Denotes the ridge penalty used when estimating the replicates, and to be used when calculating the gradient. If None, no ridge penalty is used. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `self.mle_params` Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- log_likelihoods : 1D ndarray. Each element stores the log-likelihood of the associated parameter values on the model object's dataset. The log-likelihoods are also stored on the `replicates + '_log_likelihoods'` attribute. """ # Check the validity of the kwargs ensure_replicates_kwarg_validity(replicates) # Create the estimation object estimation_obj =\ create_estimation_obj(self.model_obj, self.mle_params.values, ridge=ridge, constrained_pos=constrained_pos, weights=weights) # Prepare the estimation object to calculate the gradients if hasattr(estimation_obj, "set_derivatives"): estimation_obj.set_derivatives() # Get the array of parameter replicates replicate_array = getattr(self, replicates + "_replicates").values # Determine the number of replicates num_reps = replicate_array.shape[0] # Initialize an empty array to store the gradient norms gradient_norms = np.empty((num_reps,), dtype=float) # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(num_reps), desc="Calculating Gradient Norms", total=num_reps) # Iterate through the rows of the replicates and calculate and store # the gradient norm for each replicated parameter vector. for row in iterable_for_iteration: current_params = replicate_array[row] gradient = estimation_obj.convenience_calc_gradient(current_params) gradient_norms[row] = np.linalg.norm(gradient) return gradient_norms
[ "def", "calc_gradient_norm_for_replicates", "(", "self", ",", "replicates", "=", "'bootstrap'", ",", "ridge", "=", "None", ",", "constrained_pos", "=", "None", ",", "weights", "=", "None", ")", ":", "# Check the validity of the kwargs", "ensure_replicates_kwarg_validity", "(", "replicates", ")", "# Create the estimation object", "estimation_obj", "=", "create_estimation_obj", "(", "self", ".", "model_obj", ",", "self", ".", "mle_params", ".", "values", ",", "ridge", "=", "ridge", ",", "constrained_pos", "=", "constrained_pos", ",", "weights", "=", "weights", ")", "# Prepare the estimation object to calculate the gradients", "if", "hasattr", "(", "estimation_obj", ",", "\"set_derivatives\"", ")", ":", "estimation_obj", ".", "set_derivatives", "(", ")", "# Get the array of parameter replicates", "replicate_array", "=", "getattr", "(", "self", ",", "replicates", "+", "\"_replicates\"", ")", ".", "values", "# Determine the number of replicates", "num_reps", "=", "replicate_array", ".", "shape", "[", "0", "]", "# Initialize an empty array to store the gradient norms", "gradient_norms", "=", "np", ".", "empty", "(", "(", "num_reps", ",", ")", ",", "dtype", "=", "float", ")", "# Create an iterable for iteration", "iterable_for_iteration", "=", "PROGRESS", "(", "xrange", "(", "num_reps", ")", ",", "desc", "=", "\"Calculating Gradient Norms\"", ",", "total", "=", "num_reps", ")", "# Iterate through the rows of the replicates and calculate and store", "# the gradient norm for each replicated parameter vector.", "for", "row", "in", "iterable_for_iteration", ":", "current_params", "=", "replicate_array", "[", "row", "]", "gradient", "=", "estimation_obj", ".", "convenience_calc_gradient", "(", "current_params", ")", "gradient_norms", "[", "row", "]", "=", "np", ".", "linalg", ".", "norm", "(", "gradient", ")", "return", "gradient_norms" ]
Calculate the Euclidean-norm of the gradient of one's replicates, given one's dataset. Parameters ---------- replicates : str in {'bootstrap', 'jackknife'}. Denotes which set of replicates should have their gradient norms calculated. ridge : float or None, optional. Denotes the ridge penalty used when estimating the replicates, and to be used when calculating the gradient. If None, no ridge penalty is used. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `self.mle_params.size`. Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given stratum for a sample in relation to the proportion of observations in that stratum in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- gradient_norms : 1D ndarray. Each element stores the Euclidean norm of the gradient of the log-likelihood, evaluated at the associated replicated parameter vector on the model object's dataset.
[ "Calculate", "the", "Euclidean", "-", "norm", "of", "the", "gradient", "of", "one", "s", "replicates", "given", "one", "s", "dataset", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L593-L661
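`calc_gradient_norm_for_replicates` evaluates the Euclidean norm of the log-likelihood gradient at each replicated parameter vector; unusually large norms flag replicates whose estimation did not converge. A generic sketch of that loop follows, using a stand-in quadratic-loss gradient rather than the pylogit estimation object.

```python
import numpy as np

def gradient(params):
    # Stand-in gradient: gradient of 0.5 * ||params - target||^2.
    target = np.array([1.0, -2.0, 0.5])
    return params - target

# Hypothetical replicate array: one row per replicate, one column per parameter.
replicate_array = np.array([[1.0, -2.0, 0.5],
                            [1.2, -1.8, 0.4],
                            [0.5, -2.5, 1.0]])

gradient_norms = np.empty(replicate_array.shape[0])
for row, params in enumerate(replicate_array):
    gradient_norms[row] = np.linalg.norm(gradient(params))

print(gradient_norms)  # the first replicate sits at the optimum, so its norm is 0
```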
842
timothyb0912/pylogit
pylogit/bootstrap.py
Boot.calc_percentile_interval
def calc_percentile_interval(self, conf_percentage): """ Calculates percentile bootstrap confidence intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- None. Will store the percentile intervals as `self.percentile_interval` Notes ----- Must have all ready called `self.generate_bootstrap_replicates`. """ # Get the alpha % that corresponds to the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Create the column names for the dataframe of confidence intervals single_column_names =\ ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Calculate the desired confidence intervals. conf_intervals =\ bc.calc_percentile_interval(self.bootstrap_replicates.values, conf_percentage) # Store the desired confidence intervals self.percentile_interval =\ pd.DataFrame(conf_intervals.T, index=self.mle_params.index, columns=single_column_names) return None
python
def calc_percentile_interval(self, conf_percentage): """ Calculates percentile bootstrap confidence intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- None. Will store the percentile intervals as `self.percentile_interval` Notes ----- Must have all ready called `self.generate_bootstrap_replicates`. """ # Get the alpha % that corresponds to the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Create the column names for the dataframe of confidence intervals single_column_names =\ ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Calculate the desired confidence intervals. conf_intervals =\ bc.calc_percentile_interval(self.bootstrap_replicates.values, conf_percentage) # Store the desired confidence intervals self.percentile_interval =\ pd.DataFrame(conf_intervals.T, index=self.mle_params.index, columns=single_column_names) return None
[ "def", "calc_percentile_interval", "(", "self", ",", "conf_percentage", ")", ":", "# Get the alpha % that corresponds to the given confidence percentage.", "alpha", "=", "bc", ".", "get_alpha_from_conf_percentage", "(", "conf_percentage", ")", "# Create the column names for the dataframe of confidence intervals", "single_column_names", "=", "[", "'{:.3g}%'", ".", "format", "(", "alpha", "/", "2.0", ")", ",", "'{:.3g}%'", ".", "format", "(", "100", "-", "alpha", "/", "2.0", ")", "]", "# Calculate the desired confidence intervals.", "conf_intervals", "=", "bc", ".", "calc_percentile_interval", "(", "self", ".", "bootstrap_replicates", ".", "values", ",", "conf_percentage", ")", "# Store the desired confidence intervals", "self", ".", "percentile_interval", "=", "pd", ".", "DataFrame", "(", "conf_intervals", ".", "T", ",", "index", "=", "self", ".", "mle_params", ".", "index", ",", "columns", "=", "single_column_names", ")", "return", "None" ]
Calculates percentile bootstrap confidence intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- None. Will store the percentile intervals as `self.percentile_interval`. Notes ----- Must have already called `self.generate_bootstrap_replicates`.
[ "Calculates", "percentile", "bootstrap", "confidence", "intervals", "for", "one", "s", "model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L663-L696
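The percentile interval documented above is simply the alpha/2 and 100 - alpha/2 percentiles of the bootstrap replicates, taken column by column. A self-contained sketch with simulated replicates follows; the parameter names are placeholders.

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(2)
# Hypothetical bootstrap replicates: 1000 draws of 3 parameters.
replicates = rng.normal(loc=[0.5, -1.0, 2.0], scale=0.1, size=(1000, 3))

conf_percentage = 95
alpha = 100 - conf_percentage
# Column-wise percentiles give the lower and upper endpoints for each parameter.
endpoints = np.percentile(replicates, [alpha / 2.0, 100 - alpha / 2.0], axis=0)

percentile_interval = pd.DataFrame(endpoints.T,
                                   index=["beta_1", "beta_2", "beta_3"],
                                   columns=["2.5%", "97.5%"])
print(percentile_interval)
```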
843
timothyb0912/pylogit
pylogit/bootstrap.py
Boot.calc_abc_interval
def calc_abc_interval(self, conf_percentage, init_vals, epsilon=0.001, **fit_kwargs): """ Calculates Approximate Bootstrap Confidence Intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. init_vals : 1D ndarray. The initial values used to estimate the one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- None. Will store the ABC intervals as `self.abc_interval`. """ print("Calculating Approximate Bootstrap Confidence (ABC) Intervals") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Get the alpha % that corresponds to the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Create the column names for the dataframe of confidence intervals single_column_names =\ ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Calculate the ABC confidence intervals conf_intervals =\ abc.calc_abc_interval(self.model_obj, self.mle_params.values, init_vals, conf_percentage, epsilon=epsilon, **fit_kwargs) # Store the ABC confidence intervals self.abc_interval = pd.DataFrame(conf_intervals.T, index=self.mle_params.index, columns=single_column_names) return None
python
def calc_abc_interval(self, conf_percentage, init_vals, epsilon=0.001, **fit_kwargs): """ Calculates Approximate Bootstrap Confidence Intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. init_vals : 1D ndarray. The initial values used to estimate the one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- None. Will store the ABC intervals as `self.abc_interval`. """ print("Calculating Approximate Bootstrap Confidence (ABC) Intervals") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Get the alpha % that corresponds to the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Create the column names for the dataframe of confidence intervals single_column_names =\ ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Calculate the ABC confidence intervals conf_intervals =\ abc.calc_abc_interval(self.model_obj, self.mle_params.values, init_vals, conf_percentage, epsilon=epsilon, **fit_kwargs) # Store the ABC confidence intervals self.abc_interval = pd.DataFrame(conf_intervals.T, index=self.mle_params.index, columns=single_column_names) return None
[ "def", "calc_abc_interval", "(", "self", ",", "conf_percentage", ",", "init_vals", ",", "epsilon", "=", "0.001", ",", "*", "*", "fit_kwargs", ")", ":", "print", "(", "\"Calculating Approximate Bootstrap Confidence (ABC) Intervals\"", ")", "print", "(", "time", ".", "strftime", "(", "\"%a %m-%d-%Y %I:%M%p\"", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Get the alpha % that corresponds to the given confidence percentage.", "alpha", "=", "bc", ".", "get_alpha_from_conf_percentage", "(", "conf_percentage", ")", "# Create the column names for the dataframe of confidence intervals", "single_column_names", "=", "[", "'{:.3g}%'", ".", "format", "(", "alpha", "/", "2.0", ")", ",", "'{:.3g}%'", ".", "format", "(", "100", "-", "alpha", "/", "2.0", ")", "]", "# Calculate the ABC confidence intervals", "conf_intervals", "=", "abc", ".", "calc_abc_interval", "(", "self", ".", "model_obj", ",", "self", ".", "mle_params", ".", "values", ",", "init_vals", ",", "conf_percentage", ",", "epsilon", "=", "epsilon", ",", "*", "*", "fit_kwargs", ")", "# Store the ABC confidence intervals", "self", ".", "abc_interval", "=", "pd", ".", "DataFrame", "(", "conf_intervals", ".", "T", ",", "index", "=", "self", ".", "mle_params", ".", "index", ",", "columns", "=", "single_column_names", ")", "return", "None" ]
Calculates Approximate Bootstrap Confidence Intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. init_vals : 1D ndarray. The initial values used to estimate one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == 0.001. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- None. Will store the ABC intervals as `self.abc_interval`.
[ "Calculates", "Approximate", "Bootstrap", "Confidence", "Intervals", "for", "one", "s", "model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L737-L788
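The `epsilon` argument of `calc_abc_interval` sets the step size of the finite-difference approximations used inside the ABC computation. As a reminder of what such an approximation looks like, here is a tiny central-difference sketch; the function being differentiated is arbitrary and stands in for the actual influence calculations.

```python
import numpy as np

def f(x):
    # Arbitrary smooth function standing in for an influence calculation.
    return np.exp(-x) + x ** 2

epsilon = 0.001  # the kind of "very small" step the `epsilon` keyword controls
x0 = 1.5

# Central finite-difference approximation of f'(x0).
approx_deriv = (f(x0 + epsilon) - f(x0 - epsilon)) / (2 * epsilon)
exact_deriv = -np.exp(-x0) + 2 * x0
print(approx_deriv, exact_deriv)  # the two values agree to several decimals
```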
844
timothyb0912/pylogit
pylogit/bootstrap.py
Boot.calc_conf_intervals
def calc_conf_intervals(self, conf_percentage, interval_type='all', init_vals=None, epsilon=abc.EPSILON, **fit_kwargs): """ Calculates percentile, bias-corrected and accelerated, and approximate bootstrap confidence intervals. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. interval_type : str in {'all', 'pi', 'bca', 'abc'}, optional. Denotes the type of confidence intervals that should be calculated. 'all' results in all types of confidence intervals being calculated. 'pi' means 'percentile intervals', 'bca' means 'bias-corrected and accelerated', and 'abc' means 'approximate bootstrap confidence' intervals. Default == 'all'. init_vals : 1D ndarray. The initial values used to estimate the one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions for the 'abc' intervals. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle` when calculating the 'abc' intervals. Returns ------- None. Will store the confidence intervals on their respective model objects: `self.percentile_interval`, `self.bca_interval`, `self.abc_interval`, or all of these objects. """ if interval_type == 'pi': self.calc_percentile_interval(conf_percentage) elif interval_type == 'bca': self.calc_bca_interval(conf_percentage) elif interval_type == 'abc': self.calc_abc_interval(conf_percentage, init_vals, epsilon=epsilon, **fit_kwargs) elif interval_type == 'all': print("Calculating Percentile Confidence Intervals") sys.stdout.flush() self.calc_percentile_interval(conf_percentage) print("Calculating BCa Confidence Intervals") sys.stdout.flush() self.calc_bca_interval(conf_percentage) # Note we don't print a user message here since that is done in # self.calc_abc_interval(). self.calc_abc_interval(conf_percentage, init_vals, epsilon=epsilon, **fit_kwargs) # Get the alpha % for the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Get lists of the interval type names and the endpoint names interval_type_names = ['percentile_interval', 'BCa_interval', 'ABC_interval'] endpoint_names = ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Create the column names for the dataframe of confidence intervals multi_index_names =\ list(itertools.product(interval_type_names, endpoint_names)) df_column_index = pd.MultiIndex.from_tuples(multi_index_names) # Create the dataframe containing all confidence intervals self.all_intervals = pd.concat([self.percentile_interval, self.bca_interval, self.abc_interval], axis=1, ignore_index=True) # Store the column names for the combined confidence intervals self.all_intervals.columns = df_column_index self.all_intervals.index = self.mle_params.index else: msg =\ "interval_type MUST be in `['pi', 'bca', 'abc', 'all']`" raise ValueError(msg) return None
python
def calc_conf_intervals(self, conf_percentage, interval_type='all', init_vals=None, epsilon=abc.EPSILON, **fit_kwargs): """ Calculates percentile, bias-corrected and accelerated, and approximate bootstrap confidence intervals. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. interval_type : str in {'all', 'pi', 'bca', 'abc'}, optional. Denotes the type of confidence intervals that should be calculated. 'all' results in all types of confidence intervals being calculated. 'pi' means 'percentile intervals', 'bca' means 'bias-corrected and accelerated', and 'abc' means 'approximate bootstrap confidence' intervals. Default == 'all'. init_vals : 1D ndarray. The initial values used to estimate the one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions for the 'abc' intervals. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle` when calculating the 'abc' intervals. Returns ------- None. Will store the confidence intervals on their respective model objects: `self.percentile_interval`, `self.bca_interval`, `self.abc_interval`, or all of these objects. """ if interval_type == 'pi': self.calc_percentile_interval(conf_percentage) elif interval_type == 'bca': self.calc_bca_interval(conf_percentage) elif interval_type == 'abc': self.calc_abc_interval(conf_percentage, init_vals, epsilon=epsilon, **fit_kwargs) elif interval_type == 'all': print("Calculating Percentile Confidence Intervals") sys.stdout.flush() self.calc_percentile_interval(conf_percentage) print("Calculating BCa Confidence Intervals") sys.stdout.flush() self.calc_bca_interval(conf_percentage) # Note we don't print a user message here since that is done in # self.calc_abc_interval(). self.calc_abc_interval(conf_percentage, init_vals, epsilon=epsilon, **fit_kwargs) # Get the alpha % for the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Get lists of the interval type names and the endpoint names interval_type_names = ['percentile_interval', 'BCa_interval', 'ABC_interval'] endpoint_names = ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Create the column names for the dataframe of confidence intervals multi_index_names =\ list(itertools.product(interval_type_names, endpoint_names)) df_column_index = pd.MultiIndex.from_tuples(multi_index_names) # Create the dataframe containing all confidence intervals self.all_intervals = pd.concat([self.percentile_interval, self.bca_interval, self.abc_interval], axis=1, ignore_index=True) # Store the column names for the combined confidence intervals self.all_intervals.columns = df_column_index self.all_intervals.index = self.mle_params.index else: msg =\ "interval_type MUST be in `['pi', 'bca', 'abc', 'all']`" raise ValueError(msg) return None
[ "def", "calc_conf_intervals", "(", "self", ",", "conf_percentage", ",", "interval_type", "=", "'all'", ",", "init_vals", "=", "None", ",", "epsilon", "=", "abc", ".", "EPSILON", ",", "*", "*", "fit_kwargs", ")", ":", "if", "interval_type", "==", "'pi'", ":", "self", ".", "calc_percentile_interval", "(", "conf_percentage", ")", "elif", "interval_type", "==", "'bca'", ":", "self", ".", "calc_bca_interval", "(", "conf_percentage", ")", "elif", "interval_type", "==", "'abc'", ":", "self", ".", "calc_abc_interval", "(", "conf_percentage", ",", "init_vals", ",", "epsilon", "=", "epsilon", ",", "*", "*", "fit_kwargs", ")", "elif", "interval_type", "==", "'all'", ":", "print", "(", "\"Calculating Percentile Confidence Intervals\"", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "self", ".", "calc_percentile_interval", "(", "conf_percentage", ")", "print", "(", "\"Calculating BCa Confidence Intervals\"", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "self", ".", "calc_bca_interval", "(", "conf_percentage", ")", "# Note we don't print a user message here since that is done in", "# self.calc_abc_interval().", "self", ".", "calc_abc_interval", "(", "conf_percentage", ",", "init_vals", ",", "epsilon", "=", "epsilon", ",", "*", "*", "fit_kwargs", ")", "# Get the alpha % for the given confidence percentage.", "alpha", "=", "bc", ".", "get_alpha_from_conf_percentage", "(", "conf_percentage", ")", "# Get lists of the interval type names and the endpoint names", "interval_type_names", "=", "[", "'percentile_interval'", ",", "'BCa_interval'", ",", "'ABC_interval'", "]", "endpoint_names", "=", "[", "'{:.3g}%'", ".", "format", "(", "alpha", "/", "2.0", ")", ",", "'{:.3g}%'", ".", "format", "(", "100", "-", "alpha", "/", "2.0", ")", "]", "# Create the column names for the dataframe of confidence intervals", "multi_index_names", "=", "list", "(", "itertools", ".", "product", "(", "interval_type_names", ",", "endpoint_names", ")", ")", "df_column_index", "=", "pd", ".", "MultiIndex", ".", "from_tuples", "(", "multi_index_names", ")", "# Create the dataframe containing all confidence intervals", "self", ".", "all_intervals", "=", "pd", ".", "concat", "(", "[", "self", ".", "percentile_interval", ",", "self", ".", "bca_interval", ",", "self", ".", "abc_interval", "]", ",", "axis", "=", "1", ",", "ignore_index", "=", "True", ")", "# Store the column names for the combined confidence intervals", "self", ".", "all_intervals", ".", "columns", "=", "df_column_index", "self", ".", "all_intervals", ".", "index", "=", "self", ".", "mle_params", ".", "index", "else", ":", "msg", "=", "\"interval_type MUST be in `['pi', 'bca', 'abc', 'all']`\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Calculates percentile, bias-corrected and accelerated, and approximate bootstrap confidence intervals. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. interval_type : str in {'all', 'pi', 'bca', 'abc'}, optional. Denotes the type of confidence intervals that should be calculated. 'all' results in all types of confidence intervals being calculated. 'pi' means 'percentile intervals', 'bca' means 'bias-corrected and accelerated', and 'abc' means 'approximate bootstrap confidence' intervals. Default == 'all'. init_vals : 1D ndarray. The initial values used to estimate one's choice model. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions for the 'abc' intervals. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle` when calculating the 'abc' intervals. Returns ------- None. Will store the confidence intervals on their respective attributes: `self.percentile_interval`, `self.bca_interval`, `self.abc_interval`, or all of these objects.
[ "Calculates", "percentile", "bias", "-", "corrected", "and", "accelerated", "and", "approximate", "bootstrap", "confidence", "intervals", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap.py#L790-L879
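When `interval_type == 'all'`, the three interval DataFrames are concatenated and relabeled with a two-level column index of (interval type, endpoint). A self-contained sketch of that assembly step follows; the endpoint values and parameter names are made up.

```python
import itertools
import numpy as np
import pandas as pd

param_names = ["beta_1", "beta_2"]
endpoint_names = ["2.5%", "97.5%"]
interval_type_names = ["percentile_interval", "BCa_interval", "ABC_interval"]

# Made-up interval endpoints, one DataFrame per interval type.
rng = np.random.default_rng(3)
pieces = [pd.DataFrame(rng.normal(size=(2, 2)),
                       index=param_names, columns=endpoint_names)
          for _ in interval_type_names]

# Concatenate side by side, then install a (type, endpoint) MultiIndex on the columns.
all_intervals = pd.concat(pieces, axis=1, ignore_index=True)
all_intervals.columns = pd.MultiIndex.from_tuples(
    list(itertools.product(interval_type_names, endpoint_names)))
all_intervals.index = param_names
print(all_intervals)
```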
845
timothyb0912/pylogit
pylogit/clog_log.py
create_calc_dh_d_alpha
def create_calc_dh_d_alpha(estimator): """ Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the outside intercept parameters. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `rows_to_alts` attribute that is a 2D scipy sparse matrix that maps the rows of the `design` matrix to the alternatives available in this dataset. Should also contain an `intercept_ref_pos` attribute that is either None or an int. This attribute should denote which intercept is not being estimated (in the case of outside intercept parameters) for identification purposes. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the tranformed utility vector with respect to the vector of outside intercepts. The dimensions of the returned vector should be `(design.shape[0], num_alternatives - 1)`. """ if estimator.intercept_ref_pos is not None: needed_idxs = range(estimator.rows_to_alts.shape[1]) needed_idxs.remove(estimator.intercept_ref_pos) dh_d_alpha = (estimator.rows_to_alts .copy() .transpose()[needed_idxs, :] .transpose()) else: dh_d_alpha = None # Create a function that will take in the pre-formed matrix, replace its # data in-place with the new data, and return the correct dh_dalpha on each # iteration of the minimizer calc_dh_d_alpha = partial(_cloglog_transform_deriv_alpha, output_array=dh_d_alpha) return calc_dh_d_alpha
python
def create_calc_dh_d_alpha(estimator): """ Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the outside intercept parameters. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `rows_to_alts` attribute that is a 2D scipy sparse matrix that maps the rows of the `design` matrix to the alternatives available in this dataset. Should also contain an `intercept_ref_pos` attribute that is either None or an int. This attribute should denote which intercept is not being estimated (in the case of outside intercept parameters) for identification purposes. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the tranformed utility vector with respect to the vector of outside intercepts. The dimensions of the returned vector should be `(design.shape[0], num_alternatives - 1)`. """ if estimator.intercept_ref_pos is not None: needed_idxs = range(estimator.rows_to_alts.shape[1]) needed_idxs.remove(estimator.intercept_ref_pos) dh_d_alpha = (estimator.rows_to_alts .copy() .transpose()[needed_idxs, :] .transpose()) else: dh_d_alpha = None # Create a function that will take in the pre-formed matrix, replace its # data in-place with the new data, and return the correct dh_dalpha on each # iteration of the minimizer calc_dh_d_alpha = partial(_cloglog_transform_deriv_alpha, output_array=dh_d_alpha) return calc_dh_d_alpha
[ "def", "create_calc_dh_d_alpha", "(", "estimator", ")", ":", "if", "estimator", ".", "intercept_ref_pos", "is", "not", "None", ":", "needed_idxs", "=", "range", "(", "estimator", ".", "rows_to_alts", ".", "shape", "[", "1", "]", ")", "needed_idxs", ".", "remove", "(", "estimator", ".", "intercept_ref_pos", ")", "dh_d_alpha", "=", "(", "estimator", ".", "rows_to_alts", ".", "copy", "(", ")", ".", "transpose", "(", ")", "[", "needed_idxs", ",", ":", "]", ".", "transpose", "(", ")", ")", "else", ":", "dh_d_alpha", "=", "None", "# Create a function that will take in the pre-formed matrix, replace its", "# data in-place with the new data, and return the correct dh_dalpha on each", "# iteration of the minimizer", "calc_dh_d_alpha", "=", "partial", "(", "_cloglog_transform_deriv_alpha", ",", "output_array", "=", "dh_d_alpha", ")", "return", "calc_dh_d_alpha" ]
Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the outside intercept parameters. Parameters ---------- estimator : an instance of the estimation.LogitTypeEstimator class. Should contain a `rows_to_alts` attribute that is a 2D scipy sparse matrix that maps the rows of the `design` matrix to the alternatives available in this dataset. Should also contain an `intercept_ref_pos` attribute that is either None or an int. This attribute should denote which intercept is not being estimated (in the case of outside intercept parameters) for identification purposes. Returns ------- Callable. Will accept a 1D array of systematic utility values, a 1D array of alternative IDs, (shape parameters if there are any) and miscellaneous args and kwargs. Should return a 2D array whose elements contain the derivative of the transformed utility vector with respect to the vector of outside intercepts. The dimensions of the returned array should be `(design.shape[0], num_alternatives - 1)`.
[ "Return", "the", "function", "that", "can", "be", "used", "in", "the", "various", "gradient", "and", "hessian", "calculations", "to", "calculate", "the", "derivative", "of", "the", "transformation", "with", "respect", "to", "the", "outside", "intercept", "parameters", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/clog_log.py#L374-L415
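`create_calc_dh_d_alpha` drops the column of the rows-to-alternatives mapping that corresponds to the reference intercept. Below is a scipy.sparse sketch of that column-dropping step with a tiny made-up mapping; note that the library code calls `.remove` directly on a `range`, which assumes Python 2, so the sketch materializes the index list first.

```python
import numpy as np
from scipy.sparse import csr_matrix

# Tiny made-up mapping of 6 long-format rows to 3 alternatives
# (the `rows_to_alts` attribute described above).
row_idx = np.arange(6)
alt_idx = np.array([0, 1, 2, 0, 1, 2])
rows_to_alts = csr_matrix((np.ones(6), (row_idx, alt_idx)), shape=(6, 3))

intercept_ref_pos = 0  # pretend alternative 0 provides the reference intercept

# Keep every alternative's column except the reference alternative's.
needed_idxs = list(range(rows_to_alts.shape[1]))
needed_idxs.remove(intercept_ref_pos)
dh_d_alpha = rows_to_alts.copy().transpose()[needed_idxs, :].transpose()

print(dh_d_alpha.shape)  # (6, 2): one column per estimated outside intercept
```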
846
timothyb0912/pylogit
pylogit/estimation.py
calc_individual_chi_squares
def calc_individual_chi_squares(residuals, long_probabilities, rows_to_obs): """ Calculates individual chi-squared values for each choice situation in the dataset. Parameters ---------- residuals : 1D ndarray. The choice vector minus the predicted probability of each alternative for each observation. long_probabilities : 1D ndarray. The probability of each alternative being chosen in each choice situation. rows_to_obs : 2D scipy sparse array. Should map each row of the long format dataferame to the unique observations in the dataset. Returns ------- ind_chi_squareds : 1D ndarray. Will have as many elements as there are columns in `rows_to_obs`. Each element will contain the pearson chi-squared value for the given choice situation. """ chi_squared_terms = np.square(residuals) / long_probabilities return rows_to_obs.T.dot(chi_squared_terms)
python
def calc_individual_chi_squares(residuals, long_probabilities, rows_to_obs): """ Calculates individual chi-squared values for each choice situation in the dataset. Parameters ---------- residuals : 1D ndarray. The choice vector minus the predicted probability of each alternative for each observation. long_probabilities : 1D ndarray. The probability of each alternative being chosen in each choice situation. rows_to_obs : 2D scipy sparse array. Should map each row of the long format dataferame to the unique observations in the dataset. Returns ------- ind_chi_squareds : 1D ndarray. Will have as many elements as there are columns in `rows_to_obs`. Each element will contain the pearson chi-squared value for the given choice situation. """ chi_squared_terms = np.square(residuals) / long_probabilities return rows_to_obs.T.dot(chi_squared_terms)
[ "def", "calc_individual_chi_squares", "(", "residuals", ",", "long_probabilities", ",", "rows_to_obs", ")", ":", "chi_squared_terms", "=", "np", ".", "square", "(", "residuals", ")", "/", "long_probabilities", "return", "rows_to_obs", ".", "T", ".", "dot", "(", "chi_squared_terms", ")" ]
Calculates individual chi-squared values for each choice situation in the dataset. Parameters ---------- residuals : 1D ndarray. The choice vector minus the predicted probability of each alternative for each observation. long_probabilities : 1D ndarray. The probability of each alternative being chosen in each choice situation. rows_to_obs : 2D scipy sparse array. Should map each row of the long format dataframe to the unique observations in the dataset. Returns ------- ind_chi_squareds : 1D ndarray. Will have as many elements as there are columns in `rows_to_obs`. Each element will contain the Pearson chi-squared value for the given choice situation.
[ "Calculates", "individual", "chi", "-", "squared", "values", "for", "each", "choice", "situation", "in", "the", "dataset", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/estimation.py#L424-L451
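To make the `rows_to_obs` bookkeeping concrete, here is a minimal, self-contained sketch using invented toy data (two observations, each facing two alternatives); it reproduces the same calculation as calc_individual_chi_squares above:

import numpy as np
from scipy.sparse import csr_matrix

# Map the four long-format rows to the two observations they belong to.
rows_to_obs = csr_matrix(np.array([[1, 0],
                                   [1, 0],
                                   [0, 1],
                                   [0, 1]]))

# Hypothetical predicted probabilities and the observed choice vector.
long_probabilities = np.array([0.7, 0.3, 0.4, 0.6])
choices = np.array([1, 0, 0, 1])
residuals = choices - long_probabilities

# Same two steps as the function body above.
chi_squared_terms = np.square(residuals) / long_probabilities
ind_chi_squareds = rows_to_obs.T.dot(chi_squared_terms)
print(ind_chi_squareds)  # one Pearson chi-squared value per observation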
847
timothyb0912/pylogit
pylogit/estimation.py
calc_rho_and_rho_bar_squared
def calc_rho_and_rho_bar_squared(final_log_likelihood, null_log_likelihood, num_est_parameters): """ Calculates McFadden's rho-squared and rho-bar squared for the given model. Parameters ---------- final_log_likelihood : float. The final log-likelihood of the model whose rho-squared and rho-bar squared are being calculated for. null_log_likelihood : float. The log-likelihood of the model in question, when all parameters are zero or their 'base' values. num_est_parameters : int. The number of parameters estimated in this model. Returns ------- `(rho_squared, rho_bar_squared)` : tuple of floats. The rho-squared and rho-bar-squared for the model. """ rho_squared = 1.0 - final_log_likelihood / null_log_likelihood rho_bar_squared = 1.0 - ((final_log_likelihood - num_est_parameters) / null_log_likelihood) return rho_squared, rho_bar_squared
python
def calc_rho_and_rho_bar_squared(final_log_likelihood, null_log_likelihood, num_est_parameters): """ Calculates McFadden's rho-squared and rho-bar squared for the given model. Parameters ---------- final_log_likelihood : float. The final log-likelihood of the model whose rho-squared and rho-bar squared are being calculated for. null_log_likelihood : float. The log-likelihood of the model in question, when all parameters are zero or their 'base' values. num_est_parameters : int. The number of parameters estimated in this model. Returns ------- `(rho_squared, rho_bar_squared)` : tuple of floats. The rho-squared and rho-bar-squared for the model. """ rho_squared = 1.0 - final_log_likelihood / null_log_likelihood rho_bar_squared = 1.0 - ((final_log_likelihood - num_est_parameters) / null_log_likelihood) return rho_squared, rho_bar_squared
[ "def", "calc_rho_and_rho_bar_squared", "(", "final_log_likelihood", ",", "null_log_likelihood", ",", "num_est_parameters", ")", ":", "rho_squared", "=", "1.0", "-", "final_log_likelihood", "/", "null_log_likelihood", "rho_bar_squared", "=", "1.0", "-", "(", "(", "final_log_likelihood", "-", "num_est_parameters", ")", "/", "null_log_likelihood", ")", "return", "rho_squared", ",", "rho_bar_squared" ]
Calculates McFadden's rho-squared and rho-bar squared for the given model. Parameters ---------- final_log_likelihood : float. The final log-likelihood of the model whose rho-squared and rho-bar squared are being calculated. null_log_likelihood : float. The log-likelihood of the model in question, when all parameters are zero or their 'base' values. num_est_parameters : int. The number of parameters estimated in this model. Returns ------- `(rho_squared, rho_bar_squared)` : tuple of floats. The rho-squared and rho-bar-squared for the model.
[ "Calculates", "McFadden", "s", "rho", "-", "squared", "and", "rho", "-", "bar", "squared", "for", "the", "given", "model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/estimation.py#L454-L480
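As a quick worked check of the two formulas above (numbers invented for illustration): with a null log-likelihood of -1000, a final log-likelihood of -850, and 5 estimated parameters, rho-squared is 1 - (-850 / -1000) = 0.15 and rho-bar-squared is 1 - ((-850 - 5) / -1000) = 0.145. Using the function defined in this record:

rho, rho_bar = calc_rho_and_rho_bar_squared(-850.0, -1000.0, 5)
print(rho, rho_bar)  # approximately 0.15 and 0.145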
848
timothyb0912/pylogit
pylogit/estimation.py
calc_and_store_post_estimation_results
def calc_and_store_post_estimation_results(results_dict, estimator): """ Calculates and stores post-estimation results that require the use of the systematic utility transformation functions or the various derivative functions. Note that this function is only valid for logit-type models. Parameters ---------- results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following keys: `["fun", "x", "log_likelihood_null"]`. estimator : an instance of the EstimationObj class. Should contain the following attributes or methods: - convenience_split_params - convenience_calc_probs - convenience_calc_gradient - convenience_calc_hessian - convenience_calc_fisher_approx - choice_vector - rows_to_obs Returns ------- results_dict : dict. The following keys will have been entered into `results_dict`: - final_log_likelihood - utility_coefs - intercept_params - shape_params - nest_params - chosen_probs - long_probs - residuals - ind_chi_squareds - rho_squared - rho_bar_squared - final_gradient - final_hessian - fisher_info """ # Store the final log-likelihood final_log_likelihood = -1 * results_dict["fun"] results_dict["final_log_likelihood"] = final_log_likelihood # Get the final array of estimated parameters final_params = results_dict["x"] # Add the estimated parameters to the results dictionary split_res = estimator.convenience_split_params(final_params, return_all_types=True) results_dict["nest_params"] = split_res[0] results_dict["shape_params"] = split_res[1] results_dict["intercept_params"] = split_res[2] results_dict["utility_coefs"] = split_res[3] # Get the probability of the chosen alternative and long_form probabilities chosen_probs, long_probs = estimator.convenience_calc_probs(final_params) results_dict["chosen_probs"] = chosen_probs results_dict["long_probs"] = long_probs ##### # Calculate the residuals and individual chi-square values ##### # Calculate the residual vector if len(long_probs.shape) == 1: residuals = estimator.choice_vector - long_probs else: residuals = estimator.choice_vector[:, None] - long_probs results_dict["residuals"] = residuals # Calculate the observation specific chi-squared components args = [residuals, long_probs, estimator.rows_to_obs] results_dict["ind_chi_squareds"] = calc_individual_chi_squares(*args) # Calculate and store the rho-squared and rho-bar-squared log_likelihood_null = results_dict["log_likelihood_null"] rho_results = calc_rho_and_rho_bar_squared(final_log_likelihood, log_likelihood_null, final_params.shape[0]) results_dict["rho_squared"] = rho_results[0] results_dict["rho_bar_squared"] = rho_results[1] ##### # Calculate the gradient, hessian, and BHHH approximation to the fisher # info matrix ##### results_dict["final_gradient"] =\ estimator.convenience_calc_gradient(final_params) results_dict["final_hessian"] =\ estimator.convenience_calc_hessian(final_params) results_dict["fisher_info"] =\ estimator.convenience_calc_fisher_approx(final_params) # Store the constrained positions that was used in this estimation process results_dict["constrained_pos"] = estimator.constrained_pos return results_dict
python
def calc_and_store_post_estimation_results(results_dict, estimator): """ Calculates and stores post-estimation results that require the use of the systematic utility transformation functions or the various derivative functions. Note that this function is only valid for logit-type models. Parameters ---------- results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following keys: `["fun", "x", "log_likelihood_null"]`. estimator : an instance of the EstimationObj class. Should contain the following attributes or methods: - convenience_split_params - convenience_calc_probs - convenience_calc_gradient - convenience_calc_hessian - convenience_calc_fisher_approx - choice_vector - rows_to_obs Returns ------- results_dict : dict. The following keys will have been entered into `results_dict`: - final_log_likelihood - utility_coefs - intercept_params - shape_params - nest_params - chosen_probs - long_probs - residuals - ind_chi_squareds - rho_squared - rho_bar_squared - final_gradient - final_hessian - fisher_info """ # Store the final log-likelihood final_log_likelihood = -1 * results_dict["fun"] results_dict["final_log_likelihood"] = final_log_likelihood # Get the final array of estimated parameters final_params = results_dict["x"] # Add the estimated parameters to the results dictionary split_res = estimator.convenience_split_params(final_params, return_all_types=True) results_dict["nest_params"] = split_res[0] results_dict["shape_params"] = split_res[1] results_dict["intercept_params"] = split_res[2] results_dict["utility_coefs"] = split_res[3] # Get the probability of the chosen alternative and long_form probabilities chosen_probs, long_probs = estimator.convenience_calc_probs(final_params) results_dict["chosen_probs"] = chosen_probs results_dict["long_probs"] = long_probs ##### # Calculate the residuals and individual chi-square values ##### # Calculate the residual vector if len(long_probs.shape) == 1: residuals = estimator.choice_vector - long_probs else: residuals = estimator.choice_vector[:, None] - long_probs results_dict["residuals"] = residuals # Calculate the observation specific chi-squared components args = [residuals, long_probs, estimator.rows_to_obs] results_dict["ind_chi_squareds"] = calc_individual_chi_squares(*args) # Calculate and store the rho-squared and rho-bar-squared log_likelihood_null = results_dict["log_likelihood_null"] rho_results = calc_rho_and_rho_bar_squared(final_log_likelihood, log_likelihood_null, final_params.shape[0]) results_dict["rho_squared"] = rho_results[0] results_dict["rho_bar_squared"] = rho_results[1] ##### # Calculate the gradient, hessian, and BHHH approximation to the fisher # info matrix ##### results_dict["final_gradient"] =\ estimator.convenience_calc_gradient(final_params) results_dict["final_hessian"] =\ estimator.convenience_calc_hessian(final_params) results_dict["fisher_info"] =\ estimator.convenience_calc_fisher_approx(final_params) # Store the constrained positions that was used in this estimation process results_dict["constrained_pos"] = estimator.constrained_pos return results_dict
[ "def", "calc_and_store_post_estimation_results", "(", "results_dict", ",", "estimator", ")", ":", "# Store the final log-likelihood", "final_log_likelihood", "=", "-", "1", "*", "results_dict", "[", "\"fun\"", "]", "results_dict", "[", "\"final_log_likelihood\"", "]", "=", "final_log_likelihood", "# Get the final array of estimated parameters", "final_params", "=", "results_dict", "[", "\"x\"", "]", "# Add the estimated parameters to the results dictionary", "split_res", "=", "estimator", ".", "convenience_split_params", "(", "final_params", ",", "return_all_types", "=", "True", ")", "results_dict", "[", "\"nest_params\"", "]", "=", "split_res", "[", "0", "]", "results_dict", "[", "\"shape_params\"", "]", "=", "split_res", "[", "1", "]", "results_dict", "[", "\"intercept_params\"", "]", "=", "split_res", "[", "2", "]", "results_dict", "[", "\"utility_coefs\"", "]", "=", "split_res", "[", "3", "]", "# Get the probability of the chosen alternative and long_form probabilities", "chosen_probs", ",", "long_probs", "=", "estimator", ".", "convenience_calc_probs", "(", "final_params", ")", "results_dict", "[", "\"chosen_probs\"", "]", "=", "chosen_probs", "results_dict", "[", "\"long_probs\"", "]", "=", "long_probs", "#####", "# Calculate the residuals and individual chi-square values", "#####", "# Calculate the residual vector", "if", "len", "(", "long_probs", ".", "shape", ")", "==", "1", ":", "residuals", "=", "estimator", ".", "choice_vector", "-", "long_probs", "else", ":", "residuals", "=", "estimator", ".", "choice_vector", "[", ":", ",", "None", "]", "-", "long_probs", "results_dict", "[", "\"residuals\"", "]", "=", "residuals", "# Calculate the observation specific chi-squared components", "args", "=", "[", "residuals", ",", "long_probs", ",", "estimator", ".", "rows_to_obs", "]", "results_dict", "[", "\"ind_chi_squareds\"", "]", "=", "calc_individual_chi_squares", "(", "*", "args", ")", "# Calculate and store the rho-squared and rho-bar-squared", "log_likelihood_null", "=", "results_dict", "[", "\"log_likelihood_null\"", "]", "rho_results", "=", "calc_rho_and_rho_bar_squared", "(", "final_log_likelihood", ",", "log_likelihood_null", ",", "final_params", ".", "shape", "[", "0", "]", ")", "results_dict", "[", "\"rho_squared\"", "]", "=", "rho_results", "[", "0", "]", "results_dict", "[", "\"rho_bar_squared\"", "]", "=", "rho_results", "[", "1", "]", "#####", "# Calculate the gradient, hessian, and BHHH approximation to the fisher", "# info matrix", "#####", "results_dict", "[", "\"final_gradient\"", "]", "=", "estimator", ".", "convenience_calc_gradient", "(", "final_params", ")", "results_dict", "[", "\"final_hessian\"", "]", "=", "estimator", ".", "convenience_calc_hessian", "(", "final_params", ")", "results_dict", "[", "\"fisher_info\"", "]", "=", "estimator", ".", "convenience_calc_fisher_approx", "(", "final_params", ")", "# Store the constrained positions that was used in this estimation process", "results_dict", "[", "\"constrained_pos\"", "]", "=", "estimator", ".", "constrained_pos", "return", "results_dict" ]
Calculates and stores post-estimation results that require the use of the systematic utility transformation functions or the various derivative functions. Note that this function is only valid for logit-type models. Parameters ---------- results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following keys: `["fun", "x", "log_likelihood_null"]`. estimator : an instance of the EstimationObj class. Should contain the following attributes or methods: - convenience_split_params - convenience_calc_probs - convenience_calc_gradient - convenience_calc_hessian - convenience_calc_fisher_approx - choice_vector - rows_to_obs Returns ------- results_dict : dict. The following keys will have been entered into `results_dict`: - final_log_likelihood - utility_coefs - intercept_params - shape_params - nest_params - chosen_probs - long_probs - residuals - ind_chi_squareds - rho_squared - rho_bar_squared - final_gradient - final_hessian - fisher_info
[ "Calculates", "and", "stores", "post", "-", "estimation", "results", "that", "require", "the", "use", "of", "the", "systematic", "utility", "transformation", "functions", "or", "the", "various", "derivative", "functions", ".", "Note", "that", "this", "function", "is", "only", "valid", "for", "logit", "-", "type", "models", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/estimation.py#L483-L583
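One detail worth noting in the residual calculation above: `long_probs` can be 2D for simulation-based models (presumably the Mixed Logit, with one probability column per draw), so the choice vector is broadcast across columns. A minimal sketch of that broadcasting with invented arrays:

import numpy as np

choice_vector = np.array([1, 0, 0, 1])   # long-format choices
long_probs = np.random.rand(4, 3)        # e.g. 3 simulation draws per row
residuals = choice_vector[:, None] - long_probs
print(residuals.shape)                   # (4, 3): one residual column per draw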
849
timothyb0912/pylogit
pylogit/estimation.py
estimate
def estimate(init_values, estimator, method, loss_tol, gradient_tol, maxiter, print_results, use_hessian=True, just_point=False, **kwargs): """ Estimate the given choice model that is defined by `estimator`. Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. estimator : an instance of the EstimationObj class. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. use_hessian : bool, optional. Determines whether the `calc_neg_hessian` method of the `estimator` object will be used as the hessian function during the estimation. This kwarg is used since some models (such as the Mixed Logit and Nested Logit) use a rather crude (i.e. the BHHH) approximation to the Fisher Information Matrix, and users may prefer to not use this approximation for the hessian during estimation. just_point : bool, optional. Determines whether or not calculations that are non-critical for obtaining the maximum likelihood point estimate will be performed. Default == False. Return ------ results : dict. The dictionary of estimation results that is returned by scipy.optimize.minimize. It will also have (at minimum) the following keys: - "log-likelihood_null" - "final_log_likelihood" - "utility_coefs" - "intercept_params" - "shape_params" - "nest_params" - "chosen_probs" - "long_probs" - "residuals" - "ind_chi_squareds" - "rho_squared" - "rho_bar_squared" - "final_gradient" - "final_hessian" - "fisher_info" """ if not just_point: # Perform preliminary calculations log_likelihood_at_zero =\ estimator.convenience_calc_log_likelihood(estimator.zero_vector) initial_log_likelihood =\ estimator.convenience_calc_log_likelihood(init_values) if print_results: # Print the log-likelihood at zero null_msg = "Log-likelihood at zero: {:,.4f}" print(null_msg.format(log_likelihood_at_zero)) # Print the log-likelihood at the starting values init_msg = "Initial Log-likelihood: {:,.4f}" print(init_msg.format(initial_log_likelihood)) sys.stdout.flush() # Get the hessian fucntion for this estimation process hess_func = estimator.calc_neg_hessian if use_hessian else None # Estimate the actual parameters of the model start_time = time.time() results = minimize(estimator.calc_neg_log_likelihood_and_neg_gradient, init_values, method=method, jac=True, hess=hess_func, tol=loss_tol, options={'gtol': gradient_tol, "maxiter": maxiter}, **kwargs) if not just_point: if print_results: # Stop timing the estimation process and report the timing results end_time = time.time() elapsed_sec = (end_time - start_time) elapsed_min = elapsed_sec / 60.0 if elapsed_min > 1.0: msg = "Estimation Time for Point Estimation: {:.2f} minutes." print(msg.format(elapsed_min)) else: msg = "Estimation Time for Point Estimation: {:.2f} seconds." 
print(msg.format(elapsed_sec)) print("Final log-likelihood: {:,.4f}".format(-1 * results["fun"])) sys.stdout.flush() # Store the log-likelihood at zero results["log_likelihood_null"] = log_likelihood_at_zero # Calculate and store the post-estimation results results = calc_and_store_post_estimation_results(results, estimator) return results
python
def estimate(init_values, estimator, method, loss_tol, gradient_tol, maxiter, print_results, use_hessian=True, just_point=False, **kwargs): """ Estimate the given choice model that is defined by `estimator`. Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. estimator : an instance of the EstimationObj class. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. use_hessian : bool, optional. Determines whether the `calc_neg_hessian` method of the `estimator` object will be used as the hessian function during the estimation. This kwarg is used since some models (such as the Mixed Logit and Nested Logit) use a rather crude (i.e. the BHHH) approximation to the Fisher Information Matrix, and users may prefer to not use this approximation for the hessian during estimation. just_point : bool, optional. Determines whether or not calculations that are non-critical for obtaining the maximum likelihood point estimate will be performed. Default == False. Return ------ results : dict. The dictionary of estimation results that is returned by scipy.optimize.minimize. It will also have (at minimum) the following keys: - "log-likelihood_null" - "final_log_likelihood" - "utility_coefs" - "intercept_params" - "shape_params" - "nest_params" - "chosen_probs" - "long_probs" - "residuals" - "ind_chi_squareds" - "rho_squared" - "rho_bar_squared" - "final_gradient" - "final_hessian" - "fisher_info" """ if not just_point: # Perform preliminary calculations log_likelihood_at_zero =\ estimator.convenience_calc_log_likelihood(estimator.zero_vector) initial_log_likelihood =\ estimator.convenience_calc_log_likelihood(init_values) if print_results: # Print the log-likelihood at zero null_msg = "Log-likelihood at zero: {:,.4f}" print(null_msg.format(log_likelihood_at_zero)) # Print the log-likelihood at the starting values init_msg = "Initial Log-likelihood: {:,.4f}" print(init_msg.format(initial_log_likelihood)) sys.stdout.flush() # Get the hessian fucntion for this estimation process hess_func = estimator.calc_neg_hessian if use_hessian else None # Estimate the actual parameters of the model start_time = time.time() results = minimize(estimator.calc_neg_log_likelihood_and_neg_gradient, init_values, method=method, jac=True, hess=hess_func, tol=loss_tol, options={'gtol': gradient_tol, "maxiter": maxiter}, **kwargs) if not just_point: if print_results: # Stop timing the estimation process and report the timing results end_time = time.time() elapsed_sec = (end_time - start_time) elapsed_min = elapsed_sec / 60.0 if elapsed_min > 1.0: msg = "Estimation Time for Point Estimation: {:.2f} minutes." print(msg.format(elapsed_min)) else: msg = "Estimation Time for Point Estimation: {:.2f} seconds." 
print(msg.format(elapsed_sec)) print("Final log-likelihood: {:,.4f}".format(-1 * results["fun"])) sys.stdout.flush() # Store the log-likelihood at zero results["log_likelihood_null"] = log_likelihood_at_zero # Calculate and store the post-estimation results results = calc_and_store_post_estimation_results(results, estimator) return results
[ "def", "estimate", "(", "init_values", ",", "estimator", ",", "method", ",", "loss_tol", ",", "gradient_tol", ",", "maxiter", ",", "print_results", ",", "use_hessian", "=", "True", ",", "just_point", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "just_point", ":", "# Perform preliminary calculations", "log_likelihood_at_zero", "=", "estimator", ".", "convenience_calc_log_likelihood", "(", "estimator", ".", "zero_vector", ")", "initial_log_likelihood", "=", "estimator", ".", "convenience_calc_log_likelihood", "(", "init_values", ")", "if", "print_results", ":", "# Print the log-likelihood at zero", "null_msg", "=", "\"Log-likelihood at zero: {:,.4f}\"", "print", "(", "null_msg", ".", "format", "(", "log_likelihood_at_zero", ")", ")", "# Print the log-likelihood at the starting values", "init_msg", "=", "\"Initial Log-likelihood: {:,.4f}\"", "print", "(", "init_msg", ".", "format", "(", "initial_log_likelihood", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Get the hessian fucntion for this estimation process", "hess_func", "=", "estimator", ".", "calc_neg_hessian", "if", "use_hessian", "else", "None", "# Estimate the actual parameters of the model", "start_time", "=", "time", ".", "time", "(", ")", "results", "=", "minimize", "(", "estimator", ".", "calc_neg_log_likelihood_and_neg_gradient", ",", "init_values", ",", "method", "=", "method", ",", "jac", "=", "True", ",", "hess", "=", "hess_func", ",", "tol", "=", "loss_tol", ",", "options", "=", "{", "'gtol'", ":", "gradient_tol", ",", "\"maxiter\"", ":", "maxiter", "}", ",", "*", "*", "kwargs", ")", "if", "not", "just_point", ":", "if", "print_results", ":", "# Stop timing the estimation process and report the timing results", "end_time", "=", "time", ".", "time", "(", ")", "elapsed_sec", "=", "(", "end_time", "-", "start_time", ")", "elapsed_min", "=", "elapsed_sec", "/", "60.0", "if", "elapsed_min", ">", "1.0", ":", "msg", "=", "\"Estimation Time for Point Estimation: {:.2f} minutes.\"", "print", "(", "msg", ".", "format", "(", "elapsed_min", ")", ")", "else", ":", "msg", "=", "\"Estimation Time for Point Estimation: {:.2f} seconds.\"", "print", "(", "msg", ".", "format", "(", "elapsed_sec", ")", ")", "print", "(", "\"Final log-likelihood: {:,.4f}\"", ".", "format", "(", "-", "1", "*", "results", "[", "\"fun\"", "]", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# Store the log-likelihood at zero", "results", "[", "\"log_likelihood_null\"", "]", "=", "log_likelihood_at_zero", "# Calculate and store the post-estimation results", "results", "=", "calc_and_store_post_estimation_results", "(", "results", ",", "estimator", ")", "return", "results" ]
Estimate the given choice model that is defined by `estimator`. Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. estimator : an instance of the EstimationObj class. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they are determined. Default `== True`. use_hessian : bool, optional. Determines whether the `calc_neg_hessian` method of the `estimator` object will be used as the hessian function during the estimation. This kwarg is used since some models (such as the Mixed Logit and Nested Logit) use a rather crude (i.e. the BHHH) approximation to the Fisher Information Matrix, and users may prefer to not use this approximation for the hessian during estimation. just_point : bool, optional. Determines whether or not calculations that are non-critical for obtaining the maximum likelihood point estimate will be performed. Default == False. Return ------ results : dict. The dictionary of estimation results that is returned by scipy.optimize.minimize. It will also have (at minimum) the following keys: - "log-likelihood_null" - "final_log_likelihood" - "utility_coefs" - "intercept_params" - "shape_params" - "nest_params" - "chosen_probs" - "long_probs" - "residuals" - "ind_chi_squareds" - "rho_squared" - "rho_bar_squared" - "final_gradient" - "final_hessian" - "fisher_info"
[ "Estimate", "the", "given", "choice", "model", "that", "is", "defined", "by", "estimator", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/estimation.py#L586-L713
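Because `jac=True` is passed to scipy.optimize.minimize above, the objective callable is expected to return both the (negative) log-likelihood and its gradient from a single call, which is exactly what calc_neg_log_likelihood_and_neg_gradient (shown in the next record) does. A self-contained toy illustration of that calling convention, using a simple quadratic in place of the real likelihood:

import numpy as np
from scipy.optimize import minimize

def neg_ll_and_grad(params):
    # Stand-in objective with a known minimum at (1, 2).
    target = np.array([1.0, 2.0])
    value = 0.5 * np.sum((params - target) ** 2)
    gradient = params - target
    return value, gradient

result = minimize(neg_ll_and_grad, np.zeros(2), method='BFGS', jac=True)
print(result['x'])  # approximately [1.0, 2.0]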
850
timothyb0912/pylogit
pylogit/estimation.py
EstimationObj.calc_neg_log_likelihood_and_neg_gradient
def calc_neg_log_likelihood_and_neg_gradient(self, params): """ Calculates and returns the negative of the log-likelihood and the negative of the gradient. This function is used as the objective function in scipy.optimize.minimize. """ neg_log_likelihood = -1 * self.convenience_calc_log_likelihood(params) neg_gradient = -1 * self.convenience_calc_gradient(params) if self.constrained_pos is not None: neg_gradient[self.constrained_pos] = 0 return neg_log_likelihood, neg_gradient
python
def calc_neg_log_likelihood_and_neg_gradient(self, params): """ Calculates and returns the negative of the log-likelihood and the negative of the gradient. This function is used as the objective function in scipy.optimize.minimize. """ neg_log_likelihood = -1 * self.convenience_calc_log_likelihood(params) neg_gradient = -1 * self.convenience_calc_gradient(params) if self.constrained_pos is not None: neg_gradient[self.constrained_pos] = 0 return neg_log_likelihood, neg_gradient
[ "def", "calc_neg_log_likelihood_and_neg_gradient", "(", "self", ",", "params", ")", ":", "neg_log_likelihood", "=", "-", "1", "*", "self", ".", "convenience_calc_log_likelihood", "(", "params", ")", "neg_gradient", "=", "-", "1", "*", "self", ".", "convenience_calc_gradient", "(", "params", ")", "if", "self", ".", "constrained_pos", "is", "not", "None", ":", "neg_gradient", "[", "self", ".", "constrained_pos", "]", "=", "0", "return", "neg_log_likelihood", ",", "neg_gradient" ]
Calculates and returns the negative of the log-likelihood and the negative of the gradient. This function is used as the objective function in scipy.optimize.minimize.
[ "Calculates", "and", "returns", "the", "negative", "of", "the", "log", "-", "likelihood", "and", "the", "negative", "of", "the", "gradient", ".", "This", "function", "is", "used", "as", "the", "objective", "function", "in", "scipy", ".", "optimize", ".", "minimize", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/estimation.py#L211-L223
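Setting the gradient entries listed in `constrained_pos` to zero is what holds those parameters at their initial values: with a zero partial derivative (and the optimizer started at those values), quasi-Newton updates leave the constrained components untouched in practice. A tiny illustration of the masking step (the index below is arbitrary):

import numpy as np

gradient = np.array([0.3, -0.7, 1.1])
constrained_pos = [0]

neg_gradient = -1 * gradient
neg_gradient[constrained_pos] = 0
print(neg_gradient)  # [ 0.   0.7 -1.1]: the first parameter stays fixed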
851
timothyb0912/pylogit
pylogit/bootstrap_utils.py
ensure_samples_is_ndim_ndarray
def ensure_samples_is_ndim_ndarray(samples, name='bootstrap', ndim=2): """ Ensures that `samples` is an `ndim` numpy array. Raises a helpful ValueError if otherwise. """ assert isinstance(ndim, int) assert isinstance(name, str) if not isinstance(samples, np.ndarray) or not (samples.ndim == ndim): sample_name = name + "_samples" msg = "`{}` MUST be a {}D ndarray.".format(sample_name, ndim) raise ValueError(msg) return None
python
def ensure_samples_is_ndim_ndarray(samples, name='bootstrap', ndim=2): """ Ensures that `samples` is an `ndim` numpy array. Raises a helpful ValueError if otherwise. """ assert isinstance(ndim, int) assert isinstance(name, str) if not isinstance(samples, np.ndarray) or not (samples.ndim == ndim): sample_name = name + "_samples" msg = "`{}` MUST be a {}D ndarray.".format(sample_name, ndim) raise ValueError(msg) return None
[ "def", "ensure_samples_is_ndim_ndarray", "(", "samples", ",", "name", "=", "'bootstrap'", ",", "ndim", "=", "2", ")", ":", "assert", "isinstance", "(", "ndim", ",", "int", ")", "assert", "isinstance", "(", "name", ",", "str", ")", "if", "not", "isinstance", "(", "samples", ",", "np", ".", "ndarray", ")", "or", "not", "(", "samples", ".", "ndim", "==", "ndim", ")", ":", "sample_name", "=", "name", "+", "\"_samples\"", "msg", "=", "\"`{}` MUST be a {}D ndarray.\"", ".", "format", "(", "sample_name", ",", "ndim", ")", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Ensures that `samples` is an `ndim` numpy array. Raises a helpful ValueError if otherwise.
[ "Ensures", "that", "samples", "is", "an", "ndim", "numpy", "array", ".", "Raises", "a", "helpful", "ValueError", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_utils.py#L27-L38
852
timothyb0912/pylogit
pylogit/construct_estimator.py
create_estimation_obj
def create_estimation_obj(model_obj, init_vals, mappings=None, ridge=None, constrained_pos=None, weights=None): """ Should return a model estimation object corresponding to the model type of the `model_obj`. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. init_vals : 1D ndarray. The initial values to start the estimation process with. In the following order, there should be one value for each nest coefficient, shape parameter, outside intercept parameter, or index coefficient that is being estimated. mappings : OrderedDict or None, optional. Keys will be `["rows_to_obs", "rows_to_alts", "chosen_row_to_obs", "rows_to_nests"]`. The value for `rows_to_obs` will map the rows of the `long_form` to the unique observations (on the columns) in their order of appearance. The value for `rows_to_alts` will map the rows of the `long_form` to the unique alternatives which are possible in the dataset (on the columns), in sorted order--not order of appearance. The value for `chosen_row_to_obs`, if not None, will map the rows of the `long_form` that contain the chosen alternatives to the specific observations those rows are associated with (denoted by the columns). The value of `rows_to_nests`, if not None, will map the rows of the `long_form` to the nest (denoted by the column) that contains the row's alternative. Default == None. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. weights : 1D ndarray. Should contain the weights for each corresponding observation for each row of the long format data. """ # Get the mapping matrices for each model mapping_matrices =\ model_obj.get_mappings_for_fit() if mappings is None else mappings # Create the zero vector for each model. zero_vector = np.zeros(init_vals.shape[0]) # Get the internal model name internal_model_name = display_name_to_model_type[model_obj.model_type] # Get the split parameter function and estimator class for this model. estimator_class, current_split_func =\ (model_type_to_resources[internal_model_name]['estimator'], model_type_to_resources[internal_model_name]['split_func']) # Create the estimator instance that is desired. estimation_obj = estimator_class(model_obj, mapping_matrices, ridge, zero_vector, current_split_func, constrained_pos, weights=weights) # Return the created object return estimation_obj
python
def create_estimation_obj(model_obj, init_vals, mappings=None, ridge=None, constrained_pos=None, weights=None): """ Should return a model estimation object corresponding to the model type of the `model_obj`. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. init_vals : 1D ndarray. The initial values to start the estimation process with. In the following order, there should be one value for each nest coefficient, shape parameter, outside intercept parameter, or index coefficient that is being estimated. mappings : OrderedDict or None, optional. Keys will be `["rows_to_obs", "rows_to_alts", "chosen_row_to_obs", "rows_to_nests"]`. The value for `rows_to_obs` will map the rows of the `long_form` to the unique observations (on the columns) in their order of appearance. The value for `rows_to_alts` will map the rows of the `long_form` to the unique alternatives which are possible in the dataset (on the columns), in sorted order--not order of appearance. The value for `chosen_row_to_obs`, if not None, will map the rows of the `long_form` that contain the chosen alternatives to the specific observations those rows are associated with (denoted by the columns). The value of `rows_to_nests`, if not None, will map the rows of the `long_form` to the nest (denoted by the column) that contains the row's alternative. Default == None. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. weights : 1D ndarray. Should contain the weights for each corresponding observation for each row of the long format data. """ # Get the mapping matrices for each model mapping_matrices =\ model_obj.get_mappings_for_fit() if mappings is None else mappings # Create the zero vector for each model. zero_vector = np.zeros(init_vals.shape[0]) # Get the internal model name internal_model_name = display_name_to_model_type[model_obj.model_type] # Get the split parameter function and estimator class for this model. estimator_class, current_split_func =\ (model_type_to_resources[internal_model_name]['estimator'], model_type_to_resources[internal_model_name]['split_func']) # Create the estimator instance that is desired. estimation_obj = estimator_class(model_obj, mapping_matrices, ridge, zero_vector, current_split_func, constrained_pos, weights=weights) # Return the created object return estimation_obj
[ "def", "create_estimation_obj", "(", "model_obj", ",", "init_vals", ",", "mappings", "=", "None", ",", "ridge", "=", "None", ",", "constrained_pos", "=", "None", ",", "weights", "=", "None", ")", ":", "# Get the mapping matrices for each model", "mapping_matrices", "=", "model_obj", ".", "get_mappings_for_fit", "(", ")", "if", "mappings", "is", "None", "else", "mappings", "# Create the zero vector for each model.", "zero_vector", "=", "np", ".", "zeros", "(", "init_vals", ".", "shape", "[", "0", "]", ")", "# Get the internal model name", "internal_model_name", "=", "display_name_to_model_type", "[", "model_obj", ".", "model_type", "]", "# Get the split parameter function and estimator class for this model.", "estimator_class", ",", "current_split_func", "=", "(", "model_type_to_resources", "[", "internal_model_name", "]", "[", "'estimator'", "]", ",", "model_type_to_resources", "[", "internal_model_name", "]", "[", "'split_func'", "]", ")", "# Create the estimator instance that is desired.", "estimation_obj", "=", "estimator_class", "(", "model_obj", ",", "mapping_matrices", ",", "ridge", ",", "zero_vector", ",", "current_split_func", ",", "constrained_pos", ",", "weights", "=", "weights", ")", "# Return the created object", "return", "estimation_obj" ]
Should return a model estimation object corresponding to the model type of the `model_obj`. Parameters ---------- model_obj : an instance or subclass of the MNDC class. init_vals : 1D ndarray. The initial values to start the estimation process with. In the following order, there should be one value for each nest coefficient, shape parameter, outside intercept parameter, or index coefficient that is being estimated. mappings : OrderedDict or None, optional. Keys will be `["rows_to_obs", "rows_to_alts", "chosen_row_to_obs", "rows_to_nests"]`. The value for `rows_to_obs` will map the rows of the `long_form` to the unique observations (on the columns) in their order of appearance. The value for `rows_to_alts` will map the rows of the `long_form` to the unique alternatives which are possible in the dataset (on the columns), in sorted order--not order of appearance. The value for `chosen_row_to_obs`, if not None, will map the rows of the `long_form` that contain the chosen alternatives to the specific observations those rows are associated with (denoted by the columns). The value of `rows_to_nests`, if not None, will map the rows of the `long_form` to the nest (denoted by the column) that contains the row's alternative. Default == None. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. weights : 1D ndarray. Should contain the weights for each corresponding observation for each row of the long format data.
[ "Should", "return", "a", "model", "estimation", "object", "corresponding", "to", "the", "model", "type", "of", "the", "model_obj", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/construct_estimator.py#L54-L119
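The body of create_estimation_obj follows a plain dictionary-dispatch pattern: the model's display name is translated to an internal key, and that key selects both the estimator class and the parameter-splitting function. A generic, self-contained sketch of the same pattern (the registry contents and class names below are placeholders, not pylogit's actual `model_type_to_resources`):

def split_simple(params):
    # Placeholder split function: no nests, shapes, or intercepts.
    return None, None, None, params

class SimpleEstimator:
    def __init__(self, split_func):
        self.split_func = split_func

# Placeholder registry mapping an internal model name to its resources.
model_type_to_resources = {'Simple': {'estimator': SimpleEstimator,
                                      'split_func': split_simple}}

resources = model_type_to_resources['Simple']
estimation_obj = resources['estimator'](resources['split_func'])
print(type(estimation_obj).__name__)  # SimpleEstimator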
853
timothyb0912/pylogit
pylogit/bootstrap_abc.py
ensure_wide_weights_is_1D_or_2D_ndarray
def ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights): """ Ensures that `wide_weights` is a 1D or 2D ndarray. Raises a helpful ValueError if otherwise. """ if not isinstance(wide_weights, np.ndarray): msg = "wide_weights MUST be a ndarray." raise ValueError(msg) ndim = wide_weights.ndim if not 0 < ndim < 3: msg = "wide_weights MUST be a 1D or 2D ndarray." raise ValueError(msg) return None
python
def ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights): """ Ensures that `wide_weights` is a 1D or 2D ndarray. Raises a helpful ValueError if otherwise. """ if not isinstance(wide_weights, np.ndarray): msg = "wide_weights MUST be a ndarray." raise ValueError(msg) ndim = wide_weights.ndim if not 0 < ndim < 3: msg = "wide_weights MUST be a 1D or 2D ndarray." raise ValueError(msg) return None
[ "def", "ensure_wide_weights_is_1D_or_2D_ndarray", "(", "wide_weights", ")", ":", "if", "not", "isinstance", "(", "wide_weights", ",", "np", ".", "ndarray", ")", ":", "msg", "=", "\"wide_weights MUST be a ndarray.\"", "raise", "ValueError", "(", "msg", ")", "ndim", "=", "wide_weights", ".", "ndim", "if", "not", "0", "<", "ndim", "<", "3", ":", "msg", "=", "\"wide_weights MUST be a 1D or 2D ndarray.\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Ensures that `wide_weights` is a 1D or 2D ndarray. Raises a helpful ValueError if otherwise.
[ "Ensures", "that", "wide_weights", "is", "a", "1D", "or", "2D", "ndarray", ".", "Raises", "a", "helpful", "ValueError", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_abc.py#L51-L63
854
timothyb0912/pylogit
pylogit/bootstrap_abc.py
check_validity_of_long_form_args
def check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs): """ Ensures the args to `create_long_form_weights` have expected properties. """ # Ensure model_obj has the necessary method for create_long_form_weights ensure_model_obj_has_mapping_constructor(model_obj) # Ensure wide_weights is a 1D or 2D ndarray. ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights) # Ensure rows_to_obs is a scipy sparse matrix ensure_rows_to_obs_validity(rows_to_obs) return None
python
def check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs): """ Ensures the args to `create_long_form_weights` have expected properties. """ # Ensure model_obj has the necessary method for create_long_form_weights ensure_model_obj_has_mapping_constructor(model_obj) # Ensure wide_weights is a 1D or 2D ndarray. ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights) # Ensure rows_to_obs is a scipy sparse matrix ensure_rows_to_obs_validity(rows_to_obs) return None
[ "def", "check_validity_of_long_form_args", "(", "model_obj", ",", "wide_weights", ",", "rows_to_obs", ")", ":", "# Ensure model_obj has the necessary method for create_long_form_weights", "ensure_model_obj_has_mapping_constructor", "(", "model_obj", ")", "# Ensure wide_weights is a 1D or 2D ndarray.", "ensure_wide_weights_is_1D_or_2D_ndarray", "(", "wide_weights", ")", "# Ensure rows_to_obs is a scipy sparse matrix", "ensure_rows_to_obs_validity", "(", "rows_to_obs", ")", "return", "None" ]
Ensures the args to `create_long_form_weights` have expected properties.
[ "Ensures", "the", "args", "to", "create_long_form_weights", "have", "expected", "properties", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_abc.py#L66-L76
855
timothyb0912/pylogit
pylogit/bootstrap_abc.py
calc_finite_diff_terms_for_abc
def calc_finite_diff_terms_for_abc(model_obj, mle_params, init_vals, epsilon, **fit_kwargs): """ Calculates the terms needed for the finite difference approximations of the empirical influence and second order empirical influence functions. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. Should be the model object that corresponds to the model we are constructing the bootstrap confidence intervals for. mle_params : 1D ndarray. Should contain the desired model's maximum likelihood point estimate. init_vals : 1D ndarray. The initial values used to estimate the desired choice model. epsilon : positive float. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be 'close' to zero. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- term_plus : 2D ndarray. Should have one row for each observation. Should have one column for each parameter in the parameter vector being estimated. Elements should denote the finite difference term that comes from adding a small value to the observation corresponding to that elements respective row. term_minus : 2D ndarray. Should have one row for each observation. Should have one column for each parameter in the parameter vector being estimated. Elements should denote the finite difference term that comes from subtracting a small value to the observation corresponding to that elements respective row. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 22.6, Equations 22.32 and 22.36. Notes ----- The returned, symbolic value for `term_minus` does not explicitly appear in Equations 22.32 or 22.36. However, it is used to compute a midpoint / slope approximation to the finite difference derivative used to define the empirical influence function. """ # Determine the number of observations in this dataset. num_obs = model_obj.data[model_obj.obs_id_col].unique().size # Determine the initial weights per observation. init_weights_wide = np.ones(num_obs, dtype=float) / num_obs # Initialize wide weights for elements of the second order influence array. init_wide_weights_plus = (1 - epsilon) * init_weights_wide init_wide_weights_minus = (1 + epsilon) * init_weights_wide # Initialize the second order influence array term_plus = np.empty((num_obs, init_vals.shape[0]), dtype=float) term_minus = np.empty((num_obs, init_vals.shape[0]), dtype=float) # Get the rows_to_obs mapping matrix for this model. 
rows_to_obs = model_obj.get_mappings_for_fit()['rows_to_obs'] # Extract the initial weights from the fit kwargs new_fit_kwargs = deepcopy(fit_kwargs) if fit_kwargs is not None and 'weights' in fit_kwargs: orig_weights = fit_kwargs['weights'] del new_fit_kwargs['weights'] else: orig_weights = 1 # Make sure we're just getting the point estimate new_fit_kwargs['just_point'] = True # Populate the second order influence array for obs in xrange(num_obs): # Note we create the long weights in a for-loop to avoid creating a # num_obs by num_obs matrix, which may be a problem for large datasets # Get the wide format weights for this observation current_wide_weights_plus = init_wide_weights_plus.copy() current_wide_weights_plus[obs] += epsilon current_wide_weights_minus = init_wide_weights_minus.copy() current_wide_weights_minus[obs] -= epsilon # Get the long format weights for this observation long_weights_plus =\ (create_long_form_weights(model_obj, current_wide_weights_plus, rows_to_obs=rows_to_obs) * orig_weights) long_weights_minus =\ (create_long_form_weights(model_obj, current_wide_weights_minus, rows_to_obs=rows_to_obs) * orig_weights) # Get the needed influence estimates. term_plus[obs] = model_obj.fit_mle(init_vals, weights=long_weights_plus, **new_fit_kwargs)['x'] term_minus[obs] = model_obj.fit_mle(init_vals, weights=long_weights_minus, **new_fit_kwargs)['x'] return term_plus, term_minus
python
def calc_finite_diff_terms_for_abc(model_obj, mle_params, init_vals, epsilon, **fit_kwargs): """ Calculates the terms needed for the finite difference approximations of the empirical influence and second order empirical influence functions. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. Should be the model object that corresponds to the model we are constructing the bootstrap confidence intervals for. mle_params : 1D ndarray. Should contain the desired model's maximum likelihood point estimate. init_vals : 1D ndarray. The initial values used to estimate the desired choice model. epsilon : positive float. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be 'close' to zero. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- term_plus : 2D ndarray. Should have one row for each observation. Should have one column for each parameter in the parameter vector being estimated. Elements should denote the finite difference term that comes from adding a small value to the observation corresponding to that elements respective row. term_minus : 2D ndarray. Should have one row for each observation. Should have one column for each parameter in the parameter vector being estimated. Elements should denote the finite difference term that comes from subtracting a small value to the observation corresponding to that elements respective row. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 22.6, Equations 22.32 and 22.36. Notes ----- The returned, symbolic value for `term_minus` does not explicitly appear in Equations 22.32 or 22.36. However, it is used to compute a midpoint / slope approximation to the finite difference derivative used to define the empirical influence function. """ # Determine the number of observations in this dataset. num_obs = model_obj.data[model_obj.obs_id_col].unique().size # Determine the initial weights per observation. init_weights_wide = np.ones(num_obs, dtype=float) / num_obs # Initialize wide weights for elements of the second order influence array. init_wide_weights_plus = (1 - epsilon) * init_weights_wide init_wide_weights_minus = (1 + epsilon) * init_weights_wide # Initialize the second order influence array term_plus = np.empty((num_obs, init_vals.shape[0]), dtype=float) term_minus = np.empty((num_obs, init_vals.shape[0]), dtype=float) # Get the rows_to_obs mapping matrix for this model. 
rows_to_obs = model_obj.get_mappings_for_fit()['rows_to_obs'] # Extract the initial weights from the fit kwargs new_fit_kwargs = deepcopy(fit_kwargs) if fit_kwargs is not None and 'weights' in fit_kwargs: orig_weights = fit_kwargs['weights'] del new_fit_kwargs['weights'] else: orig_weights = 1 # Make sure we're just getting the point estimate new_fit_kwargs['just_point'] = True # Populate the second order influence array for obs in xrange(num_obs): # Note we create the long weights in a for-loop to avoid creating a # num_obs by num_obs matrix, which may be a problem for large datasets # Get the wide format weights for this observation current_wide_weights_plus = init_wide_weights_plus.copy() current_wide_weights_plus[obs] += epsilon current_wide_weights_minus = init_wide_weights_minus.copy() current_wide_weights_minus[obs] -= epsilon # Get the long format weights for this observation long_weights_plus =\ (create_long_form_weights(model_obj, current_wide_weights_plus, rows_to_obs=rows_to_obs) * orig_weights) long_weights_minus =\ (create_long_form_weights(model_obj, current_wide_weights_minus, rows_to_obs=rows_to_obs) * orig_weights) # Get the needed influence estimates. term_plus[obs] = model_obj.fit_mle(init_vals, weights=long_weights_plus, **new_fit_kwargs)['x'] term_minus[obs] = model_obj.fit_mle(init_vals, weights=long_weights_minus, **new_fit_kwargs)['x'] return term_plus, term_minus
[ "def", "calc_finite_diff_terms_for_abc", "(", "model_obj", ",", "mle_params", ",", "init_vals", ",", "epsilon", ",", "*", "*", "fit_kwargs", ")", ":", "# Determine the number of observations in this dataset.", "num_obs", "=", "model_obj", ".", "data", "[", "model_obj", ".", "obs_id_col", "]", ".", "unique", "(", ")", ".", "size", "# Determine the initial weights per observation.", "init_weights_wide", "=", "np", ".", "ones", "(", "num_obs", ",", "dtype", "=", "float", ")", "/", "num_obs", "# Initialize wide weights for elements of the second order influence array.", "init_wide_weights_plus", "=", "(", "1", "-", "epsilon", ")", "*", "init_weights_wide", "init_wide_weights_minus", "=", "(", "1", "+", "epsilon", ")", "*", "init_weights_wide", "# Initialize the second order influence array", "term_plus", "=", "np", ".", "empty", "(", "(", "num_obs", ",", "init_vals", ".", "shape", "[", "0", "]", ")", ",", "dtype", "=", "float", ")", "term_minus", "=", "np", ".", "empty", "(", "(", "num_obs", ",", "init_vals", ".", "shape", "[", "0", "]", ")", ",", "dtype", "=", "float", ")", "# Get the rows_to_obs mapping matrix for this model.", "rows_to_obs", "=", "model_obj", ".", "get_mappings_for_fit", "(", ")", "[", "'rows_to_obs'", "]", "# Extract the initial weights from the fit kwargs", "new_fit_kwargs", "=", "deepcopy", "(", "fit_kwargs", ")", "if", "fit_kwargs", "is", "not", "None", "and", "'weights'", "in", "fit_kwargs", ":", "orig_weights", "=", "fit_kwargs", "[", "'weights'", "]", "del", "new_fit_kwargs", "[", "'weights'", "]", "else", ":", "orig_weights", "=", "1", "# Make sure we're just getting the point estimate", "new_fit_kwargs", "[", "'just_point'", "]", "=", "True", "# Populate the second order influence array", "for", "obs", "in", "xrange", "(", "num_obs", ")", ":", "# Note we create the long weights in a for-loop to avoid creating a", "# num_obs by num_obs matrix, which may be a problem for large datasets", "# Get the wide format weights for this observation", "current_wide_weights_plus", "=", "init_wide_weights_plus", ".", "copy", "(", ")", "current_wide_weights_plus", "[", "obs", "]", "+=", "epsilon", "current_wide_weights_minus", "=", "init_wide_weights_minus", ".", "copy", "(", ")", "current_wide_weights_minus", "[", "obs", "]", "-=", "epsilon", "# Get the long format weights for this observation", "long_weights_plus", "=", "(", "create_long_form_weights", "(", "model_obj", ",", "current_wide_weights_plus", ",", "rows_to_obs", "=", "rows_to_obs", ")", "*", "orig_weights", ")", "long_weights_minus", "=", "(", "create_long_form_weights", "(", "model_obj", ",", "current_wide_weights_minus", ",", "rows_to_obs", "=", "rows_to_obs", ")", "*", "orig_weights", ")", "# Get the needed influence estimates.", "term_plus", "[", "obs", "]", "=", "model_obj", ".", "fit_mle", "(", "init_vals", ",", "weights", "=", "long_weights_plus", ",", "*", "*", "new_fit_kwargs", ")", "[", "'x'", "]", "term_minus", "[", "obs", "]", "=", "model_obj", ".", "fit_mle", "(", "init_vals", ",", "weights", "=", "long_weights_minus", ",", "*", "*", "new_fit_kwargs", ")", "[", "'x'", "]", "return", "term_plus", ",", "term_minus" ]
Calculates the terms needed for the finite difference approximations of the empirical influence and second order empirical influence functions. Parameters ---------- model_obj : an instance or subclass of the MNDC class. Should be the model object that corresponds to the model we are constructing the bootstrap confidence intervals for. mle_params : 1D ndarray. Should contain the desired model's maximum likelihood point estimate. init_vals : 1D ndarray. The initial values used to estimate the desired choice model. epsilon : positive float. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be 'close' to zero. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- term_plus : 2D ndarray. Should have one row for each observation. Should have one column for each parameter in the parameter vector being estimated. Elements should denote the finite difference term that comes from adding a small value to the observation corresponding to that element's respective row. term_minus : 2D ndarray. Should have one row for each observation. Should have one column for each parameter in the parameter vector being estimated. Elements should denote the finite difference term that comes from subtracting a small value from the observation corresponding to that element's respective row. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 22.6, Equations 22.32 and 22.36. Notes ----- The returned, symbolic value for `term_minus` does not explicitly appear in Equations 22.32 or 22.36. However, it is used to compute a midpoint / slope approximation to the finite difference derivative used to define the empirical influence function.
[ "Calculates", "the", "terms", "needed", "for", "the", "finite", "difference", "approximations", "of", "the", "empirical", "influence", "and", "second", "order", "empirical", "influence", "functions", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_abc.py#L123-L222
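The Notes field above says that `term_plus` and `term_minus` feed a midpoint (central-difference) approximation of the empirical influence function. Below is a minimal sketch of that combination, assuming NumPy and that the two arrays were produced as described in the record; the helper name `finite_diff_influence` is illustrative only, and pylogit's own influence helpers may apply an additional scaling on top of this slope.

import numpy as np

def finite_diff_influence(term_plus, term_minus, epsilon):
    # Central-difference slope: how much the weighted MLE moves per unit of
    # extra weight placed on each observation. Rows index observations and
    # columns index estimated parameters, matching term_plus / term_minus.
    return (term_plus - term_minus) / (2.0 * epsilon)

# Hypothetical usage, given the outputs described above:
# influence = finite_diff_influence(term_plus, term_minus, epsilon=0.001)
# influence.shape == (num_obs, init_vals.shape[0])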
856
timothyb0912/pylogit
pylogit/bootstrap_abc.py
calc_abc_interval
def calc_abc_interval(model_obj, mle_params, init_vals, conf_percentage, epsilon=0.001, **fit_kwargs): """ Calculate 'approximate bootstrap confidence' intervals. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. Should be the model object that corresponds to the model we are constructing the bootstrap confidence intervals for. mle_params : 1D ndarray. Should contain the desired model's maximum likelihood point estimate. init_vals : 1D ndarray. The initial values used to estimate the desired choice model. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 22.6. DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence intervals." Statistical science (1996): 189-212. """ # Check validity of arguments check_conf_percentage_validity(conf_percentage) # Calculate the empirical influence component and second order empirical # influence component for each observation empirical_influence, second_order_influence =\ calc_influence_arrays_for_abc(model_obj, mle_params, init_vals, epsilon, **fit_kwargs) # Calculate the acceleration constant for the ABC interval. acceleration = calc_acceleration_abc(empirical_influence) # Use the delta method to calculate the standard error of the MLE parameter # estimate of the model using the original data. std_error = calc_std_error_abc(empirical_influence) # Approximate the bias of the MLE parameter estimates. bias = calc_bias_abc(second_order_influence) # Calculate the quadratic coefficient. Note we are using the 'efron' # version of the desired function because the direct implementation of the # formulas in the textbook don't return the correct results. The 'efron' # versions re-implement the calculations from 'abcnon.R' in Efron's # 'bootstrap' library in R. # quadratic_coef = calc_quadratic_coef_abc(model_obj, # mle_params, # init_vals, # empirical_influence, # std_error, # epsilon, # **fit_kwargs) quadratic_coef = efron_quadratic_coef_abc(model_obj, mle_params, init_vals, empirical_influence, std_error, epsilon, **fit_kwargs) # Calculate the total curvature of the level surface of the weight vector, # where the set of weights in the surface are those where the weighted MLE # equals the original (i.e. the equal-weighted) MLE. total_curvature = calc_total_curvature_abc(bias, std_error, quadratic_coef) # Calculate the bias correction constant. 
bias_correction = calc_bias_correction_abc(acceleration, total_curvature) # Calculate the lower limit of the conf_percentage confidence intervals # Note we are using the 'efron' version of the desired function because the # direct implementation of the formulas in the textbook don't return the # correct results. The 'efron' versions re-implement the calculations from # 'abcnon.R' in Efron's 'bootstrap' library in R. # lower_endpoint, upper_endpoint =\ # calc_endpoints_for_abc_confidence_interval(conf_percentage, # model_obj, # init_vals, # bias_correction, # acceleration, # std_error, # empirical_influence, # **fit_kwargs) lower_endpoint, upper_endpoint =\ efron_endpoints_for_abc_confidence_interval(conf_percentage, model_obj, init_vals, bias_correction, acceleration, std_error, empirical_influence, **fit_kwargs) # Combine the enpoints into a single ndarray. conf_intervals = combine_conf_endpoints(lower_endpoint, upper_endpoint) return conf_intervals
python
def calc_abc_interval(model_obj, mle_params, init_vals, conf_percentage, epsilon=0.001, **fit_kwargs): """ Calculate 'approximate bootstrap confidence' intervals. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. Should be the model object that corresponds to the model we are constructing the bootstrap confidence intervals for. mle_params : 1D ndarray. Should contain the desired model's maximum likelihood point estimate. init_vals : 1D ndarray. The initial values used to estimate the desired choice model. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == sys.float_info.epsilon. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 22.6. DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence intervals." Statistical science (1996): 189-212. """ # Check validity of arguments check_conf_percentage_validity(conf_percentage) # Calculate the empirical influence component and second order empirical # influence component for each observation empirical_influence, second_order_influence =\ calc_influence_arrays_for_abc(model_obj, mle_params, init_vals, epsilon, **fit_kwargs) # Calculate the acceleration constant for the ABC interval. acceleration = calc_acceleration_abc(empirical_influence) # Use the delta method to calculate the standard error of the MLE parameter # estimate of the model using the original data. std_error = calc_std_error_abc(empirical_influence) # Approximate the bias of the MLE parameter estimates. bias = calc_bias_abc(second_order_influence) # Calculate the quadratic coefficient. Note we are using the 'efron' # version of the desired function because the direct implementation of the # formulas in the textbook don't return the correct results. The 'efron' # versions re-implement the calculations from 'abcnon.R' in Efron's # 'bootstrap' library in R. # quadratic_coef = calc_quadratic_coef_abc(model_obj, # mle_params, # init_vals, # empirical_influence, # std_error, # epsilon, # **fit_kwargs) quadratic_coef = efron_quadratic_coef_abc(model_obj, mle_params, init_vals, empirical_influence, std_error, epsilon, **fit_kwargs) # Calculate the total curvature of the level surface of the weight vector, # where the set of weights in the surface are those where the weighted MLE # equals the original (i.e. the equal-weighted) MLE. total_curvature = calc_total_curvature_abc(bias, std_error, quadratic_coef) # Calculate the bias correction constant. 
bias_correction = calc_bias_correction_abc(acceleration, total_curvature) # Calculate the lower limit of the conf_percentage confidence intervals # Note we are using the 'efron' version of the desired function because the # direct implementation of the formulas in the textbook don't return the # correct results. The 'efron' versions re-implement the calculations from # 'abcnon.R' in Efron's 'bootstrap' library in R. # lower_endpoint, upper_endpoint =\ # calc_endpoints_for_abc_confidence_interval(conf_percentage, # model_obj, # init_vals, # bias_correction, # acceleration, # std_error, # empirical_influence, # **fit_kwargs) lower_endpoint, upper_endpoint =\ efron_endpoints_for_abc_confidence_interval(conf_percentage, model_obj, init_vals, bias_correction, acceleration, std_error, empirical_influence, **fit_kwargs) # Combine the enpoints into a single ndarray. conf_intervals = combine_conf_endpoints(lower_endpoint, upper_endpoint) return conf_intervals
[ "def", "calc_abc_interval", "(", "model_obj", ",", "mle_params", ",", "init_vals", ",", "conf_percentage", ",", "epsilon", "=", "0.001", ",", "*", "*", "fit_kwargs", ")", ":", "# Check validity of arguments", "check_conf_percentage_validity", "(", "conf_percentage", ")", "# Calculate the empirical influence component and second order empirical", "# influence component for each observation", "empirical_influence", ",", "second_order_influence", "=", "calc_influence_arrays_for_abc", "(", "model_obj", ",", "mle_params", ",", "init_vals", ",", "epsilon", ",", "*", "*", "fit_kwargs", ")", "# Calculate the acceleration constant for the ABC interval.", "acceleration", "=", "calc_acceleration_abc", "(", "empirical_influence", ")", "# Use the delta method to calculate the standard error of the MLE parameter", "# estimate of the model using the original data.", "std_error", "=", "calc_std_error_abc", "(", "empirical_influence", ")", "# Approximate the bias of the MLE parameter estimates.", "bias", "=", "calc_bias_abc", "(", "second_order_influence", ")", "# Calculate the quadratic coefficient. Note we are using the 'efron'", "# version of the desired function because the direct implementation of the", "# formulas in the textbook don't return the correct results. The 'efron'", "# versions re-implement the calculations from 'abcnon.R' in Efron's", "# 'bootstrap' library in R.", "# quadratic_coef = calc_quadratic_coef_abc(model_obj,", "# mle_params,", "# init_vals,", "# empirical_influence,", "# std_error,", "# epsilon,", "# **fit_kwargs)", "quadratic_coef", "=", "efron_quadratic_coef_abc", "(", "model_obj", ",", "mle_params", ",", "init_vals", ",", "empirical_influence", ",", "std_error", ",", "epsilon", ",", "*", "*", "fit_kwargs", ")", "# Calculate the total curvature of the level surface of the weight vector,", "# where the set of weights in the surface are those where the weighted MLE", "# equals the original (i.e. the equal-weighted) MLE.", "total_curvature", "=", "calc_total_curvature_abc", "(", "bias", ",", "std_error", ",", "quadratic_coef", ")", "# Calculate the bias correction constant.", "bias_correction", "=", "calc_bias_correction_abc", "(", "acceleration", ",", "total_curvature", ")", "# Calculate the lower limit of the conf_percentage confidence intervals", "# Note we are using the 'efron' version of the desired function because the", "# direct implementation of the formulas in the textbook don't return the", "# correct results. The 'efron' versions re-implement the calculations from", "# 'abcnon.R' in Efron's 'bootstrap' library in R.", "# lower_endpoint, upper_endpoint =\\", "# calc_endpoints_for_abc_confidence_interval(conf_percentage,", "# model_obj,", "# init_vals,", "# bias_correction,", "# acceleration,", "# std_error,", "# empirical_influence,", "# **fit_kwargs)", "lower_endpoint", ",", "upper_endpoint", "=", "efron_endpoints_for_abc_confidence_interval", "(", "conf_percentage", ",", "model_obj", ",", "init_vals", ",", "bias_correction", ",", "acceleration", ",", "std_error", ",", "empirical_influence", ",", "*", "*", "fit_kwargs", ")", "# Combine the enpoints into a single ndarray.", "conf_intervals", "=", "combine_conf_endpoints", "(", "lower_endpoint", ",", "upper_endpoint", ")", "return", "conf_intervals" ]
Calculate 'approximate bootstrap confidence' intervals. Parameters ---------- model_obj : an instance or subclass of the MNDC class. Should be the model object that corresponds to the model we are constructing the bootstrap confidence intervals for. mle_params : 1D ndarray. Should contain the desired model's maximum likelihood point estimate. init_vals : 1D ndarray. The initial values used to estimate the desired choice model. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. epsilon : positive float, optional. Should denote the 'very small' value being used to calculate the desired finite difference approximations to the various influence functions. Should be close to zero. Default == 0.001. fit_kwargs : additional keyword arguments, optional. Should contain any additional kwargs used to alter the default behavior of `model_obj.fit_mle` and thereby enforce conformity with how the MLE was obtained. Will be passed directly to `model_obj.fit_mle`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 22.6. DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence intervals." Statistical science (1996): 189-212.
[ "Calculate", "approximate", "bootstrap", "confidence", "intervals", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_abc.py#L1160-L1278
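For orientation, a hedged usage sketch of the signature documented above; `model`, `mle_estimates`, and `start_vals` are placeholders for a fitted pylogit model object and its estimates, so the snippet is illustrative rather than runnable as-is.

# from pylogit.bootstrap_abc import calc_abc_interval   # module path per the url field
# conf_intervals = calc_abc_interval(model,             # fitted MNDC-style model object
#                                    mle_estimates,     # 1D ndarray of MLE parameters
#                                    start_vals,        # init values used to obtain the MLE
#                                    conf_percentage=95,
#                                    epsilon=0.001)
# conf_intervals[0]  -> lower endpoints, one per estimated parameter
# conf_intervals[1]  -> upper endpoints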
857
timothyb0912/pylogit
pylogit/mixed_logit.py
check_length_of_init_values
def check_length_of_init_values(design_3d, init_values): """ Ensures that the initial values are of the correct length, given the design matrix that they will be dot-producted with. Raises a ValueError if that is not the case, and provides a useful error message to users. Parameters ---------- init_values : 1D ndarray. 1D numpy array of the initial values to start the optimizatin process with. There should be one value for each index coefficient being estimated. design_3d : 2D ndarray. 2D numpy array with one row per observation per available alternative. There should be one column per index coefficient being estimated. All elements should be ints, floats, or longs. Returns ------- None. """ if init_values.shape[0] != design_3d.shape[2]: msg_1 = "The initial values are of the wrong dimension. " msg_2 = "They should be of dimension {}".format(design_3d.shape[2]) raise ValueError(msg_1 + msg_2) return None
python
def check_length_of_init_values(design_3d, init_values): """ Ensures that the initial values are of the correct length, given the design matrix that they will be dot-producted with. Raises a ValueError if that is not the case, and provides a useful error message to users. Parameters ---------- init_values : 1D ndarray. 1D numpy array of the initial values to start the optimizatin process with. There should be one value for each index coefficient being estimated. design_3d : 2D ndarray. 2D numpy array with one row per observation per available alternative. There should be one column per index coefficient being estimated. All elements should be ints, floats, or longs. Returns ------- None. """ if init_values.shape[0] != design_3d.shape[2]: msg_1 = "The initial values are of the wrong dimension. " msg_2 = "They should be of dimension {}".format(design_3d.shape[2]) raise ValueError(msg_1 + msg_2) return None
[ "def", "check_length_of_init_values", "(", "design_3d", ",", "init_values", ")", ":", "if", "init_values", ".", "shape", "[", "0", "]", "!=", "design_3d", ".", "shape", "[", "2", "]", ":", "msg_1", "=", "\"The initial values are of the wrong dimension. \"", "msg_2", "=", "\"They should be of dimension {}\"", ".", "format", "(", "design_3d", ".", "shape", "[", "2", "]", ")", "raise", "ValueError", "(", "msg_1", "+", "msg_2", ")", "return", "None" ]
Ensures that the initial values are of the correct length, given the design matrix that they will be dot-producted with. Raises a ValueError if that is not the case, and provides a useful error message to users. Parameters ---------- init_values : 1D ndarray. 1D numpy array of the initial values to start the optimization process with. There should be one value for each index coefficient being estimated. design_3d : 3D ndarray. 3D numpy array with one row per observation per available alternative. The last axis should have one element per index coefficient being estimated. All elements should be ints, floats, or longs. Returns ------- None.
[ "Ensures", "that", "the", "initial", "values", "are", "of", "the", "correct", "length", "given", "the", "design", "matrix", "that", "they", "will", "be", "dot", "-", "producted", "with", ".", "Raises", "a", "ValueError", "if", "that", "is", "not", "the", "case", "and", "provides", "a", "useful", "error", "message", "to", "users", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/mixed_logit.py#L106-L132
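A small, hedged illustration of the check above, assuming NumPy and that the function is importable from pylogit.mixed_logit (per the path field). Only the length of the last axis of `design_3d` is inspected, so the first two dimensions below are arbitrary placeholder sizes.

import numpy as np
from pylogit.mixed_logit import check_length_of_init_values

design_3d = np.zeros((10, 5, 3))   # last axis: 3 index coefficients being estimated

check_length_of_init_values(design_3d, np.zeros(3))    # lengths match, returns None
# check_length_of_init_values(design_3d, np.zeros(4))  # would raise ValueError:
#                                                      # "They should be of dimension 3"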
858
timothyb0912/pylogit
pylogit/mixed_logit.py
add_mixl_specific_results_to_estimation_res
def add_mixl_specific_results_to_estimation_res(estimator, results_dict): """ Stores particular items in the results dictionary that are unique to mixed logit-type models. In particular, this function calculates and adds `sequence_probs` and `expanded_sequence_probs` to the results dictionary. The `constrained_pos` object is also stored to the results_dict. Parameters ---------- estimator : an instance of the MixedEstimator class. Should contain a `choice_vector` attribute that is a 1D ndarray representing the choices made for this model's dataset. Should also contain a `rows_to_mixers` attribute that maps each row of the long format data to a unit of observation that the mixing is being performed over. results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following `long_probs` key. Returns ------- results_dict. """ # Get the probability of each sequence of choices, given the draws prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"], estimator.choice_vector, estimator.rows_to_mixers, return_type='all') # Add the various items to the results_dict. results_dict["simulated_sequence_probs"] = prob_res[0] results_dict["expanded_sequence_probs"] = prob_res[1] return results_dict
python
def add_mixl_specific_results_to_estimation_res(estimator, results_dict): """ Stores particular items in the results dictionary that are unique to mixed logit-type models. In particular, this function calculates and adds `sequence_probs` and `expanded_sequence_probs` to the results dictionary. The `constrained_pos` object is also stored to the results_dict. Parameters ---------- estimator : an instance of the MixedEstimator class. Should contain a `choice_vector` attribute that is a 1D ndarray representing the choices made for this model's dataset. Should also contain a `rows_to_mixers` attribute that maps each row of the long format data to a unit of observation that the mixing is being performed over. results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following `long_probs` key. Returns ------- results_dict. """ # Get the probability of each sequence of choices, given the draws prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"], estimator.choice_vector, estimator.rows_to_mixers, return_type='all') # Add the various items to the results_dict. results_dict["simulated_sequence_probs"] = prob_res[0] results_dict["expanded_sequence_probs"] = prob_res[1] return results_dict
[ "def", "add_mixl_specific_results_to_estimation_res", "(", "estimator", ",", "results_dict", ")", ":", "# Get the probability of each sequence of choices, given the draws", "prob_res", "=", "mlc", ".", "calc_choice_sequence_probs", "(", "results_dict", "[", "\"long_probs\"", "]", ",", "estimator", ".", "choice_vector", ",", "estimator", ".", "rows_to_mixers", ",", "return_type", "=", "'all'", ")", "# Add the various items to the results_dict.", "results_dict", "[", "\"simulated_sequence_probs\"", "]", "=", "prob_res", "[", "0", "]", "results_dict", "[", "\"expanded_sequence_probs\"", "]", "=", "prob_res", "[", "1", "]", "return", "results_dict" ]
Stores particular items in the results dictionary that are unique to mixed logit-type models. In particular, this function calculates and adds `sequence_probs` and `expanded_sequence_probs` to the results dictionary. The `constrained_pos` object is also stored to the results_dict. Parameters ---------- estimator : an instance of the MixedEstimator class. Should contain a `choice_vector` attribute that is a 1D ndarray representing the choices made for this model's dataset. Should also contain a `rows_to_mixers` attribute that maps each row of the long format data to a unit of observation that the mixing is being performed over. results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following `long_probs` key. Returns ------- results_dict.
[ "Stores", "particular", "items", "in", "the", "results", "dictionary", "that", "are", "unique", "to", "mixed", "logit", "-", "type", "models", ".", "In", "particular", "this", "function", "calculates", "and", "adds", "sequence_probs", "and", "expanded_sequence_probs", "to", "the", "results", "dictionary", ".", "The", "constrained_pos", "object", "is", "also", "stored", "to", "the", "results_dict", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/mixed_logit.py#L135-L168
859
timothyb0912/pylogit
pylogit/nested_logit.py
identify_degenerate_nests
def identify_degenerate_nests(nest_spec): """ Identify the nests within nest_spec that are degenerate, i.e. those nests with only a single alternative within the nest. Parameters ---------- nest_spec : OrderedDict. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Returns ------- list. Will contain the positions in the list of keys from `nest_spec` that are degenerate. """ degenerate_positions = [] for pos, key in enumerate(nest_spec): if len(nest_spec[key]) == 1: degenerate_positions.append(pos) return degenerate_positions
python
def identify_degenerate_nests(nest_spec): """ Identify the nests within nest_spec that are degenerate, i.e. those nests with only a single alternative within the nest. Parameters ---------- nest_spec : OrderedDict. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Returns ------- list. Will contain the positions in the list of keys from `nest_spec` that are degenerate. """ degenerate_positions = [] for pos, key in enumerate(nest_spec): if len(nest_spec[key]) == 1: degenerate_positions.append(pos) return degenerate_positions
[ "def", "identify_degenerate_nests", "(", "nest_spec", ")", ":", "degenerate_positions", "=", "[", "]", "for", "pos", ",", "key", "in", "enumerate", "(", "nest_spec", ")", ":", "if", "len", "(", "nest_spec", "[", "key", "]", ")", "==", "1", ":", "degenerate_positions", ".", "append", "(", "pos", ")", "return", "degenerate_positions" ]
Identify the nests within nest_spec that are degenerate, i.e. those nests with only a single alternative within the nest. Parameters ---------- nest_spec : OrderedDict. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Returns ------- list. Will contain the positions in the list of keys from `nest_spec` that are degenerate.
[ "Identify", "the", "nests", "within", "nest_spec", "that", "are", "degenerate", "i", ".", "e", ".", "those", "nests", "with", "only", "a", "single", "alternative", "within", "the", "nest", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/nested_logit.py#L36-L58
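A quick example of the degenerate-nest check above; the nest names and alternative ids are made up, and the import path follows the record's path field.

from collections import OrderedDict
from pylogit.nested_logit import identify_degenerate_nests

nest_spec = OrderedDict([('fast modes', [1, 2]),
                         ('slow modes', [3]),
                         ('other', [4, 5])])

# Only 'slow modes' contains a single alternative, so position 1 is degenerate.
degenerate = identify_degenerate_nests(nest_spec)   # -> [1]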
860
timothyb0912/pylogit
pylogit/nested_logit.py
NestedEstimator.check_length_of_initial_values
def check_length_of_initial_values(self, init_values): """ Ensures that the initial values are of the correct length. """ # Figure out how many shape parameters we should have and how many # index coefficients we should have num_nests = self.rows_to_nests.shape[1] num_index_coefs = self.design.shape[1] assumed_param_dimensions = num_index_coefs + num_nests if init_values.shape[0] != assumed_param_dimensions: msg = "The initial values are of the wrong dimension" msg_1 = "It should be of dimension {}" msg_2 = "But instead it has dimension {}" raise ValueError(msg + msg_1.format(assumed_param_dimensions) + msg_2.format(init_values.shape[0])) return None
python
def check_length_of_initial_values(self, init_values): """ Ensures that the initial values are of the correct length. """ # Figure out how many shape parameters we should have and how many # index coefficients we should have num_nests = self.rows_to_nests.shape[1] num_index_coefs = self.design.shape[1] assumed_param_dimensions = num_index_coefs + num_nests if init_values.shape[0] != assumed_param_dimensions: msg = "The initial values are of the wrong dimension" msg_1 = "It should be of dimension {}" msg_2 = "But instead it has dimension {}" raise ValueError(msg + msg_1.format(assumed_param_dimensions) + msg_2.format(init_values.shape[0])) return None
[ "def", "check_length_of_initial_values", "(", "self", ",", "init_values", ")", ":", "# Figure out how many shape parameters we should have and how many", "# index coefficients we should have", "num_nests", "=", "self", ".", "rows_to_nests", ".", "shape", "[", "1", "]", "num_index_coefs", "=", "self", ".", "design", ".", "shape", "[", "1", "]", "assumed_param_dimensions", "=", "num_index_coefs", "+", "num_nests", "if", "init_values", ".", "shape", "[", "0", "]", "!=", "assumed_param_dimensions", ":", "msg", "=", "\"The initial values are of the wrong dimension\"", "msg_1", "=", "\"It should be of dimension {}\"", "msg_2", "=", "\"But instead it has dimension {}\"", "raise", "ValueError", "(", "msg", "+", "msg_1", ".", "format", "(", "assumed_param_dimensions", ")", "+", "msg_2", ".", "format", "(", "init_values", ".", "shape", "[", "0", "]", ")", ")", "return", "None" ]
Ensures that the initial values are of the correct length.
[ "Ensures", "that", "the", "initial", "values", "are", "of", "the", "correct", "length", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/nested_logit.py#L177-L195
861
timothyb0912/pylogit
pylogit/nested_logit.py
NestedEstimator.convenience_split_params
def convenience_split_params(self, params, return_all_types=False): """ Splits parameter vector into nest parameters and index parameters. Parameters ---------- all_params : 1D ndarray. Should contain all of the parameters being estimated (i.e. all the nest coefficients and all of the index coefficients). All elements should be ints, floats, or longs. rows_to_nests : 2D scipy sparse array. There should be one row per observation per available alternative and one column per nest. This matrix maps the rows of the design matrix to the unique nests (on the columns). return_all_types : bool, optional. Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 2 elements will be returned, as described below. The tuple will contain the nest parameters and the index coefficients. Returns ------- orig_nest_coefs : 1D ndarray. The nest coefficients being used for estimation. Note that these values are the logit of the inverse of the scale parameters for each lower level nest. index_coefs : 1D ndarray. The index coefficients of this nested logit model. Note ---- If `return_all_types == True` then the function will return a tuple of four objects. In order, these objects will either be None or the arrays representing the arrays corresponding to the nest, shape, intercept, and index parameters. """ return split_param_vec(params, self.rows_to_nests, return_all_types=return_all_types)
python
def convenience_split_params(self, params, return_all_types=False): """ Splits parameter vector into nest parameters and index parameters. Parameters ---------- all_params : 1D ndarray. Should contain all of the parameters being estimated (i.e. all the nest coefficients and all of the index coefficients). All elements should be ints, floats, or longs. rows_to_nests : 2D scipy sparse array. There should be one row per observation per available alternative and one column per nest. This matrix maps the rows of the design matrix to the unique nests (on the columns). return_all_types : bool, optional. Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 2 elements will be returned, as described below. The tuple will contain the nest parameters and the index coefficients. Returns ------- orig_nest_coefs : 1D ndarray. The nest coefficients being used for estimation. Note that these values are the logit of the inverse of the scale parameters for each lower level nest. index_coefs : 1D ndarray. The index coefficients of this nested logit model. Note ---- If `return_all_types == True` then the function will return a tuple of four objects. In order, these objects will either be None or the arrays representing the arrays corresponding to the nest, shape, intercept, and index parameters. """ return split_param_vec(params, self.rows_to_nests, return_all_types=return_all_types)
[ "def", "convenience_split_params", "(", "self", ",", "params", ",", "return_all_types", "=", "False", ")", ":", "return", "split_param_vec", "(", "params", ",", "self", ".", "rows_to_nests", ",", "return_all_types", "=", "return_all_types", ")" ]
Splits parameter vector into nest parameters and index parameters. Parameters ---------- all_params : 1D ndarray. Should contain all of the parameters being estimated (i.e. all the nest coefficients and all of the index coefficients). All elements should be ints, floats, or longs. rows_to_nests : 2D scipy sparse array. There should be one row per observation per available alternative and one column per nest. This matrix maps the rows of the design matrix to the unique nests (on the columns). return_all_types : bool, optional. Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 2 elements will be returned, as described below. The tuple will contain the nest parameters and the index coefficients. Returns ------- orig_nest_coefs : 1D ndarray. The nest coefficients being used for estimation. Note that these values are the logit of the inverse of the scale parameters for each lower level nest. index_coefs : 1D ndarray. The index coefficients of this nested logit model. Note ---- If `return_all_types == True` then the function will return a tuple of four objects. In order, these objects will either be None or the arrays representing the arrays corresponding to the nest, shape, intercept, and index parameters.
[ "Splits", "parameter", "vector", "into", "nest", "parameters", "and", "index", "parameters", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/nested_logit.py#L197-L236
862
timothyb0912/pylogit
pylogit/choice_calcs.py
robust_outer_product
def robust_outer_product(vec_1, vec_2): """ Calculates a 'robust' outer product of two vectors that may or may not contain very small values. Parameters ---------- vec_1 : 1D ndarray vec_2 : 1D ndarray Returns ------- outer_prod : 2D ndarray. The outer product of vec_1 and vec_2 """ mantissa_1, exponents_1 = np.frexp(vec_1) mantissa_2, exponents_2 = np.frexp(vec_2) new_mantissas = mantissa_1[None, :] * mantissa_2[:, None] new_exponents = exponents_1[None, :] + exponents_2[:, None] return new_mantissas * np.exp2(new_exponents)
python
def robust_outer_product(vec_1, vec_2): """ Calculates a 'robust' outer product of two vectors that may or may not contain very small values. Parameters ---------- vec_1 : 1D ndarray vec_2 : 1D ndarray Returns ------- outer_prod : 2D ndarray. The outer product of vec_1 and vec_2 """ mantissa_1, exponents_1 = np.frexp(vec_1) mantissa_2, exponents_2 = np.frexp(vec_2) new_mantissas = mantissa_1[None, :] * mantissa_2[:, None] new_exponents = exponents_1[None, :] + exponents_2[:, None] return new_mantissas * np.exp2(new_exponents)
[ "def", "robust_outer_product", "(", "vec_1", ",", "vec_2", ")", ":", "mantissa_1", ",", "exponents_1", "=", "np", ".", "frexp", "(", "vec_1", ")", "mantissa_2", ",", "exponents_2", "=", "np", ".", "frexp", "(", "vec_2", ")", "new_mantissas", "=", "mantissa_1", "[", "None", ",", ":", "]", "*", "mantissa_2", "[", ":", ",", "None", "]", "new_exponents", "=", "exponents_1", "[", "None", ",", ":", "]", "+", "exponents_2", "[", ":", ",", "None", "]", "return", "new_mantissas", "*", "np", ".", "exp2", "(", "new_exponents", ")" ]
Calculates a 'robust' outer product of two vectors that may or may not contain very small values. Parameters ---------- vec_1 : 1D ndarray vec_2 : 1D ndarray Returns ------- outer_prod : 2D ndarray. The outer product of vec_1 and vec_2
[ "Calculates", "a", "robust", "outer", "product", "of", "two", "vectors", "that", "may", "or", "may", "not", "contain", "very", "small", "values", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/choice_calcs.py#L523-L541
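To see what the frexp decomposition above is doing, here is a small check, assuming NumPy and that the function is importable from pylogit.choice_calcs (per the path field). Each input value is split into a mantissa in [0.5, 1) and a power-of-two exponent; mantissas are multiplied, exponents are summed, and the reassembled result matches an ordinary outer product whose rows follow `vec_2` and whose columns follow `vec_1`, mirroring the broadcasting in the function body.

import numpy as np
from pylogit.choice_calcs import robust_outer_product

a = np.array([3.0, 0.25, 1e-8])
b = np.array([7.0, 2.0])

# Rows of the result follow the second argument, columns follow the first.
assert np.allclose(robust_outer_product(a, b), np.outer(b, a))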
863
timothyb0912/pylogit
pylogit/bootstrap_calcs.py
calc_percentile_interval
def calc_percentile_interval(bootstrap_replicates, conf_percentage): """ Calculate bootstrap confidence intervals based on raw percentiles of the bootstrap distribution of samples. Parameters ---------- bootstrap_replicates : 2D ndarray. Each row should correspond to a different bootstrap parameter sample. Each column should correspond to an element of the parameter vector being estimated. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 12.5 and Section 13.3. See Equation 13.3. Notes ----- This function differs slightly from the actual percentile bootstrap procedure described in Efron and Tibshirani (1994). To ensure that the returned endpoints of one's bootstrap confidence intervals are actual values that were observed in the bootstrap distribution, both the procedure of Efron and Tibshirani and this function make more conservative confidence intervals. However, this function uses a simpler (and in some cases less conservative) correction than that of Efron and Tibshirani. """ # Check validity of arguments check_conf_percentage_validity(conf_percentage) ensure_samples_is_ndim_ndarray(bootstrap_replicates, ndim=2) # Get the alpha * 100% value alpha = get_alpha_from_conf_percentage(conf_percentage) # Get the lower and upper percentiles that demarcate the desired interval. lower_percent = alpha / 2.0 upper_percent = 100.0 - lower_percent # Calculate the lower and upper endpoints of the confidence intervals. # Note that the particular choices of interpolation methods are made in # order to produce conservatively wide confidence intervals and ensure that # all returned endpoints in the confidence intervals are actually observed # in the bootstrap distribution. This is in accordance with the spirit of # Efron and Tibshirani (1994). lower_endpoint = np.percentile(bootstrap_replicates, lower_percent, interpolation='lower', axis=0) upper_endpoint = np.percentile(bootstrap_replicates, upper_percent, interpolation='higher', axis=0) # Combine the enpoints into a single ndarray. conf_intervals = combine_conf_endpoints(lower_endpoint, upper_endpoint) return conf_intervals
python
def calc_percentile_interval(bootstrap_replicates, conf_percentage): """ Calculate bootstrap confidence intervals based on raw percentiles of the bootstrap distribution of samples. Parameters ---------- bootstrap_replicates : 2D ndarray. Each row should correspond to a different bootstrap parameter sample. Each column should correspond to an element of the parameter vector being estimated. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 12.5 and Section 13.3. See Equation 13.3. Notes ----- This function differs slightly from the actual percentile bootstrap procedure described in Efron and Tibshirani (1994). To ensure that the returned endpoints of one's bootstrap confidence intervals are actual values that were observed in the bootstrap distribution, both the procedure of Efron and Tibshirani and this function make more conservative confidence intervals. However, this function uses a simpler (and in some cases less conservative) correction than that of Efron and Tibshirani. """ # Check validity of arguments check_conf_percentage_validity(conf_percentage) ensure_samples_is_ndim_ndarray(bootstrap_replicates, ndim=2) # Get the alpha * 100% value alpha = get_alpha_from_conf_percentage(conf_percentage) # Get the lower and upper percentiles that demarcate the desired interval. lower_percent = alpha / 2.0 upper_percent = 100.0 - lower_percent # Calculate the lower and upper endpoints of the confidence intervals. # Note that the particular choices of interpolation methods are made in # order to produce conservatively wide confidence intervals and ensure that # all returned endpoints in the confidence intervals are actually observed # in the bootstrap distribution. This is in accordance with the spirit of # Efron and Tibshirani (1994). lower_endpoint = np.percentile(bootstrap_replicates, lower_percent, interpolation='lower', axis=0) upper_endpoint = np.percentile(bootstrap_replicates, upper_percent, interpolation='higher', axis=0) # Combine the enpoints into a single ndarray. conf_intervals = combine_conf_endpoints(lower_endpoint, upper_endpoint) return conf_intervals
[ "def", "calc_percentile_interval", "(", "bootstrap_replicates", ",", "conf_percentage", ")", ":", "# Check validity of arguments", "check_conf_percentage_validity", "(", "conf_percentage", ")", "ensure_samples_is_ndim_ndarray", "(", "bootstrap_replicates", ",", "ndim", "=", "2", ")", "# Get the alpha * 100% value", "alpha", "=", "get_alpha_from_conf_percentage", "(", "conf_percentage", ")", "# Get the lower and upper percentiles that demarcate the desired interval.", "lower_percent", "=", "alpha", "/", "2.0", "upper_percent", "=", "100.0", "-", "lower_percent", "# Calculate the lower and upper endpoints of the confidence intervals.", "# Note that the particular choices of interpolation methods are made in", "# order to produce conservatively wide confidence intervals and ensure that", "# all returned endpoints in the confidence intervals are actually observed", "# in the bootstrap distribution. This is in accordance with the spirit of", "# Efron and Tibshirani (1994).", "lower_endpoint", "=", "np", ".", "percentile", "(", "bootstrap_replicates", ",", "lower_percent", ",", "interpolation", "=", "'lower'", ",", "axis", "=", "0", ")", "upper_endpoint", "=", "np", ".", "percentile", "(", "bootstrap_replicates", ",", "upper_percent", ",", "interpolation", "=", "'higher'", ",", "axis", "=", "0", ")", "# Combine the enpoints into a single ndarray.", "conf_intervals", "=", "combine_conf_endpoints", "(", "lower_endpoint", ",", "upper_endpoint", ")", "return", "conf_intervals" ]
Calculate bootstrap confidence intervals based on raw percentiles of the bootstrap distribution of samples. Parameters ---------- bootstrap_replicates : 2D ndarray. Each row should correspond to a different bootstrap parameter sample. Each column should correspond to an element of the parameter vector being estimated. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 12.5 and Section 13.3. See Equation 13.3. Notes ----- This function differs slightly from the actual percentile bootstrap procedure described in Efron and Tibshirani (1994). To ensure that the returned endpoints of one's bootstrap confidence intervals are actual values that were observed in the bootstrap distribution, both the procedure of Efron and Tibshirani and this function make more conservative confidence intervals. However, this function uses a simpler (and in some cases less conservative) correction than that of Efron and Tibshirani.
[ "Calculate", "bootstrap", "confidence", "intervals", "based", "on", "raw", "percentiles", "of", "the", "bootstrap", "distribution", "of", "samples", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_calcs.py#L20-L83
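The percentile machinery above reduces to two `np.percentile` calls once alpha is known. Below is a self-contained toy run on fake replicates, assuming NumPy (newer NumPy spells the `interpolation` keyword `method`); the numbers are synthetic and only meant to show the shapes involved.

import numpy as np

rng = np.random.RandomState(0)
# Fake bootstrap distribution: 1000 replicates of a 2-parameter vector.
fake_replicates = rng.normal(loc=[1.5, -0.8], scale=[0.2, 0.05], size=(1000, 2))

# For conf_percentage = 95, alpha = 5, so the endpoints sit at the 2.5th and
# 97.5th percentiles of each column, as in the function above.
lower = np.percentile(fake_replicates, 2.5, interpolation='lower', axis=0)
upper = np.percentile(fake_replicates, 97.5, interpolation='higher', axis=0)

conf_intervals = np.stack([lower, upper])   # shape (2, 2): first row lower, second row upper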
864
timothyb0912/pylogit
pylogit/bootstrap_calcs.py
calc_bca_interval
def calc_bca_interval(bootstrap_replicates, jackknife_replicates, mle_params, conf_percentage): """ Calculate 'bias-corrected and accelerated' bootstrap confidence intervals. Parameters ---------- bootstrap_replicates : 2D ndarray. Each row should correspond to a different bootstrap parameter sample. Each column should correspond to an element of the parameter vector being estimated. jackknife_replicates : 2D ndarray. Each row should correspond to a different jackknife parameter sample, formed by deleting a particular observation and then re-estimating the desired model. Each column should correspond to an element of the parameter vector being estimated. mle_params : 1D ndarray. The original dataset's maximum likelihood point estimate. Should have the same number of elements as `samples.shape[1]`. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 14.3. DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence intervals." Statistical science (1996): 189-212. """ # Check validity of arguments check_conf_percentage_validity(conf_percentage) ensure_samples_is_ndim_ndarray(bootstrap_replicates, ndim=2) ensure_samples_is_ndim_ndarray(jackknife_replicates, name='jackknife', ndim=2) # Calculate the alpha * 100% value alpha_percent = get_alpha_from_conf_percentage(conf_percentage) # Estimate the bias correction for the bootstrap samples bias_correction =\ calc_bias_correction_bca(bootstrap_replicates, mle_params) # Estimate the acceleration acceleration = calc_acceleration_bca(jackknife_replicates) # Get the lower and upper percent value for the raw bootstrap samples. lower_percents =\ calc_lower_bca_percentile(alpha_percent, bias_correction, acceleration) upper_percents =\ calc_upper_bca_percentile(alpha_percent, bias_correction, acceleration) # Get the lower and upper endpoints for the desired confidence intervals. lower_endpoints = np.diag(np.percentile(bootstrap_replicates, lower_percents, interpolation='lower', axis=0)) upper_endpoints = np.diag(np.percentile(bootstrap_replicates, upper_percents, interpolation='higher', axis=0)) # Combine the enpoints into a single ndarray. conf_intervals = combine_conf_endpoints(lower_endpoints, upper_endpoints) return conf_intervals
python
def calc_bca_interval(bootstrap_replicates, jackknife_replicates, mle_params, conf_percentage): """ Calculate 'bias-corrected and accelerated' bootstrap confidence intervals. Parameters ---------- bootstrap_replicates : 2D ndarray. Each row should correspond to a different bootstrap parameter sample. Each column should correspond to an element of the parameter vector being estimated. jackknife_replicates : 2D ndarray. Each row should correspond to a different jackknife parameter sample, formed by deleting a particular observation and then re-estimating the desired model. Each column should correspond to an element of the parameter vector being estimated. mle_params : 1D ndarray. The original dataset's maximum likelihood point estimate. Should have the same number of elements as `samples.shape[1]`. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 14.3. DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence intervals." Statistical science (1996): 189-212. """ # Check validity of arguments check_conf_percentage_validity(conf_percentage) ensure_samples_is_ndim_ndarray(bootstrap_replicates, ndim=2) ensure_samples_is_ndim_ndarray(jackknife_replicates, name='jackknife', ndim=2) # Calculate the alpha * 100% value alpha_percent = get_alpha_from_conf_percentage(conf_percentage) # Estimate the bias correction for the bootstrap samples bias_correction =\ calc_bias_correction_bca(bootstrap_replicates, mle_params) # Estimate the acceleration acceleration = calc_acceleration_bca(jackknife_replicates) # Get the lower and upper percent value for the raw bootstrap samples. lower_percents =\ calc_lower_bca_percentile(alpha_percent, bias_correction, acceleration) upper_percents =\ calc_upper_bca_percentile(alpha_percent, bias_correction, acceleration) # Get the lower and upper endpoints for the desired confidence intervals. lower_endpoints = np.diag(np.percentile(bootstrap_replicates, lower_percents, interpolation='lower', axis=0)) upper_endpoints = np.diag(np.percentile(bootstrap_replicates, upper_percents, interpolation='higher', axis=0)) # Combine the enpoints into a single ndarray. conf_intervals = combine_conf_endpoints(lower_endpoints, upper_endpoints) return conf_intervals
[ "def", "calc_bca_interval", "(", "bootstrap_replicates", ",", "jackknife_replicates", ",", "mle_params", ",", "conf_percentage", ")", ":", "# Check validity of arguments", "check_conf_percentage_validity", "(", "conf_percentage", ")", "ensure_samples_is_ndim_ndarray", "(", "bootstrap_replicates", ",", "ndim", "=", "2", ")", "ensure_samples_is_ndim_ndarray", "(", "jackknife_replicates", ",", "name", "=", "'jackknife'", ",", "ndim", "=", "2", ")", "# Calculate the alpha * 100% value", "alpha_percent", "=", "get_alpha_from_conf_percentage", "(", "conf_percentage", ")", "# Estimate the bias correction for the bootstrap samples", "bias_correction", "=", "calc_bias_correction_bca", "(", "bootstrap_replicates", ",", "mle_params", ")", "# Estimate the acceleration", "acceleration", "=", "calc_acceleration_bca", "(", "jackknife_replicates", ")", "# Get the lower and upper percent value for the raw bootstrap samples.", "lower_percents", "=", "calc_lower_bca_percentile", "(", "alpha_percent", ",", "bias_correction", ",", "acceleration", ")", "upper_percents", "=", "calc_upper_bca_percentile", "(", "alpha_percent", ",", "bias_correction", ",", "acceleration", ")", "# Get the lower and upper endpoints for the desired confidence intervals.", "lower_endpoints", "=", "np", ".", "diag", "(", "np", ".", "percentile", "(", "bootstrap_replicates", ",", "lower_percents", ",", "interpolation", "=", "'lower'", ",", "axis", "=", "0", ")", ")", "upper_endpoints", "=", "np", ".", "diag", "(", "np", ".", "percentile", "(", "bootstrap_replicates", ",", "upper_percents", ",", "interpolation", "=", "'higher'", ",", "axis", "=", "0", ")", ")", "# Combine the enpoints into a single ndarray.", "conf_intervals", "=", "combine_conf_endpoints", "(", "lower_endpoints", ",", "upper_endpoints", ")", "return", "conf_intervals" ]
Calculate 'bias-corrected and accelerated' bootstrap confidence intervals. Parameters ---------- bootstrap_replicates : 2D ndarray. Each row should correspond to a different bootstrap parameter sample. Each column should correspond to an element of the parameter vector being estimated. jackknife_replicates : 2D ndarray. Each row should correspond to a different jackknife parameter sample, formed by deleting a particular observation and then re-estimating the desired model. Each column should correspond to an element of the parameter vector being estimated. mle_params : 1D ndarray. The original dataset's maximum likelihood point estimate. Should have the same number of elements as `samples.shape[1]`. conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level of the returned confidence interval. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- conf_intervals : 2D ndarray. The shape of the returned array will be `(2, samples.shape[1])`. The first row will correspond to the lower value in the confidence interval. The second row will correspond to the upper value in the confidence interval. There will be one column for each element of the parameter vector being estimated. References ---------- Efron, Bradley, and Robert J. Tibshirani. An Introduction to the Bootstrap. CRC press, 1994. Section 14.3. DiCiccio, Thomas J., and Bradley Efron. "Bootstrap confidence intervals." Statistical science (1996): 189-212.
[ "Calculate", "bias", "-", "corrected", "and", "accelerated", "bootstrap", "confidence", "intervals", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_calcs.py#L254-L323
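For a single scalar parameter, the bias correction, acceleration, and adjusted percentiles referenced above reduce to the textbook BCa formulas from Efron and Tibshirani (1994, Section 14.3). The sketch below assumes NumPy and SciPy and is written independently of the pylogit helpers (calc_bias_correction_bca, calc_acceleration_bca, and the percentile adjusters), so it may differ from them in edge-case handling.

import numpy as np
from scipy.stats import norm

def bca_percentiles(boot_reps, jack_reps, mle_est, conf_percentage=95):
    # Bias correction: z0 measures how far the MLE sits from the median of the
    # bootstrap distribution (normal quantile of the fraction of replicates below it).
    z0 = norm.ppf(np.mean(boot_reps < mle_est))
    # Acceleration from the jackknife replicates (a skewness-like ratio).
    diffs = jack_reps.mean() - jack_reps
    accel = (diffs ** 3).sum() / (6.0 * ((diffs ** 2).sum() ** 1.5))
    # Adjust the nominal alpha/2 and 1 - alpha/2 quantiles.
    alpha = (100.0 - conf_percentage) / 100.0
    z_lo, z_hi = norm.ppf(alpha / 2.0), norm.ppf(1.0 - alpha / 2.0)
    lower_pct = 100.0 * norm.cdf(z0 + (z0 + z_lo) / (1.0 - accel * (z0 + z_lo)))
    upper_pct = 100.0 * norm.cdf(z0 + (z0 + z_hi) / (1.0 - accel * (z0 + z_hi)))
    # These percentages are then fed to np.percentile over the bootstrap
    # replicates, one pair per estimated parameter, as in the function above.
    return lower_pct, upper_pct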
865
timothyb0912/pylogit
pylogit/bootstrap_mle.py
extract_default_init_vals
def extract_default_init_vals(orig_model_obj, mnl_point_series, num_params): """ Get the default initial values for the desired model type, based on the point estimate of the MNL model that is 'closest' to the desired model. Parameters ---------- orig_model_obj : an instance or sublcass of the MNDC class. Should correspond to the actual model that we want to bootstrap. mnl_point_series : pandas Series. Should denote the point estimate from the MNL model that is 'closest' to the desired model. num_params : int. Should denote the number of parameters being estimated (including any parameters that are being constrained during estimation). Returns ------- init_vals : 1D ndarray of initial values for the MLE of the desired model. """ # Initialize the initial values init_vals = np.zeros(num_params, dtype=float) # Figure out which values in mnl_point_series are the index coefficients no_outside_intercepts = orig_model_obj.intercept_names is None if no_outside_intercepts: init_index_coefs = mnl_point_series.values init_intercepts = None else: init_index_coefs =\ mnl_point_series.loc[orig_model_obj.ind_var_names].values init_intercepts =\ mnl_point_series.loc[orig_model_obj.intercept_names].values # Add any mixing variables to the index coefficients. if orig_model_obj.mixing_vars is not None: num_mixing_vars = len(orig_model_obj.mixing_vars) init_index_coefs = np.concatenate([init_index_coefs, np.zeros(num_mixing_vars)], axis=0) # Account for the special transformation of the index coefficients that is # needed for the asymmetric logit model. if orig_model_obj.model_type == model_type_to_display_name["Asym"]: multiplier = np.log(len(np.unique(orig_model_obj.alt_IDs))) # Cast the initial index coefficients to a float dtype to ensure # successful broadcasting init_index_coefs = init_index_coefs.astype(float) # Adjust the scale of the index coefficients for the asymmetric logit. init_index_coefs /= multiplier # Combine the initial interept values with the initial index coefficients if init_intercepts is not None: init_index_coefs =\ np.concatenate([init_intercepts, init_index_coefs], axis=0) # Add index coefficients (and mixing variables) to the total initial array num_index = init_index_coefs.shape[0] init_vals[-1 * num_index:] = init_index_coefs # Note that the initial values for the transformed nest coefficients and # the shape parameters is zero so we don't have to change anything return init_vals
python
def extract_default_init_vals(orig_model_obj, mnl_point_series, num_params): """ Get the default initial values for the desired model type, based on the point estimate of the MNL model that is 'closest' to the desired model. Parameters ---------- orig_model_obj : an instance or sublcass of the MNDC class. Should correspond to the actual model that we want to bootstrap. mnl_point_series : pandas Series. Should denote the point estimate from the MNL model that is 'closest' to the desired model. num_params : int. Should denote the number of parameters being estimated (including any parameters that are being constrained during estimation). Returns ------- init_vals : 1D ndarray of initial values for the MLE of the desired model. """ # Initialize the initial values init_vals = np.zeros(num_params, dtype=float) # Figure out which values in mnl_point_series are the index coefficients no_outside_intercepts = orig_model_obj.intercept_names is None if no_outside_intercepts: init_index_coefs = mnl_point_series.values init_intercepts = None else: init_index_coefs =\ mnl_point_series.loc[orig_model_obj.ind_var_names].values init_intercepts =\ mnl_point_series.loc[orig_model_obj.intercept_names].values # Add any mixing variables to the index coefficients. if orig_model_obj.mixing_vars is not None: num_mixing_vars = len(orig_model_obj.mixing_vars) init_index_coefs = np.concatenate([init_index_coefs, np.zeros(num_mixing_vars)], axis=0) # Account for the special transformation of the index coefficients that is # needed for the asymmetric logit model. if orig_model_obj.model_type == model_type_to_display_name["Asym"]: multiplier = np.log(len(np.unique(orig_model_obj.alt_IDs))) # Cast the initial index coefficients to a float dtype to ensure # successful broadcasting init_index_coefs = init_index_coefs.astype(float) # Adjust the scale of the index coefficients for the asymmetric logit. init_index_coefs /= multiplier # Combine the initial interept values with the initial index coefficients if init_intercepts is not None: init_index_coefs =\ np.concatenate([init_intercepts, init_index_coefs], axis=0) # Add index coefficients (and mixing variables) to the total initial array num_index = init_index_coefs.shape[0] init_vals[-1 * num_index:] = init_index_coefs # Note that the initial values for the transformed nest coefficients and # the shape parameters is zero so we don't have to change anything return init_vals
[ "def", "extract_default_init_vals", "(", "orig_model_obj", ",", "mnl_point_series", ",", "num_params", ")", ":", "# Initialize the initial values", "init_vals", "=", "np", ".", "zeros", "(", "num_params", ",", "dtype", "=", "float", ")", "# Figure out which values in mnl_point_series are the index coefficients", "no_outside_intercepts", "=", "orig_model_obj", ".", "intercept_names", "is", "None", "if", "no_outside_intercepts", ":", "init_index_coefs", "=", "mnl_point_series", ".", "values", "init_intercepts", "=", "None", "else", ":", "init_index_coefs", "=", "mnl_point_series", ".", "loc", "[", "orig_model_obj", ".", "ind_var_names", "]", ".", "values", "init_intercepts", "=", "mnl_point_series", ".", "loc", "[", "orig_model_obj", ".", "intercept_names", "]", ".", "values", "# Add any mixing variables to the index coefficients.", "if", "orig_model_obj", ".", "mixing_vars", "is", "not", "None", ":", "num_mixing_vars", "=", "len", "(", "orig_model_obj", ".", "mixing_vars", ")", "init_index_coefs", "=", "np", ".", "concatenate", "(", "[", "init_index_coefs", ",", "np", ".", "zeros", "(", "num_mixing_vars", ")", "]", ",", "axis", "=", "0", ")", "# Account for the special transformation of the index coefficients that is", "# needed for the asymmetric logit model.", "if", "orig_model_obj", ".", "model_type", "==", "model_type_to_display_name", "[", "\"Asym\"", "]", ":", "multiplier", "=", "np", ".", "log", "(", "len", "(", "np", ".", "unique", "(", "orig_model_obj", ".", "alt_IDs", ")", ")", ")", "# Cast the initial index coefficients to a float dtype to ensure", "# successful broadcasting", "init_index_coefs", "=", "init_index_coefs", ".", "astype", "(", "float", ")", "# Adjust the scale of the index coefficients for the asymmetric logit.", "init_index_coefs", "/=", "multiplier", "# Combine the initial interept values with the initial index coefficients", "if", "init_intercepts", "is", "not", "None", ":", "init_index_coefs", "=", "np", ".", "concatenate", "(", "[", "init_intercepts", ",", "init_index_coefs", "]", ",", "axis", "=", "0", ")", "# Add index coefficients (and mixing variables) to the total initial array", "num_index", "=", "init_index_coefs", ".", "shape", "[", "0", "]", "init_vals", "[", "-", "1", "*", "num_index", ":", "]", "=", "init_index_coefs", "# Note that the initial values for the transformed nest coefficients and", "# the shape parameters is zero so we don't have to change anything", "return", "init_vals" ]
Get the default initial values for the desired model type, based on the
point estimate of the MNL model that is 'closest' to the desired model.

Parameters
----------
orig_model_obj : an instance or subclass of the MNDC class.
    Should correspond to the actual model that we want to bootstrap.
mnl_point_series : pandas Series.
    Should denote the point estimate from the MNL model that is 'closest'
    to the desired model.
num_params : int.
    Should denote the number of parameters being estimated (including any
    parameters that are being constrained during estimation).

Returns
-------
init_vals : 1D ndarray of initial values for the MLE of the desired model.
[ "Get", "the", "default", "initial", "values", "for", "the", "desired", "model", "type", "based", "on", "the", "point", "estimate", "of", "the", "MNL", "model", "that", "is", "closest", "to", "the", "desired", "model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_mle.py#L14-L75
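A minimal standalone sketch of the array-assembly step above, using only numpy and made-up numbers in place of a real MNDC model object: the shape and transformed-nest slots stay at their default of zero while the intercepts and index coefficients fill the tail of the initial-value array.

import numpy as np

# Hypothetical MNL point estimates: 2 outside intercepts and 3 index coefficients.
init_intercepts = np.array([0.5, -0.2])
init_index_coefs = np.array([1.1, -0.7, 0.3])

# Suppose the target model also estimates 4 shape parameters, so 9 parameters total.
num_params = 9
init_vals = np.zeros(num_params, dtype=float)

# Place the intercepts followed by the index coefficients at the end of the
# array; the leading (shape / transformed nest) slots keep their zeros.
combined = np.concatenate([init_intercepts, init_index_coefs], axis=0)
init_vals[-combined.shape[0]:] = combined
print(init_vals)  # [0. 0. 0. 0. 0.5 -0.2 1.1 -0.7 0.3]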
866
timothyb0912/pylogit
pylogit/bootstrap_mle.py
get_model_abbrev
def get_model_abbrev(model_obj): """ Extract the string used to specify the model type of this model object in `pylogit.create_chohice_model`. Parameters ---------- model_obj : An MNDC_Model instance. Returns ------- str. The internal abbreviation used for the particular type of MNDC_Model. """ # Get the 'display name' for our model. model_type = model_obj.model_type # Find the model abbreviation for this model's display name. for key in model_type_to_display_name: if model_type_to_display_name[key] == model_type: return key # If none of the strings in model_type_to_display_name matches our model # object, then raise an error. msg = "Model object has an unknown or incorrect model type." raise ValueError(msg)
python
def get_model_abbrev(model_obj): """ Extract the string used to specify the model type of this model object in `pylogit.create_chohice_model`. Parameters ---------- model_obj : An MNDC_Model instance. Returns ------- str. The internal abbreviation used for the particular type of MNDC_Model. """ # Get the 'display name' for our model. model_type = model_obj.model_type # Find the model abbreviation for this model's display name. for key in model_type_to_display_name: if model_type_to_display_name[key] == model_type: return key # If none of the strings in model_type_to_display_name matches our model # object, then raise an error. msg = "Model object has an unknown or incorrect model type." raise ValueError(msg)
[ "def", "get_model_abbrev", "(", "model_obj", ")", ":", "# Get the 'display name' for our model.", "model_type", "=", "model_obj", ".", "model_type", "# Find the model abbreviation for this model's display name.", "for", "key", "in", "model_type_to_display_name", ":", "if", "model_type_to_display_name", "[", "key", "]", "==", "model_type", ":", "return", "key", "# If none of the strings in model_type_to_display_name matches our model", "# object, then raise an error.", "msg", "=", "\"Model object has an unknown or incorrect model type.\"", "raise", "ValueError", "(", "msg", ")" ]
Extract the string used to specify the model type of this model object in
`pylogit.create_choice_model`.

Parameters
----------
model_obj : An MNDC_Model instance.

Returns
-------
str. The internal abbreviation used for the particular type of MNDC_Model.
[ "Extract", "the", "string", "used", "to", "specify", "the", "model", "type", "of", "this", "model", "object", "in", "pylogit", ".", "create_chohice_model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_mle.py#L78-L100
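The lookup above is a plain reverse scan over a display-name mapping. A toy sketch with a hypothetical two-entry stand-in for pylogit's model_type_to_display_name dictionary:

# Hypothetical stand-in; the real mapping is defined inside pylogit.
model_type_to_display_name = {"MNL": "Multinomial Logit Model",
                              "Asym": "Multinomial Asymmetric Logit Model"}

def abbrev_for(display_name):
    # Return the abbreviation whose display name matches, else raise.
    for key in model_type_to_display_name:
        if model_type_to_display_name[key] == display_name:
            return key
    raise ValueError("Model object has an unknown or incorrect model type.")

print(abbrev_for("Multinomial Logit Model"))  # MNL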
867
timothyb0912/pylogit
pylogit/bootstrap_mle.py
get_model_creation_kwargs
def get_model_creation_kwargs(model_obj): """ Get a dictionary of the keyword arguments needed to create the passed model object using `pylogit.create_choice_model`. Parameters ---------- model_obj : An MNDC_Model instance. Returns ------- model_kwargs : dict. Contains the keyword arguments and the required values that are needed to initialize a replica of `model_obj`. """ # Extract the model abbreviation for this model model_abbrev = get_model_abbrev(model_obj) # Create a dictionary to store the keyword arguments needed to Initialize # the new model object.d model_kwargs = {"model_type": model_abbrev, "names": model_obj.name_spec, "intercept_names": model_obj.intercept_names, "intercept_ref_pos": model_obj.intercept_ref_position, "shape_names": model_obj.shape_names, "shape_ref_pos": model_obj.shape_ref_position, "nest_spec": model_obj.nest_spec, "mixing_vars": model_obj.mixing_vars, "mixing_id_col": model_obj.mixing_id_col} return model_kwargs
python
def get_model_creation_kwargs(model_obj): """ Get a dictionary of the keyword arguments needed to create the passed model object using `pylogit.create_choice_model`. Parameters ---------- model_obj : An MNDC_Model instance. Returns ------- model_kwargs : dict. Contains the keyword arguments and the required values that are needed to initialize a replica of `model_obj`. """ # Extract the model abbreviation for this model model_abbrev = get_model_abbrev(model_obj) # Create a dictionary to store the keyword arguments needed to Initialize # the new model object.d model_kwargs = {"model_type": model_abbrev, "names": model_obj.name_spec, "intercept_names": model_obj.intercept_names, "intercept_ref_pos": model_obj.intercept_ref_position, "shape_names": model_obj.shape_names, "shape_ref_pos": model_obj.shape_ref_position, "nest_spec": model_obj.nest_spec, "mixing_vars": model_obj.mixing_vars, "mixing_id_col": model_obj.mixing_id_col} return model_kwargs
[ "def", "get_model_creation_kwargs", "(", "model_obj", ")", ":", "# Extract the model abbreviation for this model", "model_abbrev", "=", "get_model_abbrev", "(", "model_obj", ")", "# Create a dictionary to store the keyword arguments needed to Initialize", "# the new model object.d", "model_kwargs", "=", "{", "\"model_type\"", ":", "model_abbrev", ",", "\"names\"", ":", "model_obj", ".", "name_spec", ",", "\"intercept_names\"", ":", "model_obj", ".", "intercept_names", ",", "\"intercept_ref_pos\"", ":", "model_obj", ".", "intercept_ref_position", ",", "\"shape_names\"", ":", "model_obj", ".", "shape_names", ",", "\"shape_ref_pos\"", ":", "model_obj", ".", "shape_ref_position", ",", "\"nest_spec\"", ":", "model_obj", ".", "nest_spec", ",", "\"mixing_vars\"", ":", "model_obj", ".", "mixing_vars", ",", "\"mixing_id_col\"", ":", "model_obj", ".", "mixing_id_col", "}", "return", "model_kwargs" ]
Get a dictionary of the keyword arguments needed to create the passed model object using `pylogit.create_choice_model`. Parameters ---------- model_obj : An MNDC_Model instance. Returns ------- model_kwargs : dict. Contains the keyword arguments and the required values that are needed to initialize a replica of `model_obj`.
[ "Get", "a", "dictionary", "of", "the", "keyword", "arguments", "needed", "to", "create", "the", "passed", "model", "object", "using", "pylogit", ".", "create_choice_model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_mle.py#L103-L133
868
timothyb0912/pylogit
pylogit/pylogit.py
ensure_valid_model_type
def ensure_valid_model_type(specified_type, model_type_list): """ Checks to make sure that `specified_type` is in `model_type_list` and raises a helpful error if this is not the case. Parameters ---------- specified_type : str. Denotes the user-specified model type that is to be checked. model_type_list : list of strings. Contains all of the model types that are acceptable kwarg values. Returns ------- None. """ if specified_type not in model_type_list: msg_1 = "The specified model_type was not valid." msg_2 = "Valid model-types are {}".format(model_type_list) msg_3 = "The passed model-type was: {}".format(specified_type) total_msg = "\n".join([msg_1, msg_2, msg_3]) raise ValueError(total_msg) return None
python
def ensure_valid_model_type(specified_type, model_type_list): """ Checks to make sure that `specified_type` is in `model_type_list` and raises a helpful error if this is not the case. Parameters ---------- specified_type : str. Denotes the user-specified model type that is to be checked. model_type_list : list of strings. Contains all of the model types that are acceptable kwarg values. Returns ------- None. """ if specified_type not in model_type_list: msg_1 = "The specified model_type was not valid." msg_2 = "Valid model-types are {}".format(model_type_list) msg_3 = "The passed model-type was: {}".format(specified_type) total_msg = "\n".join([msg_1, msg_2, msg_3]) raise ValueError(total_msg) return None
[ "def", "ensure_valid_model_type", "(", "specified_type", ",", "model_type_list", ")", ":", "if", "specified_type", "not", "in", "model_type_list", ":", "msg_1", "=", "\"The specified model_type was not valid.\"", "msg_2", "=", "\"Valid model-types are {}\"", ".", "format", "(", "model_type_list", ")", "msg_3", "=", "\"The passed model-type was: {}\"", ".", "format", "(", "specified_type", ")", "total_msg", "=", "\"\\n\"", ".", "join", "(", "[", "msg_1", ",", "msg_2", ",", "msg_3", "]", ")", "raise", "ValueError", "(", "total_msg", ")", "return", "None" ]
Checks to make sure that `specified_type` is in `model_type_list` and raises a helpful error if this is not the case. Parameters ---------- specified_type : str. Denotes the user-specified model type that is to be checked. model_type_list : list of strings. Contains all of the model types that are acceptable kwarg values. Returns ------- None.
[ "Checks", "to", "make", "sure", "that", "specified_type", "is", "in", "model_type_list", "and", "raises", "a", "helpful", "error", "if", "this", "is", "not", "the", "case", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/pylogit.py#L58-L80
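A short standalone illustration of the guard above, with an illustrative (not exhaustive) list of abbreviations; the real list of valid model types comes from pylogit itself.

# Hypothetical list of acceptable model-type abbreviations.
valid_types = ["MNL", "Asym", "Nested Logit", "Mixed Logit"]

specified = "Probit"
if specified not in valid_types:
    msg = "\n".join(["The specified model_type was not valid.",
                     "Valid model-types are {}".format(valid_types),
                     "The passed model-type was: {}".format(specified)])
    print(msg)  # the function itself would raise ValueError(msg)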
869
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
ensure_valid_nums_in_specification_cols
def ensure_valid_nums_in_specification_cols(specification, dataframe): """ Checks whether each column in `specification` contains numeric data, excluding positive or negative infinity and excluding NaN. Raises ValueError if any of the columns do not meet these requirements. Parameters ---------- specification : iterable of column headers in `dataframe`. dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. Returns ------- None. """ problem_cols = [] for col in specification: # The condition below checks for values that are not floats or integers # This will catch values that are strings. if dataframe[col].dtype.kind not in ['f', 'i', 'u']: problem_cols.append(col) # The condition below checks for positive or negative inifinity values. elif np.isinf(dataframe[col]).any(): problem_cols.append(col) # This condition will check for NaN values. elif np.isnan(dataframe[col]).any(): problem_cols.append(col) if problem_cols != []: msg = "The following columns contain either +/- inifinity values, " msg_2 = "NaN values, or values that are not real numbers " msg_3 = "(e.g. strings):\n{}" total_msg = msg + msg_2 + msg_3 raise ValueError(total_msg.format(problem_cols)) return None
python
def ensure_valid_nums_in_specification_cols(specification, dataframe): """ Checks whether each column in `specification` contains numeric data, excluding positive or negative infinity and excluding NaN. Raises ValueError if any of the columns do not meet these requirements. Parameters ---------- specification : iterable of column headers in `dataframe`. dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. Returns ------- None. """ problem_cols = [] for col in specification: # The condition below checks for values that are not floats or integers # This will catch values that are strings. if dataframe[col].dtype.kind not in ['f', 'i', 'u']: problem_cols.append(col) # The condition below checks for positive or negative inifinity values. elif np.isinf(dataframe[col]).any(): problem_cols.append(col) # This condition will check for NaN values. elif np.isnan(dataframe[col]).any(): problem_cols.append(col) if problem_cols != []: msg = "The following columns contain either +/- inifinity values, " msg_2 = "NaN values, or values that are not real numbers " msg_3 = "(e.g. strings):\n{}" total_msg = msg + msg_2 + msg_3 raise ValueError(total_msg.format(problem_cols)) return None
[ "def", "ensure_valid_nums_in_specification_cols", "(", "specification", ",", "dataframe", ")", ":", "problem_cols", "=", "[", "]", "for", "col", "in", "specification", ":", "# The condition below checks for values that are not floats or integers", "# This will catch values that are strings.", "if", "dataframe", "[", "col", "]", ".", "dtype", ".", "kind", "not", "in", "[", "'f'", ",", "'i'", ",", "'u'", "]", ":", "problem_cols", ".", "append", "(", "col", ")", "# The condition below checks for positive or negative inifinity values.", "elif", "np", ".", "isinf", "(", "dataframe", "[", "col", "]", ")", ".", "any", "(", ")", ":", "problem_cols", ".", "append", "(", "col", ")", "# This condition will check for NaN values.", "elif", "np", ".", "isnan", "(", "dataframe", "[", "col", "]", ")", ".", "any", "(", ")", ":", "problem_cols", ".", "append", "(", "col", ")", "if", "problem_cols", "!=", "[", "]", ":", "msg", "=", "\"The following columns contain either +/- inifinity values, \"", "msg_2", "=", "\"NaN values, or values that are not real numbers \"", "msg_3", "=", "\"(e.g. strings):\\n{}\"", "total_msg", "=", "msg", "+", "msg_2", "+", "msg_3", "raise", "ValueError", "(", "total_msg", ".", "format", "(", "problem_cols", ")", ")", "return", "None" ]
Checks whether each column in `specification` contains numeric data, excluding positive or negative infinity and excluding NaN. Raises ValueError if any of the columns do not meet these requirements. Parameters ---------- specification : iterable of column headers in `dataframe`. dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. Returns ------- None.
[ "Checks", "whether", "each", "column", "in", "specification", "contains", "numeric", "data", "excluding", "positive", "or", "negative", "infinity", "and", "excluding", "NaN", ".", "Raises", "ValueError", "if", "any", "of", "the", "columns", "do", "not", "meet", "these", "requirements", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L60-L96
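A self-contained illustration of the three screens above (non-numeric dtype, infinite values, missing values) on a toy DataFrame; only the clean column passes.

import numpy as np
import pandas as pd

df = pd.DataFrame({"clean": [1.0, 2.0, 3.0],
                   "text": ["a", "b", "c"],
                   "infinite": [1.0, np.inf, 2.0],
                   "missing": [1.0, np.nan, 2.0]})

problem_cols = []
for col in ["clean", "text", "infinite", "missing"]:
    if df[col].dtype.kind not in ["f", "i", "u"]:   # not a real-number dtype
        problem_cols.append(col)
    elif np.isinf(df[col]).any():                   # +/- infinity present
        problem_cols.append(col)
    elif np.isnan(df[col]).any():                   # NaN present
        problem_cols.append(col)

print(problem_cols)  # ['text', 'infinite', 'missing']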
870
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
check_length_of_shape_or_intercept_names
def check_length_of_shape_or_intercept_names(name_list, num_alts, constrained_param, list_title): """ Ensures that the length of the parameter names matches the number of parameters that will be estimated. Will raise a ValueError otherwise. Parameters ---------- name_list : list of strings. Each element should be the name of a parameter that is to be estimated. num_alts : int. Should be the total number of alternatives in the universal choice set for this dataset. constrainted_param : {0, 1, True, False} Indicates whether (1 or True) or not (0 or False) one of the type of parameters being estimated will be constrained. For instance, constraining one of the intercepts. list_title : str. Should specify the type of parameters whose names are being checked. Examples include 'intercept_params' or 'shape_params'. Returns ------- None. """ if len(name_list) != (num_alts - constrained_param): msg_1 = "{} is of the wrong length:".format(list_title) msg_2 = "len({}) == {}".format(list_title, len(name_list)) correct_length = num_alts - constrained_param msg_3 = "The correct length is: {}".format(correct_length) total_msg = "\n".join([msg_1, msg_2, msg_3]) raise ValueError(total_msg) return None
python
def check_length_of_shape_or_intercept_names(name_list, num_alts, constrained_param, list_title): """ Ensures that the length of the parameter names matches the number of parameters that will be estimated. Will raise a ValueError otherwise. Parameters ---------- name_list : list of strings. Each element should be the name of a parameter that is to be estimated. num_alts : int. Should be the total number of alternatives in the universal choice set for this dataset. constrainted_param : {0, 1, True, False} Indicates whether (1 or True) or not (0 or False) one of the type of parameters being estimated will be constrained. For instance, constraining one of the intercepts. list_title : str. Should specify the type of parameters whose names are being checked. Examples include 'intercept_params' or 'shape_params'. Returns ------- None. """ if len(name_list) != (num_alts - constrained_param): msg_1 = "{} is of the wrong length:".format(list_title) msg_2 = "len({}) == {}".format(list_title, len(name_list)) correct_length = num_alts - constrained_param msg_3 = "The correct length is: {}".format(correct_length) total_msg = "\n".join([msg_1, msg_2, msg_3]) raise ValueError(total_msg) return None
[ "def", "check_length_of_shape_or_intercept_names", "(", "name_list", ",", "num_alts", ",", "constrained_param", ",", "list_title", ")", ":", "if", "len", "(", "name_list", ")", "!=", "(", "num_alts", "-", "constrained_param", ")", ":", "msg_1", "=", "\"{} is of the wrong length:\"", ".", "format", "(", "list_title", ")", "msg_2", "=", "\"len({}) == {}\"", ".", "format", "(", "list_title", ",", "len", "(", "name_list", ")", ")", "correct_length", "=", "num_alts", "-", "constrained_param", "msg_3", "=", "\"The correct length is: {}\"", ".", "format", "(", "correct_length", ")", "total_msg", "=", "\"\\n\"", ".", "join", "(", "[", "msg_1", ",", "msg_2", ",", "msg_3", "]", ")", "raise", "ValueError", "(", "total_msg", ")", "return", "None" ]
Ensures that the length of the parameter names matches the number of
parameters that will be estimated. Will raise a ValueError otherwise.

Parameters
----------
name_list : list of strings.
    Each element should be the name of a parameter that is to be estimated.
num_alts : int.
    Should be the total number of alternatives in the universal choice set
    for this dataset.
constrained_param : {0, 1, True, False}
    Indicates whether (1 or True) or not (0 or False) one of the type of
    parameters being estimated will be constrained. For instance,
    constraining one of the intercepts.
list_title : str.
    Should specify the type of parameters whose names are being checked.
    Examples include 'intercept_params' or 'shape_params'.

Returns
-------
None.
[ "Ensures", "that", "the", "length", "of", "the", "parameter", "names", "matches", "the", "number", "of", "parameters", "that", "will", "be", "estimated", ".", "Will", "raise", "a", "ValueError", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L145-L180
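A quick numeric illustration of the length rule above: with four alternatives and one constrained parameter (for example, a reference intercept fixed at zero for identification), exactly three names are expected; the names here are made up.

num_alts = 4
constrained_param = 1  # one reference intercept is not estimated
intercept_names = ["ASC drive", "ASC bus", "ASC rail"]

# Passes because len(name_list) == num_alts - constrained_param == 3.
assert len(intercept_names) == num_alts - constrained_param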
871
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
check_type_of_nest_spec_keys_and_values
def check_type_of_nest_spec_keys_and_values(nest_spec): """ Ensures that the keys and values of `nest_spec` are strings and lists. Raises a helpful ValueError if they are. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. Returns ------- None. """ try: assert all([isinstance(k, str) for k in nest_spec]) assert all([isinstance(nest_spec[k], list) for k in nest_spec]) except AssertionError: msg = "All nest_spec keys/values must be strings/lists." raise TypeError(msg) return None
python
def check_type_of_nest_spec_keys_and_values(nest_spec): """ Ensures that the keys and values of `nest_spec` are strings and lists. Raises a helpful ValueError if they are. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. Returns ------- None. """ try: assert all([isinstance(k, str) for k in nest_spec]) assert all([isinstance(nest_spec[k], list) for k in nest_spec]) except AssertionError: msg = "All nest_spec keys/values must be strings/lists." raise TypeError(msg) return None
[ "def", "check_type_of_nest_spec_keys_and_values", "(", "nest_spec", ")", ":", "try", ":", "assert", "all", "(", "[", "isinstance", "(", "k", ",", "str", ")", "for", "k", "in", "nest_spec", "]", ")", "assert", "all", "(", "[", "isinstance", "(", "nest_spec", "[", "k", "]", ",", "list", ")", "for", "k", "in", "nest_spec", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"All nest_spec keys/values must be strings/lists.\"", "raise", "TypeError", "(", "msg", ")", "return", "None" ]
Ensures that the keys and values of `nest_spec` are strings and lists.
Raises a helpful TypeError if they are not.

Parameters
----------
nest_spec : OrderedDict, or None, optional.
    Keys are strings that define the name of the nests. Values are lists of
    alternative ids, denoting which alternatives belong to which nests. Each
    alternative id must only be associated with a single nest!
    Default == None.

Returns
-------
None.
[ "Ensures", "that", "the", "keys", "and", "values", "of", "nest_spec", "are", "strings", "and", "lists", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L183-L207
872
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
check_for_empty_nests_in_nest_spec
def check_for_empty_nests_in_nest_spec(nest_spec): """ Ensures that the values of `nest_spec` are not empty lists. Raises a helpful ValueError if they are. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. Returns ------- None. """ empty_nests = [] for k in nest_spec: if len(nest_spec[k]) == 0: empty_nests.append(k) if empty_nests != []: msg = "The following nests are INCORRECTLY empty: {}" raise ValueError(msg.format(empty_nests)) return None
python
def check_for_empty_nests_in_nest_spec(nest_spec): """ Ensures that the values of `nest_spec` are not empty lists. Raises a helpful ValueError if they are. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. Returns ------- None. """ empty_nests = [] for k in nest_spec: if len(nest_spec[k]) == 0: empty_nests.append(k) if empty_nests != []: msg = "The following nests are INCORRECTLY empty: {}" raise ValueError(msg.format(empty_nests)) return None
[ "def", "check_for_empty_nests_in_nest_spec", "(", "nest_spec", ")", ":", "empty_nests", "=", "[", "]", "for", "k", "in", "nest_spec", ":", "if", "len", "(", "nest_spec", "[", "k", "]", ")", "==", "0", ":", "empty_nests", ".", "append", "(", "k", ")", "if", "empty_nests", "!=", "[", "]", ":", "msg", "=", "\"The following nests are INCORRECTLY empty: {}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "empty_nests", ")", ")", "return", "None" ]
Ensures that the values of `nest_spec` are not empty lists. Raises a helpful ValueError if they are. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. Returns ------- None.
[ "Ensures", "that", "the", "values", "of", "nest_spec", "are", "not", "empty", "lists", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L210-L235
873
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
ensure_alt_ids_in_nest_spec_are_ints
def ensure_alt_ids_in_nest_spec_are_ints(nest_spec, list_elements): """ Ensures that the alternative id's in `nest_spec` are integers. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of lists of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. Returns ------- None. """ try: assert all([isinstance(x, int) for x in list_elements]) except AssertionError: msg = "All elements of the nest_spec values should be integers" raise ValueError(msg) return None
python
def ensure_alt_ids_in_nest_spec_are_ints(nest_spec, list_elements): """ Ensures that the alternative id's in `nest_spec` are integers. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of lists of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. Returns ------- None. """ try: assert all([isinstance(x, int) for x in list_elements]) except AssertionError: msg = "All elements of the nest_spec values should be integers" raise ValueError(msg) return None
[ "def", "ensure_alt_ids_in_nest_spec_are_ints", "(", "nest_spec", ",", "list_elements", ")", ":", "try", ":", "assert", "all", "(", "[", "isinstance", "(", "x", ",", "int", ")", "for", "x", "in", "list_elements", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"All elements of the nest_spec values should be integers\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Ensures that the alternative id's in `nest_spec` are integers. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of lists of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. Returns ------- None.
[ "Ensures", "that", "the", "alternative", "id", "s", "in", "nest_spec", "are", "integers", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "not", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L238-L264
874
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
ensure_alt_ids_are_only_in_one_nest
def ensure_alt_ids_are_only_in_one_nest(nest_spec, list_elements): """ Ensures that the alternative id's in `nest_spec` are only associated with a single nest. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. Returns ------- None. """ try: assert len(set(list_elements)) == len(list_elements) except AssertionError: msg = "Each alternative id should only be in a single nest." raise ValueError(msg) return None
python
def ensure_alt_ids_are_only_in_one_nest(nest_spec, list_elements): """ Ensures that the alternative id's in `nest_spec` are only associated with a single nest. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. Returns ------- None. """ try: assert len(set(list_elements)) == len(list_elements) except AssertionError: msg = "Each alternative id should only be in a single nest." raise ValueError(msg) return None
[ "def", "ensure_alt_ids_are_only_in_one_nest", "(", "nest_spec", ",", "list_elements", ")", ":", "try", ":", "assert", "len", "(", "set", "(", "list_elements", ")", ")", "==", "len", "(", "list_elements", ")", "except", "AssertionError", ":", "msg", "=", "\"Each alternative id should only be in a single nest.\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Ensures that the alternative id's in `nest_spec` are only associated with a single nest. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. Returns ------- None.
[ "Ensures", "that", "the", "alternative", "id", "s", "in", "nest_spec", "are", "only", "associated", "with", "a", "single", "nest", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "not", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L267-L293
875
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
ensure_all_alt_ids_have_a_nest
def ensure_all_alt_ids_have_a_nest(nest_spec, list_elements, all_ids): """ Ensures that the alternative id's in `nest_spec` are all associated with a nest. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. all_ids : list of ints. Each element should correspond to one of the alternatives that is present in the universal choice set for this model. Returns ------- None. """ unaccounted_alt_ids = [] for alt_id in all_ids: if alt_id not in list_elements: unaccounted_alt_ids.append(alt_id) if unaccounted_alt_ids != []: msg = "Associate the following alternative ids with a nest: {}" raise ValueError(msg.format(unaccounted_alt_ids)) return None
python
def ensure_all_alt_ids_have_a_nest(nest_spec, list_elements, all_ids): """ Ensures that the alternative id's in `nest_spec` are all associated with a nest. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. all_ids : list of ints. Each element should correspond to one of the alternatives that is present in the universal choice set for this model. Returns ------- None. """ unaccounted_alt_ids = [] for alt_id in all_ids: if alt_id not in list_elements: unaccounted_alt_ids.append(alt_id) if unaccounted_alt_ids != []: msg = "Associate the following alternative ids with a nest: {}" raise ValueError(msg.format(unaccounted_alt_ids)) return None
[ "def", "ensure_all_alt_ids_have_a_nest", "(", "nest_spec", ",", "list_elements", ",", "all_ids", ")", ":", "unaccounted_alt_ids", "=", "[", "]", "for", "alt_id", "in", "all_ids", ":", "if", "alt_id", "not", "in", "list_elements", ":", "unaccounted_alt_ids", ".", "append", "(", "alt_id", ")", "if", "unaccounted_alt_ids", "!=", "[", "]", ":", "msg", "=", "\"Associate the following alternative ids with a nest: {}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "unaccounted_alt_ids", ")", ")", "return", "None" ]
Ensures that the alternative id's in `nest_spec` are all associated with a nest. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. all_ids : list of ints. Each element should correspond to one of the alternatives that is present in the universal choice set for this model. Returns ------- None.
[ "Ensures", "that", "the", "alternative", "id", "s", "in", "nest_spec", "are", "all", "associated", "with", "a", "nest", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "not", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L296-L327
876
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
ensure_nest_alts_are_valid_alts
def ensure_nest_alts_are_valid_alts(nest_spec, list_elements, all_ids): """ Ensures that the alternative id's in `nest_spec` are all in the universal choice set for this dataset. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. all_ids : list of ints. Each element should correspond to one of the alternatives that is present in the universal choice set for this model. Returns ------- None. """ invalid_alt_ids = [] for x in list_elements: if x not in all_ids: invalid_alt_ids.append(x) if invalid_alt_ids != []: msg = "The following elements are not in df[alt_id_col]: {}" raise ValueError(msg.format(invalid_alt_ids)) return None
python
def ensure_nest_alts_are_valid_alts(nest_spec, list_elements, all_ids): """ Ensures that the alternative id's in `nest_spec` are all in the universal choice set for this dataset. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. all_ids : list of ints. Each element should correspond to one of the alternatives that is present in the universal choice set for this model. Returns ------- None. """ invalid_alt_ids = [] for x in list_elements: if x not in all_ids: invalid_alt_ids.append(x) if invalid_alt_ids != []: msg = "The following elements are not in df[alt_id_col]: {}" raise ValueError(msg.format(invalid_alt_ids)) return None
[ "def", "ensure_nest_alts_are_valid_alts", "(", "nest_spec", ",", "list_elements", ",", "all_ids", ")", ":", "invalid_alt_ids", "=", "[", "]", "for", "x", "in", "list_elements", ":", "if", "x", "not", "in", "all_ids", ":", "invalid_alt_ids", ".", "append", "(", "x", ")", "if", "invalid_alt_ids", "!=", "[", "]", ":", "msg", "=", "\"The following elements are not in df[alt_id_col]: {}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "invalid_alt_ids", ")", ")", "return", "None" ]
Ensures that the alternative id's in `nest_spec` are all in the universal choice set for this dataset. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. all_ids : list of ints. Each element should correspond to one of the alternatives that is present in the universal choice set for this model. Returns ------- None.
[ "Ensures", "that", "the", "alternative", "id", "s", "in", "nest_spec", "are", "all", "in", "the", "universal", "choice", "set", "for", "this", "dataset", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "not", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L330-L361
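Taken together, the nest_spec checks in the last several records enforce a simple contract. A standalone sketch of a specification that satisfies all of them (string keys, non-empty lists of integer alternative ids, each id in exactly one nest, every id of the universal choice set covered, and no unknown ids); the nest names and ids are made up.

from collections import OrderedDict

all_ids = [1, 2, 3, 4]
nest_spec = OrderedDict([("motorized", [1, 2]),
                         ("non-motorized", [3, 4])])

list_elements = [i for ids in nest_spec.values() for i in ids]

assert all(isinstance(k, str) for k in nest_spec)        # keys are strings
assert all(len(v) > 0 for v in nest_spec.values())       # no empty nests
assert all(isinstance(i, int) for i in list_elements)    # ids are integers
assert len(set(list_elements)) == len(list_elements)     # one nest per id
assert all(i in list_elements for i in all_ids)          # every id has a nest
assert all(i in all_ids for i in list_elements)          # only valid ids used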
877
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
check_type_and_size_of_param_list
def check_type_and_size_of_param_list(param_list, expected_length): """ Ensure that param_list is a list with the expected length. Raises a helpful ValueError if this is not the case. """ try: assert isinstance(param_list, list) assert len(param_list) == expected_length except AssertionError: msg = "param_list must be a list containing {} elements." raise ValueError(msg.format(expected_length)) return None
python
def check_type_and_size_of_param_list(param_list, expected_length): """ Ensure that param_list is a list with the expected length. Raises a helpful ValueError if this is not the case. """ try: assert isinstance(param_list, list) assert len(param_list) == expected_length except AssertionError: msg = "param_list must be a list containing {} elements." raise ValueError(msg.format(expected_length)) return None
[ "def", "check_type_and_size_of_param_list", "(", "param_list", ",", "expected_length", ")", ":", "try", ":", "assert", "isinstance", "(", "param_list", ",", "list", ")", "assert", "len", "(", "param_list", ")", "==", "expected_length", "except", "AssertionError", ":", "msg", "=", "\"param_list must be a list containing {} elements.\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "expected_length", ")", ")", "return", "None" ]
Ensure that param_list is a list with the expected length. Raises a helpful ValueError if this is not the case.
[ "Ensure", "that", "param_list", "is", "a", "list", "with", "the", "expected", "length", ".", "Raises", "a", "helpful", "ValueError", "if", "this", "is", "not", "the", "case", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L410-L422
878
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
check_type_of_param_list_elements
def check_type_of_param_list_elements(param_list): """ Ensures that all elements of param_list are ndarrays or None. Raises a helpful ValueError if otherwise. """ try: assert isinstance(param_list[0], np.ndarray) assert all([(x is None or isinstance(x, np.ndarray)) for x in param_list]) except AssertionError: msg = "param_list[0] must be a numpy array." msg_2 = "All other elements must be numpy arrays or None." total_msg = msg + "\n" + msg_2 raise TypeError(total_msg) return None
python
def check_type_of_param_list_elements(param_list): """ Ensures that all elements of param_list are ndarrays or None. Raises a helpful ValueError if otherwise. """ try: assert isinstance(param_list[0], np.ndarray) assert all([(x is None or isinstance(x, np.ndarray)) for x in param_list]) except AssertionError: msg = "param_list[0] must be a numpy array." msg_2 = "All other elements must be numpy arrays or None." total_msg = msg + "\n" + msg_2 raise TypeError(total_msg) return None
[ "def", "check_type_of_param_list_elements", "(", "param_list", ")", ":", "try", ":", "assert", "isinstance", "(", "param_list", "[", "0", "]", ",", "np", ".", "ndarray", ")", "assert", "all", "(", "[", "(", "x", "is", "None", "or", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ")", "for", "x", "in", "param_list", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"param_list[0] must be a numpy array.\"", "msg_2", "=", "\"All other elements must be numpy arrays or None.\"", "total_msg", "=", "msg", "+", "\"\\n\"", "+", "msg_2", "raise", "TypeError", "(", "total_msg", ")", "return", "None" ]
Ensures that all elements of param_list are ndarrays or None. Raises a
helpful TypeError if otherwise.
[ "Ensures", "that", "all", "elements", "of", "param_list", "are", "ndarrays", "or", "None", ".", "Raises", "a", "helpful", "ValueError", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L425-L440
879
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
check_num_columns_in_param_list_arrays
def check_num_columns_in_param_list_arrays(param_list): """ Ensure that each array in param_list, that is not None, has the same number of columns. Raises a helpful ValueError if otherwise. Parameters ---------- param_list : list of ndarrays or None. Returns ------- None. """ try: num_columns = param_list[0].shape[1] assert all([x is None or (x.shape[1] == num_columns) for x in param_list]) except AssertionError: msg = "param_list arrays should have equal number of columns." raise ValueError(msg) return None
python
def check_num_columns_in_param_list_arrays(param_list): """ Ensure that each array in param_list, that is not None, has the same number of columns. Raises a helpful ValueError if otherwise. Parameters ---------- param_list : list of ndarrays or None. Returns ------- None. """ try: num_columns = param_list[0].shape[1] assert all([x is None or (x.shape[1] == num_columns) for x in param_list]) except AssertionError: msg = "param_list arrays should have equal number of columns." raise ValueError(msg) return None
[ "def", "check_num_columns_in_param_list_arrays", "(", "param_list", ")", ":", "try", ":", "num_columns", "=", "param_list", "[", "0", "]", ".", "shape", "[", "1", "]", "assert", "all", "(", "[", "x", "is", "None", "or", "(", "x", ".", "shape", "[", "1", "]", "==", "num_columns", ")", "for", "x", "in", "param_list", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"param_list arrays should have equal number of columns.\"", "raise", "ValueError", "(", "msg", ")", "return", "None" ]
Ensure that each array in param_list, that is not None, has the same number of columns. Raises a helpful ValueError if otherwise. Parameters ---------- param_list : list of ndarrays or None. Returns ------- None.
[ "Ensure", "that", "each", "array", "in", "param_list", "that", "is", "not", "None", "has", "the", "same", "number", "of", "columns", ".", "Raises", "a", "helpful", "ValueError", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L443-L464
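The three param_list checks above compose naturally. A standalone numpy sketch of a list that passes all of them (expected length of four, an ndarray in the first slot, ndarray-or-None elsewhere, and equal column counts); the shapes and the meaning of each slot are made up for illustration.

import numpy as np

num_draws = 10  # hypothetical number of parameter samples (columns)
param_list = [np.ones((3, num_draws)),   # first element must be an ndarray
              np.zeros((2, num_draws)),  # remaining elements are ndarrays ...
              None,                      # ... or None
              None]

assert isinstance(param_list, list) and len(param_list) == 4
assert isinstance(param_list[0], np.ndarray)
assert all(x is None or isinstance(x, np.ndarray) for x in param_list)
num_columns = param_list[0].shape[1]
assert all(x is None or x.shape[1] == num_columns for x in param_list)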
880
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
ensure_all_mixing_vars_are_in_the_name_dict
def ensure_all_mixing_vars_are_in_the_name_dict(mixing_vars, name_dict, ind_var_names): """ Ensures that all of the variables listed in `mixing_vars` are present in `ind_var_names`. Raises a helpful ValueError if otherwise. Parameters ---------- mixing_vars : list of strings, or None. Each string denotes a parameter to be treated as a random variable. name_dict : OrderedDict or None. Contains the specification relating column headers in one's data (i.e. the keys of the OrderedDict) to the index coefficients to be estimated based on this data (i.e. the values of each key). ind_var_names : list of strings. Each string denotes an index coefficient (i.e. a beta) to be estimated. Returns ------- None. """ if mixing_vars is None: return None # Determine the strings in mixing_vars that are missing from ind_var_names problem_names = [variable_name for variable_name in mixing_vars if variable_name not in ind_var_names] # Create error messages for the case where we have a name dictionary and # the case where we do not have a name dictionary. msg_0 = "The following parameter names were not in the values of the " msg_1 = "passed name dictionary: \n{}" msg_with_name_dict = msg_0 + msg_1.format(problem_names) msg_2 = "The following paramter names did not match any of the default " msg_3 = "names generated for the parameters to be estimated: \n{}" msg_4 = "The default names that were generated were: \n{}" msg_without_name_dict = (msg_2 + msg_3.format(problem_names) + msg_4.format(ind_var_names)) # Raise a helpful ValueError if any mixing_vars were missing from # ind_var_names if problem_names != []: if name_dict: raise ValueError(msg_with_name_dict) else: raise ValueError(msg_without_name_dict) return None
python
def ensure_all_mixing_vars_are_in_the_name_dict(mixing_vars, name_dict, ind_var_names): """ Ensures that all of the variables listed in `mixing_vars` are present in `ind_var_names`. Raises a helpful ValueError if otherwise. Parameters ---------- mixing_vars : list of strings, or None. Each string denotes a parameter to be treated as a random variable. name_dict : OrderedDict or None. Contains the specification relating column headers in one's data (i.e. the keys of the OrderedDict) to the index coefficients to be estimated based on this data (i.e. the values of each key). ind_var_names : list of strings. Each string denotes an index coefficient (i.e. a beta) to be estimated. Returns ------- None. """ if mixing_vars is None: return None # Determine the strings in mixing_vars that are missing from ind_var_names problem_names = [variable_name for variable_name in mixing_vars if variable_name not in ind_var_names] # Create error messages for the case where we have a name dictionary and # the case where we do not have a name dictionary. msg_0 = "The following parameter names were not in the values of the " msg_1 = "passed name dictionary: \n{}" msg_with_name_dict = msg_0 + msg_1.format(problem_names) msg_2 = "The following paramter names did not match any of the default " msg_3 = "names generated for the parameters to be estimated: \n{}" msg_4 = "The default names that were generated were: \n{}" msg_without_name_dict = (msg_2 + msg_3.format(problem_names) + msg_4.format(ind_var_names)) # Raise a helpful ValueError if any mixing_vars were missing from # ind_var_names if problem_names != []: if name_dict: raise ValueError(msg_with_name_dict) else: raise ValueError(msg_without_name_dict) return None
[ "def", "ensure_all_mixing_vars_are_in_the_name_dict", "(", "mixing_vars", ",", "name_dict", ",", "ind_var_names", ")", ":", "if", "mixing_vars", "is", "None", ":", "return", "None", "# Determine the strings in mixing_vars that are missing from ind_var_names", "problem_names", "=", "[", "variable_name", "for", "variable_name", "in", "mixing_vars", "if", "variable_name", "not", "in", "ind_var_names", "]", "# Create error messages for the case where we have a name dictionary and", "# the case where we do not have a name dictionary.", "msg_0", "=", "\"The following parameter names were not in the values of the \"", "msg_1", "=", "\"passed name dictionary: \\n{}\"", "msg_with_name_dict", "=", "msg_0", "+", "msg_1", ".", "format", "(", "problem_names", ")", "msg_2", "=", "\"The following paramter names did not match any of the default \"", "msg_3", "=", "\"names generated for the parameters to be estimated: \\n{}\"", "msg_4", "=", "\"The default names that were generated were: \\n{}\"", "msg_without_name_dict", "=", "(", "msg_2", "+", "msg_3", ".", "format", "(", "problem_names", ")", "+", "msg_4", ".", "format", "(", "ind_var_names", ")", ")", "# Raise a helpful ValueError if any mixing_vars were missing from", "# ind_var_names", "if", "problem_names", "!=", "[", "]", ":", "if", "name_dict", ":", "raise", "ValueError", "(", "msg_with_name_dict", ")", "else", ":", "raise", "ValueError", "(", "msg_without_name_dict", ")", "return", "None" ]
Ensures that all of the variables listed in `mixing_vars` are present in `ind_var_names`. Raises a helpful ValueError if otherwise. Parameters ---------- mixing_vars : list of strings, or None. Each string denotes a parameter to be treated as a random variable. name_dict : OrderedDict or None. Contains the specification relating column headers in one's data (i.e. the keys of the OrderedDict) to the index coefficients to be estimated based on this data (i.e. the values of each key). ind_var_names : list of strings. Each string denotes an index coefficient (i.e. a beta) to be estimated. Returns ------- None.
[ "Ensures", "that", "all", "of", "the", "variables", "listed", "in", "mixing_vars", "are", "present", "in", "ind_var_names", ".", "Raises", "a", "helpful", "ValueError", "if", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L524-L574
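A small standalone sketch of the membership check above: the only real work is a list comprehension that collects mixing variables missing from the index coefficient names; the variable names are made up.

ind_var_names = ["travel_time", "travel_cost", "ASC bus"]
mixing_vars = ["travel_time", "income"]  # 'income' was never specified

problem_names = [name for name in mixing_vars if name not in ind_var_names]
print(problem_names)  # ['income'] -> the function would raise ValueError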
881
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
compute_aic
def compute_aic(model_object): """ Compute the Akaike Information Criteria for an estimated model. Parameters ---------- model_object : an MNDC_Model (multinomial discrete choice model) instance. The model should have already been estimated. `model_object.log_likelihood` should be a number, and `model_object.params` should be a pandas Series. Returns ------- aic : float. The AIC for the estimated model. Notes ----- aic = -2 * log_likelihood + 2 * num_estimated_parameters References ---------- Akaike, H. (1974). 'A new look at the statistical identification model', IEEE Transactions on Automatic Control 19, 6: 716-723. """ assert isinstance(model_object.params, pd.Series) assert isinstance(model_object.log_likelihood, Number) return -2 * model_object.log_likelihood + 2 * model_object.params.size
python
def compute_aic(model_object): """ Compute the Akaike Information Criteria for an estimated model. Parameters ---------- model_object : an MNDC_Model (multinomial discrete choice model) instance. The model should have already been estimated. `model_object.log_likelihood` should be a number, and `model_object.params` should be a pandas Series. Returns ------- aic : float. The AIC for the estimated model. Notes ----- aic = -2 * log_likelihood + 2 * num_estimated_parameters References ---------- Akaike, H. (1974). 'A new look at the statistical identification model', IEEE Transactions on Automatic Control 19, 6: 716-723. """ assert isinstance(model_object.params, pd.Series) assert isinstance(model_object.log_likelihood, Number) return -2 * model_object.log_likelihood + 2 * model_object.params.size
[ "def", "compute_aic", "(", "model_object", ")", ":", "assert", "isinstance", "(", "model_object", ".", "params", ",", "pd", ".", "Series", ")", "assert", "isinstance", "(", "model_object", ".", "log_likelihood", ",", "Number", ")", "return", "-", "2", "*", "model_object", ".", "log_likelihood", "+", "2", "*", "model_object", ".", "params", ".", "size" ]
Compute the Akaike Information Criteria for an estimated model. Parameters ---------- model_object : an MNDC_Model (multinomial discrete choice model) instance. The model should have already been estimated. `model_object.log_likelihood` should be a number, and `model_object.params` should be a pandas Series. Returns ------- aic : float. The AIC for the estimated model. Notes ----- aic = -2 * log_likelihood + 2 * num_estimated_parameters References ---------- Akaike, H. (1974). 'A new look at the statistical identification model', IEEE Transactions on Automatic Control 19, 6: 716-723.
[ "Compute", "the", "Akaike", "Information", "Criteria", "for", "an", "estimated", "model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L611-L639
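A worked instance of the AIC formula from the notes above, with made-up numbers (a log-likelihood of -1000 and 5 estimated parameters):

log_likelihood = -1000.0
num_params = 5

aic = -2 * log_likelihood + 2 * num_params
print(aic)  # 2010.0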
882
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
compute_bic
def compute_bic(model_object): """ Compute the Bayesian Information Criteria for an estimated model. Parameters ---------- model_object : an MNDC_Model (multinomial discrete choice model) instance. The model should have already been estimated. `model_object.log_likelihood` and `model_object.nobs` should be a number, and `model_object.params` should be a pandas Series. Returns ------- bic : float. The BIC for the estimated model. Notes ----- bic = -2 * log_likelihood + log(num_observations) * num_parameters The original BIC was introduced as (-1 / 2) times the formula above. However, for model comparison purposes, it does not matter if the goodness-of-fit measure is multiplied by a constant across all models being compared. Moreover, the formula used above allows for a common scale between measures such as the AIC, BIC, DIC, etc. References ---------- Schwarz, G. (1978), 'Estimating the dimension of a model', The Annals of Statistics 6, 2: 461–464. """ assert isinstance(model_object.params, pd.Series) assert isinstance(model_object.log_likelihood, Number) assert isinstance(model_object.nobs, Number) log_likelihood = model_object.log_likelihood num_obs = model_object.nobs num_params = model_object.params.size return -2 * log_likelihood + np.log(num_obs) * num_params
python
def compute_bic(model_object): """ Compute the Bayesian Information Criteria for an estimated model. Parameters ---------- model_object : an MNDC_Model (multinomial discrete choice model) instance. The model should have already been estimated. `model_object.log_likelihood` and `model_object.nobs` should be a number, and `model_object.params` should be a pandas Series. Returns ------- bic : float. The BIC for the estimated model. Notes ----- bic = -2 * log_likelihood + log(num_observations) * num_parameters The original BIC was introduced as (-1 / 2) times the formula above. However, for model comparison purposes, it does not matter if the goodness-of-fit measure is multiplied by a constant across all models being compared. Moreover, the formula used above allows for a common scale between measures such as the AIC, BIC, DIC, etc. References ---------- Schwarz, G. (1978), 'Estimating the dimension of a model', The Annals of Statistics 6, 2: 461–464. """ assert isinstance(model_object.params, pd.Series) assert isinstance(model_object.log_likelihood, Number) assert isinstance(model_object.nobs, Number) log_likelihood = model_object.log_likelihood num_obs = model_object.nobs num_params = model_object.params.size return -2 * log_likelihood + np.log(num_obs) * num_params
[ "def", "compute_bic", "(", "model_object", ")", ":", "assert", "isinstance", "(", "model_object", ".", "params", ",", "pd", ".", "Series", ")", "assert", "isinstance", "(", "model_object", ".", "log_likelihood", ",", "Number", ")", "assert", "isinstance", "(", "model_object", ".", "nobs", ",", "Number", ")", "log_likelihood", "=", "model_object", ".", "log_likelihood", "num_obs", "=", "model_object", ".", "nobs", "num_params", "=", "model_object", ".", "params", ".", "size", "return", "-", "2", "*", "log_likelihood", "+", "np", ".", "log", "(", "num_obs", ")", "*", "num_params" ]
Compute the Bayesian Information Criteria for an estimated model. Parameters ---------- model_object : an MNDC_Model (multinomial discrete choice model) instance. The model should have already been estimated. `model_object.log_likelihood` and `model_object.nobs` should be a number, and `model_object.params` should be a pandas Series. Returns ------- bic : float. The BIC for the estimated model. Notes ----- bic = -2 * log_likelihood + log(num_observations) * num_parameters The original BIC was introduced as (-1 / 2) times the formula above. However, for model comparison purposes, it does not matter if the goodness-of-fit measure is multiplied by a constant across all models being compared. Moreover, the formula used above allows for a common scale between measures such as the AIC, BIC, DIC, etc. References ---------- Schwarz, G. (1978), 'Estimating the dimension of a model', The Annals of Statistics 6, 2: 461–464.
[ "Compute", "the", "Bayesian", "Information", "Criteria", "for", "an", "estimated", "model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L642-L681
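The same kind of hypothetical stand-in works for compute_bic; the only extra requirement is a numeric nobs attribute. All values are made up.

import pandas as pd

class _MockModel(object):
    # Hypothetical stand-in exposing just the attributes compute_bic reads.
    def __init__(self, log_likelihood, params, nobs):
        self.log_likelihood = log_likelihood
        self.params = params
        self.nobs = nobs

model = _MockModel(-1200.5, pd.Series([0.4, -0.2, 1.1], index=["x1", "x2", "x3"]), 3500)
print(compute_bic(model))   # 2401.0 + 3 * ln(3500), roughly 2425.5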
883
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._create_results_summary
def _create_results_summary(self): """ Create the dataframe that displays the estimation results, and store it on the model instance. Returns ------- None. """ # Make sure we have all attributes needed to create the results summary needed_attributes = ["params", "standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] try: assert all([hasattr(self, attr) for attr in needed_attributes]) assert all([isinstance(getattr(self, attr), pd.Series) for attr in needed_attributes]) except AssertionError: msg = "Call this function only after setting/calculating all other" msg_2 = " estimation results attributes" raise NotImplementedError(msg + msg_2) self.summary = pd.concat((self.params, self.standard_errors, self.tvalues, self.pvalues, self.robust_std_errs, self.robust_t_stats, self.robust_p_vals), axis=1) return None
python
def _create_results_summary(self): """ Create the dataframe that displays the estimation results, and store it on the model instance. Returns ------- None. """ # Make sure we have all attributes needed to create the results summary needed_attributes = ["params", "standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] try: assert all([hasattr(self, attr) for attr in needed_attributes]) assert all([isinstance(getattr(self, attr), pd.Series) for attr in needed_attributes]) except AssertionError: msg = "Call this function only after setting/calculating all other" msg_2 = " estimation results attributes" raise NotImplementedError(msg + msg_2) self.summary = pd.concat((self.params, self.standard_errors, self.tvalues, self.pvalues, self.robust_std_errs, self.robust_t_stats, self.robust_p_vals), axis=1) return None
[ "def", "_create_results_summary", "(", "self", ")", ":", "# Make sure we have all attributes needed to create the results summary", "needed_attributes", "=", "[", "\"params\"", ",", "\"standard_errors\"", ",", "\"tvalues\"", ",", "\"pvalues\"", ",", "\"robust_std_errs\"", ",", "\"robust_t_stats\"", ",", "\"robust_p_vals\"", "]", "try", ":", "assert", "all", "(", "[", "hasattr", "(", "self", ",", "attr", ")", "for", "attr", "in", "needed_attributes", "]", ")", "assert", "all", "(", "[", "isinstance", "(", "getattr", "(", "self", ",", "attr", ")", ",", "pd", ".", "Series", ")", "for", "attr", "in", "needed_attributes", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"Call this function only after setting/calculating all other\"", "msg_2", "=", "\" estimation results attributes\"", "raise", "NotImplementedError", "(", "msg", "+", "msg_2", ")", "self", ".", "summary", "=", "pd", ".", "concat", "(", "(", "self", ".", "params", ",", "self", ".", "standard_errors", ",", "self", ".", "tvalues", ",", "self", ".", "pvalues", ",", "self", ".", "robust_std_errs", ",", "self", ".", "robust_t_stats", ",", "self", ".", "robust_p_vals", ")", ",", "axis", "=", "1", ")", "return", "None" ]
Create the dataframe that displays the estimation results, and store it on the model instance. Returns ------- None.
[ "Create", "the", "dataframe", "that", "displays", "the", "estimation", "results", "and", "store", "it", "on", "the", "model", "instance", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L995-L1029
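The summary attribute is nothing more than a column-wise concatenation of identically indexed Series. A standalone sketch of that pattern with toy numbers, not real estimation output:

import pandas as pd

index = ["x1", "x2"]
params = pd.Series([0.8, -0.3], index=index, name="parameters")
std_errs = pd.Series([0.10, 0.05], index=index, name="std_err")
t_stats = (params / std_errs).rename("t_stats")

summary = pd.concat((params, std_errs, t_stats), axis=1)
print(summary)   # one row per coefficient, one column per statistic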
884
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._record_values_for_fit_summary_and_statsmodels
def _record_values_for_fit_summary_and_statsmodels(self): """ Store the various estimation results that are used to describe how well the estimated model fits the given dataset, and record the values that are needed for the statsmodels estimation results table. All values are stored on the model instance. Returns ------- None. """ # Make sure we have all attributes needed to create the results summary needed_attributes = ["fitted_probs", "params", "log_likelihood", "standard_errors"] try: assert all([hasattr(self, attr) for attr in needed_attributes]) assert all([getattr(self, attr) is not None for attr in needed_attributes]) except AssertionError: msg = "Call this function only after setting/calculating all other" msg_2 = " estimation results attributes" raise NotImplementedError(msg + msg_2) # Record the number of observations self.nobs = self.fitted_probs.shape[0] # This is the number of estimated parameters self.df_model = self.params.shape[0] # The number of observations minus the number of estimated parameters self.df_resid = self.nobs - self.df_model # This is just the log-likelihood. The opaque name is used for # conformance with statsmodels self.llf = self.log_likelihood # This is just a repeat of the standard errors self.bse = self.standard_errors # These are the penalized measures of fit used for model comparison self.aic = compute_aic(self) self.bic = compute_bic(self) return None
python
def _record_values_for_fit_summary_and_statsmodels(self): """ Store the various estimation results that are used to describe how well the estimated model fits the given dataset, and record the values that are needed for the statsmodels estimation results table. All values are stored on the model instance. Returns ------- None. """ # Make sure we have all attributes needed to create the results summary needed_attributes = ["fitted_probs", "params", "log_likelihood", "standard_errors"] try: assert all([hasattr(self, attr) for attr in needed_attributes]) assert all([getattr(self, attr) is not None for attr in needed_attributes]) except AssertionError: msg = "Call this function only after setting/calculating all other" msg_2 = " estimation results attributes" raise NotImplementedError(msg + msg_2) # Record the number of observations self.nobs = self.fitted_probs.shape[0] # This is the number of estimated parameters self.df_model = self.params.shape[0] # The number of observations minus the number of estimated parameters self.df_resid = self.nobs - self.df_model # This is just the log-likelihood. The opaque name is used for # conformance with statsmodels self.llf = self.log_likelihood # This is just a repeat of the standard errors self.bse = self.standard_errors # These are the penalized measures of fit used for model comparison self.aic = compute_aic(self) self.bic = compute_bic(self) return None
[ "def", "_record_values_for_fit_summary_and_statsmodels", "(", "self", ")", ":", "# Make sure we have all attributes needed to create the results summary", "needed_attributes", "=", "[", "\"fitted_probs\"", ",", "\"params\"", ",", "\"log_likelihood\"", ",", "\"standard_errors\"", "]", "try", ":", "assert", "all", "(", "[", "hasattr", "(", "self", ",", "attr", ")", "for", "attr", "in", "needed_attributes", "]", ")", "assert", "all", "(", "[", "getattr", "(", "self", ",", "attr", ")", "is", "not", "None", "for", "attr", "in", "needed_attributes", "]", ")", "except", "AssertionError", ":", "msg", "=", "\"Call this function only after setting/calculating all other\"", "msg_2", "=", "\" estimation results attributes\"", "raise", "NotImplementedError", "(", "msg", "+", "msg_2", ")", "# Record the number of observations", "self", ".", "nobs", "=", "self", ".", "fitted_probs", ".", "shape", "[", "0", "]", "# This is the number of estimated parameters", "self", ".", "df_model", "=", "self", ".", "params", ".", "shape", "[", "0", "]", "# The number of observations minus the number of estimated parameters", "self", ".", "df_resid", "=", "self", ".", "nobs", "-", "self", ".", "df_model", "# This is just the log-likelihood. The opaque name is used for", "# conformance with statsmodels", "self", ".", "llf", "=", "self", ".", "log_likelihood", "# This is just a repeat of the standard errors", "self", ".", "bse", "=", "self", ".", "standard_errors", "# These are the penalized measures of fit used for model comparison", "self", ".", "aic", "=", "compute_aic", "(", "self", ")", "self", ".", "bic", "=", "compute_bic", "(", "self", ")", "return", "None" ]
Store the various estimation results that are used to describe how well the estimated model fits the given dataset, and record the values that are needed for the statsmodels estimation results table. All values are stored on the model instance. Returns ------- None.
[ "Store", "the", "various", "estimation", "results", "that", "are", "used", "to", "describe", "how", "well", "the", "estimated", "model", "fits", "the", "given", "dataset", "and", "record", "the", "values", "that", "are", "needed", "for", "the", "statsmodels", "estimation", "results", "table", ".", "All", "values", "are", "stored", "on", "the", "model", "instance", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1031-L1071
885
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._store_inferential_results
def _store_inferential_results(self, value_array, index_names, attribute_name, series_name=None, column_names=None): """ Store the estimation results that relate to statistical inference, such as parameter estimates, standard errors, p-values, etc. Parameters ---------- value_array : 1D or 2D ndarray. Contains the values that are to be stored on the model instance. index_names : list of strings. Contains the names that are to be displayed on the 'rows' for each value being stored. There should be one element for each value of `value_array.` series_name : string or None, optional. The name of the pandas series being created for `value_array.` This kwarg should be None when `value_array` is a 1D ndarray. attribute_name : string. The attribute name that will be exposed on the model instance and related to the passed `value_array.` column_names : list of strings, or None, optional. Same as `index_names` except that it pertains to the columns of a 2D ndarray. When `value_array` is a 2D ndarray, There should be one element for each column of `value_array.` This kwarg should be None otherwise. Returns ------- None. Stores a pandas series or dataframe on the model instance. """ if len(value_array.shape) == 1: assert series_name is not None new_attribute_value = pd.Series(value_array, index=index_names, name=series_name) elif len(value_array.shape) == 2: assert column_names is not None new_attribute_value = pd.DataFrame(value_array, index=index_names, columns=column_names) setattr(self, attribute_name, new_attribute_value) return None
python
def _store_inferential_results(self, value_array, index_names, attribute_name, series_name=None, column_names=None): """ Store the estimation results that relate to statistical inference, such as parameter estimates, standard errors, p-values, etc. Parameters ---------- value_array : 1D or 2D ndarray. Contains the values that are to be stored on the model instance. index_names : list of strings. Contains the names that are to be displayed on the 'rows' for each value being stored. There should be one element for each value of `value_array.` series_name : string or None, optional. The name of the pandas series being created for `value_array.` This kwarg should be None when `value_array` is a 1D ndarray. attribute_name : string. The attribute name that will be exposed on the model instance and related to the passed `value_array.` column_names : list of strings, or None, optional. Same as `index_names` except that it pertains to the columns of a 2D ndarray. When `value_array` is a 2D ndarray, There should be one element for each column of `value_array.` This kwarg should be None otherwise. Returns ------- None. Stores a pandas series or dataframe on the model instance. """ if len(value_array.shape) == 1: assert series_name is not None new_attribute_value = pd.Series(value_array, index=index_names, name=series_name) elif len(value_array.shape) == 2: assert column_names is not None new_attribute_value = pd.DataFrame(value_array, index=index_names, columns=column_names) setattr(self, attribute_name, new_attribute_value) return None
[ "def", "_store_inferential_results", "(", "self", ",", "value_array", ",", "index_names", ",", "attribute_name", ",", "series_name", "=", "None", ",", "column_names", "=", "None", ")", ":", "if", "len", "(", "value_array", ".", "shape", ")", "==", "1", ":", "assert", "series_name", "is", "not", "None", "new_attribute_value", "=", "pd", ".", "Series", "(", "value_array", ",", "index", "=", "index_names", ",", "name", "=", "series_name", ")", "elif", "len", "(", "value_array", ".", "shape", ")", "==", "2", ":", "assert", "column_names", "is", "not", "None", "new_attribute_value", "=", "pd", ".", "DataFrame", "(", "value_array", ",", "index", "=", "index_names", ",", "columns", "=", "column_names", ")", "setattr", "(", "self", ",", "attribute_name", ",", "new_attribute_value", ")", "return", "None" ]
Store the estimation results that relate to statistical inference, such as parameter estimates, standard errors, p-values, etc. Parameters ---------- value_array : 1D or 2D ndarray. Contains the values that are to be stored on the model instance. index_names : list of strings. Contains the names that are to be displayed on the 'rows' for each value being stored. There should be one element for each value of `value_array.` series_name : string or None, optional. The name of the pandas series being created for `value_array.` This kwarg should be None when `value_array` is a 1D ndarray. attribute_name : string. The attribute name that will be exposed on the model instance and related to the passed `value_array.` column_names : list of strings, or None, optional. Same as `index_names` except that it pertains to the columns of a 2D ndarray. When `value_array` is a 2D ndarray, There should be one element for each column of `value_array.` This kwarg should be None otherwise. Returns ------- None. Stores a pandas series or dataframe on the model instance.
[ "Store", "the", "estimation", "results", "that", "relate", "to", "statistical", "inference", "such", "as", "parameter", "estimates", "standard", "errors", "p", "-", "values", "etc", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1117-L1164
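The method's core logic is a shape-based dispatch: 1D arrays become a named Series, 2D arrays become a DataFrame. The helper below reproduces that dispatch outside the class; wrap_values is an invented name, not pylogit API.

import numpy as np
import pandas as pd

def wrap_values(value_array, index_names, series_name=None, column_names=None):
    # Mirrors the shape check used by _store_inferential_results.
    if len(value_array.shape) == 1:
        assert series_name is not None
        return pd.Series(value_array, index=index_names, name=series_name)
    assert column_names is not None
    return pd.DataFrame(value_array, index=index_names, columns=column_names)

print(wrap_values(np.array([0.10, 0.05]), ["x1", "x2"], series_name="std_err"))
print(wrap_values(np.eye(2), ["x1", "x2"], column_names=["x1", "x2"]))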
886
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._store_generic_inference_results
def _store_generic_inference_results(self, results_dict, all_params, all_names): """ Store the model inference values that are common to all choice models. This includes things like index coefficients, gradients, hessians, asymptotic covariance matrices, t-values, p-values, and robust versions of these values. Parameters ---------- results_dict : dict. The estimation result dictionary that is output from scipy.optimize.minimize. In addition to the standard keys which are included, it should also contain the following keys: `["utility_coefs", "final_gradient", "final_hessian", "fisher_info"]`. The "final_gradient", "final_hessian", and "fisher_info" values should be the gradient, hessian, and Fisher-Information Matrix of the log likelihood, evaluated at the final parameter vector. all_params : list of 1D ndarrays. Should contain the various types of parameters that were actually estimated. all_names : list of strings. Should contain names of each estimated parameter. Returns ------- None. Stores all results on the model instance. """ # Store the utility coefficients self._store_inferential_results(results_dict["utility_coefs"], index_names=self.ind_var_names, attribute_name="coefs", series_name="coefficients") # Store the gradient self._store_inferential_results(results_dict["final_gradient"], index_names=all_names, attribute_name="gradient", series_name="gradient") # Store the hessian self._store_inferential_results(results_dict["final_hessian"], index_names=all_names, attribute_name="hessian", column_names=all_names) # Store the variance-covariance matrix self._store_inferential_results(-1 * scipy.linalg.inv(self.hessian), index_names=all_names, attribute_name="cov", column_names=all_names) # Store ALL of the estimated parameters self._store_inferential_results(np.concatenate(all_params, axis=0), index_names=all_names, attribute_name="params", series_name="parameters") # Store the standard errors self._store_inferential_results(np.sqrt(np.diag(self.cov)), index_names=all_names, attribute_name="standard_errors", series_name="std_err") # Store the t-stats of the estimated parameters self.tvalues = self.params / self.standard_errors self.tvalues.name = "t_stats" # Store the p-values p_vals = 2 * scipy.stats.norm.sf(np.abs(self.tvalues)) self._store_inferential_results(p_vals, index_names=all_names, attribute_name="pvalues", series_name="p_values") # Store the fischer information matrix of estimated coefficients self._store_inferential_results(results_dict["fisher_info"], index_names=all_names, attribute_name="fisher_information", column_names=all_names) # Store the 'robust' variance-covariance matrix robust_covariance = calc_asymptotic_covariance(self.hessian, self.fisher_information) self._store_inferential_results(robust_covariance, index_names=all_names, attribute_name="robust_cov", column_names=all_names) # Store the 'robust' standard errors self._store_inferential_results(np.sqrt(np.diag(self.robust_cov)), index_names=all_names, attribute_name="robust_std_errs", series_name="robust_std_err") # Store the 'robust' t-stats of the estimated coefficients self.robust_t_stats = self.params / self.robust_std_errs self.robust_t_stats.name = "robust_t_stats" # Store the 'robust' p-values one_sided_p_vals = scipy.stats.norm.sf(np.abs(self.robust_t_stats)) self._store_inferential_results(2 * one_sided_p_vals, index_names=all_names, attribute_name="robust_p_vals", series_name="robust_p_values") return None
python
def _store_generic_inference_results(self, results_dict, all_params, all_names): """ Store the model inference values that are common to all choice models. This includes things like index coefficients, gradients, hessians, asymptotic covariance matrices, t-values, p-values, and robust versions of these values. Parameters ---------- results_dict : dict. The estimation result dictionary that is output from scipy.optimize.minimize. In addition to the standard keys which are included, it should also contain the following keys: `["utility_coefs", "final_gradient", "final_hessian", "fisher_info"]`. The "final_gradient", "final_hessian", and "fisher_info" values should be the gradient, hessian, and Fisher-Information Matrix of the log likelihood, evaluated at the final parameter vector. all_params : list of 1D ndarrays. Should contain the various types of parameters that were actually estimated. all_names : list of strings. Should contain names of each estimated parameter. Returns ------- None. Stores all results on the model instance. """ # Store the utility coefficients self._store_inferential_results(results_dict["utility_coefs"], index_names=self.ind_var_names, attribute_name="coefs", series_name="coefficients") # Store the gradient self._store_inferential_results(results_dict["final_gradient"], index_names=all_names, attribute_name="gradient", series_name="gradient") # Store the hessian self._store_inferential_results(results_dict["final_hessian"], index_names=all_names, attribute_name="hessian", column_names=all_names) # Store the variance-covariance matrix self._store_inferential_results(-1 * scipy.linalg.inv(self.hessian), index_names=all_names, attribute_name="cov", column_names=all_names) # Store ALL of the estimated parameters self._store_inferential_results(np.concatenate(all_params, axis=0), index_names=all_names, attribute_name="params", series_name="parameters") # Store the standard errors self._store_inferential_results(np.sqrt(np.diag(self.cov)), index_names=all_names, attribute_name="standard_errors", series_name="std_err") # Store the t-stats of the estimated parameters self.tvalues = self.params / self.standard_errors self.tvalues.name = "t_stats" # Store the p-values p_vals = 2 * scipy.stats.norm.sf(np.abs(self.tvalues)) self._store_inferential_results(p_vals, index_names=all_names, attribute_name="pvalues", series_name="p_values") # Store the fischer information matrix of estimated coefficients self._store_inferential_results(results_dict["fisher_info"], index_names=all_names, attribute_name="fisher_information", column_names=all_names) # Store the 'robust' variance-covariance matrix robust_covariance = calc_asymptotic_covariance(self.hessian, self.fisher_information) self._store_inferential_results(robust_covariance, index_names=all_names, attribute_name="robust_cov", column_names=all_names) # Store the 'robust' standard errors self._store_inferential_results(np.sqrt(np.diag(self.robust_cov)), index_names=all_names, attribute_name="robust_std_errs", series_name="robust_std_err") # Store the 'robust' t-stats of the estimated coefficients self.robust_t_stats = self.params / self.robust_std_errs self.robust_t_stats.name = "robust_t_stats" # Store the 'robust' p-values one_sided_p_vals = scipy.stats.norm.sf(np.abs(self.robust_t_stats)) self._store_inferential_results(2 * one_sided_p_vals, index_names=all_names, attribute_name="robust_p_vals", series_name="robust_p_values") return None
[ "def", "_store_generic_inference_results", "(", "self", ",", "results_dict", ",", "all_params", ",", "all_names", ")", ":", "# Store the utility coefficients", "self", ".", "_store_inferential_results", "(", "results_dict", "[", "\"utility_coefs\"", "]", ",", "index_names", "=", "self", ".", "ind_var_names", ",", "attribute_name", "=", "\"coefs\"", ",", "series_name", "=", "\"coefficients\"", ")", "# Store the gradient", "self", ".", "_store_inferential_results", "(", "results_dict", "[", "\"final_gradient\"", "]", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"gradient\"", ",", "series_name", "=", "\"gradient\"", ")", "# Store the hessian", "self", ".", "_store_inferential_results", "(", "results_dict", "[", "\"final_hessian\"", "]", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"hessian\"", ",", "column_names", "=", "all_names", ")", "# Store the variance-covariance matrix", "self", ".", "_store_inferential_results", "(", "-", "1", "*", "scipy", ".", "linalg", ".", "inv", "(", "self", ".", "hessian", ")", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"cov\"", ",", "column_names", "=", "all_names", ")", "# Store ALL of the estimated parameters", "self", ".", "_store_inferential_results", "(", "np", ".", "concatenate", "(", "all_params", ",", "axis", "=", "0", ")", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"params\"", ",", "series_name", "=", "\"parameters\"", ")", "# Store the standard errors", "self", ".", "_store_inferential_results", "(", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "self", ".", "cov", ")", ")", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"standard_errors\"", ",", "series_name", "=", "\"std_err\"", ")", "# Store the t-stats of the estimated parameters", "self", ".", "tvalues", "=", "self", ".", "params", "/", "self", ".", "standard_errors", "self", ".", "tvalues", ".", "name", "=", "\"t_stats\"", "# Store the p-values", "p_vals", "=", "2", "*", "scipy", ".", "stats", ".", "norm", ".", "sf", "(", "np", ".", "abs", "(", "self", ".", "tvalues", ")", ")", "self", ".", "_store_inferential_results", "(", "p_vals", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"pvalues\"", ",", "series_name", "=", "\"p_values\"", ")", "# Store the fischer information matrix of estimated coefficients", "self", ".", "_store_inferential_results", "(", "results_dict", "[", "\"fisher_info\"", "]", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"fisher_information\"", ",", "column_names", "=", "all_names", ")", "# Store the 'robust' variance-covariance matrix", "robust_covariance", "=", "calc_asymptotic_covariance", "(", "self", ".", "hessian", ",", "self", ".", "fisher_information", ")", "self", ".", "_store_inferential_results", "(", "robust_covariance", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"robust_cov\"", ",", "column_names", "=", "all_names", ")", "# Store the 'robust' standard errors", "self", ".", "_store_inferential_results", "(", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "self", ".", "robust_cov", ")", ")", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"robust_std_errs\"", ",", "series_name", "=", "\"robust_std_err\"", ")", "# Store the 'robust' t-stats of the estimated coefficients", "self", ".", "robust_t_stats", "=", "self", ".", "params", "/", "self", ".", "robust_std_errs", "self", ".", "robust_t_stats", ".", "name", "=", "\"robust_t_stats\"", "# Store the 'robust' 
p-values", "one_sided_p_vals", "=", "scipy", ".", "stats", ".", "norm", ".", "sf", "(", "np", ".", "abs", "(", "self", ".", "robust_t_stats", ")", ")", "self", ".", "_store_inferential_results", "(", "2", "*", "one_sided_p_vals", ",", "index_names", "=", "all_names", ",", "attribute_name", "=", "\"robust_p_vals\"", ",", "series_name", "=", "\"robust_p_values\"", ")", "return", "None" ]
Store the model inference values that are common to all choice models. This includes things like index coefficients, gradients, hessians, asymptotic covariance matrices, t-values, p-values, and robust versions of these values. Parameters ---------- results_dict : dict. The estimation result dictionary that is output from scipy.optimize.minimize. In addition to the standard keys which are included, it should also contain the following keys: `["utility_coefs", "final_gradient", "final_hessian", "fisher_info"]`. The "final_gradient", "final_hessian", and "fisher_info" values should be the gradient, hessian, and Fisher-Information Matrix of the log likelihood, evaluated at the final parameter vector. all_params : list of 1D ndarrays. Should contain the various types of parameters that were actually estimated. all_names : list of strings. Should contain names of each estimated parameter. Returns ------- None. Stores all results on the model instance.
[ "Store", "the", "model", "inference", "values", "that", "are", "common", "to", "all", "choice", "models", ".", "This", "includes", "things", "like", "index", "coefficients", "gradients", "hessians", "asymptotic", "covariance", "matrices", "t", "-", "values", "p", "-", "values", "and", "robust", "versions", "of", "these", "values", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1166-L1275
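A toy walk through the inference chain the method applies: covariance from the negated inverse hessian, standard errors from its diagonal, then t-stats and two-sided p-values. The 2x2 hessian is made up rather than taken from a fitted model.

import numpy as np
from scipy import linalg, stats

params = np.array([0.8, -0.3])
hessian = np.array([[-40.0, 5.0],
                    [5.0, -25.0]])           # toy log-likelihood hessian

cov = -1 * linalg.inv(hessian)                # asymptotic variance-covariance matrix
std_errs = np.sqrt(np.diag(cov))
t_stats = params / std_errs
p_vals = 2 * stats.norm.sf(np.abs(t_stats))
print(std_errs, t_stats, p_vals)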
887
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._store_optional_parameters
def _store_optional_parameters(self, optional_params, name_list_attr, default_name_str, all_names, all_params, param_attr_name, series_name): """ Extract the optional parameters from the `results_dict`, save them to the model object, and update the list of all parameters and all parameter names. Parameters ---------- optional_params : 1D ndarray. The optional parameters whose values and names should be stored. name_list_attr : str. The attribute name on the model object where the names of the optional estimated parameters will be stored (if they exist). default_name_str : str. The name string that will be used to create generic names for the estimated parameters, in the event that the estimated parameters do not have names that were specified by the user. Should contain empty curly braces for use with python string formatting. all_names : list of strings. The current list of the names of the estimated parameters. The names of these optional parameters will be added to the beginning of this list. all_params : list of 1D ndarrays. Each array is a set of estimated parameters. The current optional parameters will be added to the beginning of this list. param_attr_name : str. The attribute name that will be used to store the optional parameter values on the model object. series_name : str. The string that will be used as the name of the series that contains the optional parameters. Returns ------- (all_names, all_params) : tuple. """ # Identify the number of optional parameters num_elements = optional_params.shape[0] # Get the names of the optional parameters parameter_names = getattr(self, name_list_attr) if parameter_names is None: parameter_names = [default_name_str.format(x) for x in range(1, num_elements + 1)] # Store the names of the optional parameters in all_names all_names = list(parameter_names) + list(all_names) # Store the values of the optional parameters in all_params all_params.insert(0, optional_params) # Store the optional parameters on the model object self._store_inferential_results(optional_params, index_names=parameter_names, attribute_name=param_attr_name, series_name=series_name) return all_names, all_params
python
def _store_optional_parameters(self, optional_params, name_list_attr, default_name_str, all_names, all_params, param_attr_name, series_name): """ Extract the optional parameters from the `results_dict`, save them to the model object, and update the list of all parameters and all parameter names. Parameters ---------- optional_params : 1D ndarray. The optional parameters whose values and names should be stored. name_list_attr : str. The attribute name on the model object where the names of the optional estimated parameters will be stored (if they exist). default_name_str : str. The name string that will be used to create generic names for the estimated parameters, in the event that the estimated parameters do not have names that were specified by the user. Should contain empty curly braces for use with python string formatting. all_names : list of strings. The current list of the names of the estimated parameters. The names of these optional parameters will be added to the beginning of this list. all_params : list of 1D ndarrays. Each array is a set of estimated parameters. The current optional parameters will be added to the beginning of this list. param_attr_name : str. The attribute name that will be used to store the optional parameter values on the model object. series_name : str. The string that will be used as the name of the series that contains the optional parameters. Returns ------- (all_names, all_params) : tuple. """ # Identify the number of optional parameters num_elements = optional_params.shape[0] # Get the names of the optional parameters parameter_names = getattr(self, name_list_attr) if parameter_names is None: parameter_names = [default_name_str.format(x) for x in range(1, num_elements + 1)] # Store the names of the optional parameters in all_names all_names = list(parameter_names) + list(all_names) # Store the values of the optional parameters in all_params all_params.insert(0, optional_params) # Store the optional parameters on the model object self._store_inferential_results(optional_params, index_names=parameter_names, attribute_name=param_attr_name, series_name=series_name) return all_names, all_params
[ "def", "_store_optional_parameters", "(", "self", ",", "optional_params", ",", "name_list_attr", ",", "default_name_str", ",", "all_names", ",", "all_params", ",", "param_attr_name", ",", "series_name", ")", ":", "# Identify the number of optional parameters", "num_elements", "=", "optional_params", ".", "shape", "[", "0", "]", "# Get the names of the optional parameters", "parameter_names", "=", "getattr", "(", "self", ",", "name_list_attr", ")", "if", "parameter_names", "is", "None", ":", "parameter_names", "=", "[", "default_name_str", ".", "format", "(", "x", ")", "for", "x", "in", "range", "(", "1", ",", "num_elements", "+", "1", ")", "]", "# Store the names of the optional parameters in all_names", "all_names", "=", "list", "(", "parameter_names", ")", "+", "list", "(", "all_names", ")", "# Store the values of the optional parameters in all_params", "all_params", ".", "insert", "(", "0", ",", "optional_params", ")", "# Store the optional parameters on the model object", "self", ".", "_store_inferential_results", "(", "optional_params", ",", "index_names", "=", "parameter_names", ",", "attribute_name", "=", "param_attr_name", ",", "series_name", "=", "series_name", ")", "return", "all_names", ",", "all_params" ]
Extract the optional parameters from the `results_dict`, save them to the model object, and update the list of all parameters and all parameter names. Parameters ---------- optional_params : 1D ndarray. The optional parameters whose values and names should be stored. name_list_attr : str. The attribute name on the model object where the names of the optional estimated parameters will be stored (if they exist). default_name_str : str. The name string that will be used to create generic names for the estimated parameters, in the event that the estimated parameters do not have names that were specified by the user. Should contain empty curly braces for use with python string formatting. all_names : list of strings. The current list of the names of the estimated parameters. The names of these optional parameters will be added to the beginning of this list. all_params : list of 1D ndarrays. Each array is a set of estimated parameters. The current optional parameters will be added to the beginning of this list. param_attr_name : str. The attribute name that will be used to store the optional parameter values on the model object. series_name : str. The string that will be used as the name of the series that contains the optional parameters. Returns ------- (all_names, all_params) : tuple.
[ "Extract", "the", "optional", "parameters", "from", "the", "results_dict", "save", "them", "to", "the", "model", "object", "and", "update", "the", "list", "of", "all", "parameters", "and", "all", "parameter", "names", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1277-L1339
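A sketch of just the default-naming and prepending behaviour, outside the class. The "Shape Parameter {}" format string stands in for default_name_str and is not a value taken from pylogit; the other names are invented too.

import numpy as np

optional_params = np.array([1.5, 0.7])
user_supplied_names = None                       # what getattr(self, name_list_attr) might return
default_name_str = "Shape Parameter {}"          # illustrative stand-in only

parameter_names = user_supplied_names
if parameter_names is None:
    parameter_names = [default_name_str.format(x)
                       for x in range(1, optional_params.shape[0] + 1)]

all_names = ["x1", "x2"]
all_params = [np.array([0.4, -0.2])]
all_names = list(parameter_names) + list(all_names)    # optional names go first
all_params.insert(0, optional_params)
print(all_names)    # ['Shape Parameter 1', 'Shape Parameter 2', 'x1', 'x2']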
888
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._adjust_inferential_results_for_parameter_constraints
def _adjust_inferential_results_for_parameter_constraints(self, constraints): """ Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None. """ if constraints is not None: # Ensure the model object has inferential results inferential_attributes = ["standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] assert all([hasattr(self, x) for x in inferential_attributes]) assert hasattr(self, "params") all_names = self.params.index.tolist() for series in [getattr(self, x) for x in inferential_attributes]: for pos in constraints: series.loc[all_names[pos]] = np.nan return None
python
def _adjust_inferential_results_for_parameter_constraints(self, constraints): """ Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None. """ if constraints is not None: # Ensure the model object has inferential results inferential_attributes = ["standard_errors", "tvalues", "pvalues", "robust_std_errs", "robust_t_stats", "robust_p_vals"] assert all([hasattr(self, x) for x in inferential_attributes]) assert hasattr(self, "params") all_names = self.params.index.tolist() for series in [getattr(self, x) for x in inferential_attributes]: for pos in constraints: series.loc[all_names[pos]] = np.nan return None
[ "def", "_adjust_inferential_results_for_parameter_constraints", "(", "self", ",", "constraints", ")", ":", "if", "constraints", "is", "not", "None", ":", "# Ensure the model object has inferential results", "inferential_attributes", "=", "[", "\"standard_errors\"", ",", "\"tvalues\"", ",", "\"pvalues\"", ",", "\"robust_std_errs\"", ",", "\"robust_t_stats\"", ",", "\"robust_p_vals\"", "]", "assert", "all", "(", "[", "hasattr", "(", "self", ",", "x", ")", "for", "x", "in", "inferential_attributes", "]", ")", "assert", "hasattr", "(", "self", ",", "\"params\"", ")", "all_names", "=", "self", ".", "params", ".", "index", ".", "tolist", "(", ")", "for", "series", "in", "[", "getattr", "(", "self", ",", "x", ")", "for", "x", "in", "inferential_attributes", "]", ":", "for", "pos", "in", "constraints", ":", "series", ".", "loc", "[", "all_names", "[", "pos", "]", "]", "=", "np", ".", "nan", "return", "None" ]
Ensure that parameters that were constrained during estimation do not have any values showed for inferential results. After all, no inference was performed. Parameters ---------- constraints : list of ints, or None. If list, should contain the positions in the array of all estimated parameters that were constrained to their initial values. Returns ------- None.
[ "Ensure", "that", "parameters", "that", "were", "constrained", "during", "estimation", "do", "not", "have", "any", "values", "showed", "for", "inferential", "results", ".", "After", "all", "no", "inference", "was", "performed", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1341-L1375
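The adjustment amounts to overwriting the constrained positions of each inferential Series with NaN. A toy version with one constrained parameter:

import numpy as np
import pandas as pd

standard_errors = pd.Series([0.10, 0.05, 0.20], index=["x1", "x2", "x3"])
constraints = [1]                        # x2 was held at its starting value

all_names = standard_errors.index.tolist()
for pos in constraints:
    standard_errors.loc[all_names[pos]] = np.nan
print(standard_errors)                   # x2 now shows NaN instead of a standard error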
889
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._check_result_dict_for_needed_keys
def _check_result_dict_for_needed_keys(self, results_dict): """ Ensure that `results_dict` has the needed keys to store all the estimation results. Raise a helpful ValueError otherwise. """ missing_cols = [x for x in needed_result_keys if x not in results_dict] if missing_cols != []: msg = "The following keys are missing from results_dict\n{}" raise ValueError(msg.format(missing_cols)) return None
python
def _check_result_dict_for_needed_keys(self, results_dict): """ Ensure that `results_dict` has the needed keys to store all the estimation results. Raise a helpful ValueError otherwise. """ missing_cols = [x for x in needed_result_keys if x not in results_dict] if missing_cols != []: msg = "The following keys are missing from results_dict\n{}" raise ValueError(msg.format(missing_cols)) return None
[ "def", "_check_result_dict_for_needed_keys", "(", "self", ",", "results_dict", ")", ":", "missing_cols", "=", "[", "x", "for", "x", "in", "needed_result_keys", "if", "x", "not", "in", "results_dict", "]", "if", "missing_cols", "!=", "[", "]", ":", "msg", "=", "\"The following keys are missing from results_dict\\n{}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "missing_cols", ")", ")", "return", "None" ]
Ensure that `results_dict` has the needed keys to store all the estimation results. Raise a helpful ValueError otherwise.
[ "Ensure", "that", "results_dict", "has", "the", "needed", "keys", "to", "store", "all", "the", "estimation", "results", ".", "Raise", "a", "helpful", "ValueError", "otherwise", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1377-L1386
890
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model._add_mixing_variable_names_to_individual_vars
def _add_mixing_variable_names_to_individual_vars(self): """ Ensure that the model objects mixing variables are added to its list of individual variables. """ assert isinstance(self.ind_var_names, list) # Note that if one estimates a mixed logit model, then the mixing # variables will be added to individual vars. And if one estimates # the model again (perhaps from different starting values), then # an error will be raised when creating the coefs series because we # will have added the mixing variables twice. The condition below # should prevent this error. already_included = any(["Sigma " in x for x in self.ind_var_names]) if self.mixing_vars is not None and not already_included: new_ind_var_names = ["Sigma " + x for x in self.mixing_vars] self.ind_var_names += new_ind_var_names return None
python
def _add_mixing_variable_names_to_individual_vars(self): """ Ensure that the model objects mixing variables are added to its list of individual variables. """ assert isinstance(self.ind_var_names, list) # Note that if one estimates a mixed logit model, then the mixing # variables will be added to individual vars. And if one estimates # the model again (perhaps from different starting values), then # an error will be raised when creating the coefs series because we # will have added the mixing variables twice. The condition below # should prevent this error. already_included = any(["Sigma " in x for x in self.ind_var_names]) if self.mixing_vars is not None and not already_included: new_ind_var_names = ["Sigma " + x for x in self.mixing_vars] self.ind_var_names += new_ind_var_names return None
[ "def", "_add_mixing_variable_names_to_individual_vars", "(", "self", ")", ":", "assert", "isinstance", "(", "self", ".", "ind_var_names", ",", "list", ")", "# Note that if one estimates a mixed logit model, then the mixing", "# variables will be added to individual vars. And if one estimates", "# the model again (perhaps from different starting values), then", "# an error will be raised when creating the coefs series because we", "# will have added the mixing variables twice. The condition below", "# should prevent this error.", "already_included", "=", "any", "(", "[", "\"Sigma \"", "in", "x", "for", "x", "in", "self", ".", "ind_var_names", "]", ")", "if", "self", ".", "mixing_vars", "is", "not", "None", "and", "not", "already_included", ":", "new_ind_var_names", "=", "[", "\"Sigma \"", "+", "x", "for", "x", "in", "self", ".", "mixing_vars", "]", "self", ".", "ind_var_names", "+=", "new_ind_var_names", "return", "None" ]
Ensure that the model objects mixing variables are added to its list of individual variables.
[ "Ensure", "that", "the", "model", "objects", "mixing", "variables", "are", "added", "to", "its", "list", "of", "individual", "variables", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1388-L1405
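A standalone sketch of the 'Sigma ' renaming and the guard against adding the names twice; the variable names are invented.

ind_var_names = ["travel_time", "travel_cost"]
mixing_vars = ["travel_time"]

already_included = any(["Sigma " in x for x in ind_var_names])
if mixing_vars is not None and not already_included:
    ind_var_names += ["Sigma " + x for x in mixing_vars]
print(ind_var_names)    # ['travel_time', 'travel_cost', 'Sigma travel_time']
# Running the block a second time changes nothing, which is what the guard is for.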
891
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
MNDC_Model.print_summaries
def print_summaries(self): """ Returns None. Will print the measures of fit and the estimation results for the model. """ if hasattr(self, "fit_summary") and hasattr(self, "summary"): print("\n") print(self.fit_summary) print("=" * 30) print(self.summary) else: msg = "This {} object has not yet been estimated so there " msg_2 = "are no estimation summaries to print." raise NotImplementedError(msg.format(self.model_type) + msg_2) return None
python
def print_summaries(self): """ Returns None. Will print the measures of fit and the estimation results for the model. """ if hasattr(self, "fit_summary") and hasattr(self, "summary"): print("\n") print(self.fit_summary) print("=" * 30) print(self.summary) else: msg = "This {} object has not yet been estimated so there " msg_2 = "are no estimation summaries to print." raise NotImplementedError(msg.format(self.model_type) + msg_2) return None
[ "def", "print_summaries", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"fit_summary\"", ")", "and", "hasattr", "(", "self", ",", "\"summary\"", ")", ":", "print", "(", "\"\\n\"", ")", "print", "(", "self", ".", "fit_summary", ")", "print", "(", "\"=\"", "*", "30", ")", "print", "(", "self", ".", "summary", ")", "else", ":", "msg", "=", "\"This {} object has not yet been estimated so there \"", "msg_2", "=", "\"are no estimation summaries to print.\"", "raise", "NotImplementedError", "(", "msg", ".", "format", "(", "self", ".", "model_type", ")", "+", "msg_2", ")", "return", "None" ]
Returns None. Will print the measures of fit and the estimation results for the model.
[ "Returns", "None", ".", "Will", "print", "the", "measures", "of", "fit", "and", "the", "estimation", "results", "for", "the", "model", "." ]
f83b0fd6debaa7358d87c3828428f6d4ead71357
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L1556-L1572
892
taskcluster/json-e
jsone/prattparser.py
prefix
def prefix(*kinds): """Decorate a method as handling prefix tokens of the given kinds""" def wrap(fn): try: fn.prefix_kinds.extend(kinds) except AttributeError: fn.prefix_kinds = list(kinds) return fn return wrap
python
def prefix(*kinds): """Decorate a method as handling prefix tokens of the given kinds""" def wrap(fn): try: fn.prefix_kinds.extend(kinds) except AttributeError: fn.prefix_kinds = list(kinds) return fn return wrap
[ "def", "prefix", "(", "*", "kinds", ")", ":", "def", "wrap", "(", "fn", ")", ":", "try", ":", "fn", ".", "prefix_kinds", ".", "extend", "(", "kinds", ")", "except", "AttributeError", ":", "fn", ".", "prefix_kinds", "=", "list", "(", "kinds", ")", "return", "fn", "return", "wrap" ]
Decorate a method as handling prefix tokens of the given kinds
[ "Decorate", "a", "method", "as", "handling", "prefix", "tokens", "of", "the", "given", "kinds" ]
ac0c9fba1de3ed619f05a64dae929f6687789cbc
https://github.com/taskcluster/json-e/blob/ac0c9fba1de3ed619f05a64dae929f6687789cbc/jsone/prattparser.py#L20-L28
893
taskcluster/json-e
jsone/prattparser.py
infix
def infix(*kinds): """Decorate a method as handling infix tokens of the given kinds""" def wrap(fn): try: fn.infix_kinds.extend(kinds) except AttributeError: fn.infix_kinds = list(kinds) return fn return wrap
python
def infix(*kinds): """Decorate a method as handling infix tokens of the given kinds""" def wrap(fn): try: fn.infix_kinds.extend(kinds) except AttributeError: fn.infix_kinds = list(kinds) return fn return wrap
[ "def", "infix", "(", "*", "kinds", ")", ":", "def", "wrap", "(", "fn", ")", ":", "try", ":", "fn", ".", "infix_kinds", ".", "extend", "(", "kinds", ")", "except", "AttributeError", ":", "fn", ".", "infix_kinds", "=", "list", "(", "kinds", ")", "return", "fn", "return", "wrap" ]
Decorate a method as handling infix tokens of the given kinds
[ "Decorate", "a", "method", "as", "handling", "infix", "tokens", "of", "the", "given", "kinds" ]
ac0c9fba1de3ed619f05a64dae929f6687789cbc
https://github.com/taskcluster/json-e/blob/ac0c9fba1de3ed619f05a64dae929f6687789cbc/jsone/prattparser.py#L31-L39
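A sketch of how a parser class might tag handler methods with these decorators. The class, method names, and token kinds are invented for illustration; the real json-e parser defines its own handlers and collects the tagged methods elsewhere.

class ToyParser(object):
    @prefix('-', '!')
    def negation_handler(self, token, context):
        pass

    @infix('+', '-')
    def addition_handler(self, left, token, context):
        pass

print(ToyParser.negation_handler.prefix_kinds)    # ['-', '!']
print(ToyParser.addition_handler.infix_kinds)     # ['+', '-']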
894
taskcluster/json-e
jsone/prattparser.py
ParseContext.attempt
def attempt(self, *kinds): """Try to get the next token if it matches one of the kinds given, otherwise returning None. If no kinds are given, any kind is accepted.""" if self._error: raise self._error token = self.next_token if not token: return None if kinds and token.kind not in kinds: return None self._advance() return token
python
def attempt(self, *kinds): """Try to get the next token if it matches one of the kinds given, otherwise returning None. If no kinds are given, any kind is accepted.""" if self._error: raise self._error token = self.next_token if not token: return None if kinds and token.kind not in kinds: return None self._advance() return token
[ "def", "attempt", "(", "self", ",", "*", "kinds", ")", ":", "if", "self", ".", "_error", ":", "raise", "self", ".", "_error", "token", "=", "self", ".", "next_token", "if", "not", "token", ":", "return", "None", "if", "kinds", "and", "token", ".", "kind", "not", "in", "kinds", ":", "return", "None", "self", ".", "_advance", "(", ")", "return", "token" ]
Try to get the next token if it matches one of the kinds given, otherwise returning None. If no kinds are given, any kind is accepted.
[ "Try", "to", "get", "the", "next", "token", "if", "it", "matches", "one", "of", "the", "kinds", "given", "otherwise", "returning", "None", ".", "If", "no", "kinds", "are", "given", "any", "kind", "is", "accepted", "." ]
ac0c9fba1de3ed619f05a64dae929f6687789cbc
https://github.com/taskcluster/json-e/blob/ac0c9fba1de3ed619f05a64dae929f6687789cbc/jsone/prattparser.py#L150-L162
895
taskcluster/json-e
jsone/prattparser.py
ParseContext.require
def require(self, *kinds): """Get the next token, raising an exception if it doesn't match one of the given kinds, or the input ends. If no kinds are given, returns the next token of any kind.""" token = self.attempt() if not token: raise SyntaxError('Unexpected end of input') if kinds and token.kind not in kinds: raise SyntaxError.unexpected(token, kinds) return token
python
def require(self, *kinds): """Get the next token, raising an exception if it doesn't match one of the given kinds, or the input ends. If no kinds are given, returns the next token of any kind.""" token = self.attempt() if not token: raise SyntaxError('Unexpected end of input') if kinds and token.kind not in kinds: raise SyntaxError.unexpected(token, kinds) return token
[ "def", "require", "(", "self", ",", "*", "kinds", ")", ":", "token", "=", "self", ".", "attempt", "(", ")", "if", "not", "token", ":", "raise", "SyntaxError", "(", "'Unexpected end of input'", ")", "if", "kinds", "and", "token", ".", "kind", "not", "in", "kinds", ":", "raise", "SyntaxError", ".", "unexpected", "(", "token", ",", "kinds", ")", "return", "token" ]
Get the next token, raising an exception if it doesn't match one of the given kinds, or the input ends. If no kinds are given, returns the next token of any kind.
[ "Get", "the", "next", "token", "raising", "an", "exception", "if", "it", "doesn", "t", "match", "one", "of", "the", "given", "kinds", "or", "the", "input", "ends", ".", "If", "no", "kinds", "are", "given", "returns", "the", "next", "token", "of", "any", "kind", "." ]
ac0c9fba1de3ed619f05a64dae929f6687789cbc
https://github.com/taskcluster/json-e/blob/ac0c9fba1de3ed619f05a64dae929f6687789cbc/jsone/prattparser.py#L164-L173
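attempt and require form the usual peek-or-demand pair of a Pratt parser's token stream. The class below is a standalone illustration of that pattern on a hand-rolled list of (kind, value) pairs; it is not the library's ParseContext, which also threads tokenizer errors through _error.

class ToyTokenStream(object):
    def __init__(self, tokens):
        self._tokens = list(tokens)

    def attempt(self, *kinds):
        # Consume the next token only if it matches one of the given kinds (or any kind).
        if not self._tokens:
            return None
        if kinds and self._tokens[0][0] not in kinds:
            return None
        return self._tokens.pop(0)

    def require(self, *kinds):
        # Consume the next token, failing loudly on end of input or a kind mismatch.
        token = self.attempt()
        if not token:
            raise SyntaxError('Unexpected end of input')
        if kinds and token[0] not in kinds:
            raise SyntaxError('Unexpected token: %r' % (token,))
        return token

stream = ToyTokenStream([('number', '1'), ('+', '+'), ('number', '2')])
left = stream.require('number')
if stream.attempt('+'):
    right = stream.require('number')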
896
amzn/ion-python
amazon/ion/symbols.py
local_symbol_table
def local_symbol_table(imports=None, symbols=()): """Constructs a local symbol table. Args: imports (Optional[SymbolTable]): Shared symbol tables to import. symbols (Optional[Iterable[Unicode]]): Initial local symbols to add. Returns: SymbolTable: A mutable local symbol table with the seeded local symbols. """ return SymbolTable( table_type=LOCAL_TABLE_TYPE, symbols=symbols, imports=imports )
python
def local_symbol_table(imports=None, symbols=()): """Constructs a local symbol table. Args: imports (Optional[SymbolTable]): Shared symbol tables to import. symbols (Optional[Iterable[Unicode]]): Initial local symbols to add. Returns: SymbolTable: A mutable local symbol table with the seeded local symbols. """ return SymbolTable( table_type=LOCAL_TABLE_TYPE, symbols=symbols, imports=imports )
[ "def", "local_symbol_table", "(", "imports", "=", "None", ",", "symbols", "=", "(", ")", ")", ":", "return", "SymbolTable", "(", "table_type", "=", "LOCAL_TABLE_TYPE", ",", "symbols", "=", "symbols", ",", "imports", "=", "imports", ")" ]
Constructs a local symbol table. Args: imports (Optional[SymbolTable]): Shared symbol tables to import. symbols (Optional[Iterable[Unicode]]): Initial local symbols to add. Returns: SymbolTable: A mutable local symbol table with the seeded local symbols.
[ "Constructs", "a", "local", "symbol", "table", "." ]
0b21fa3ba7755f55f745e4aa970d86343b82449d
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/symbols.py#L380-L394
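A minimal construction example; the symbol strings are arbitrary, and the only attribute read afterwards (table_type.is_shared) is one this module itself uses below.

local = local_symbol_table(symbols=[u'user_id', u'timestamp'])
print(local.table_type.is_shared)    # False, since this is a local table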
897
amzn/ion-python
amazon/ion/symbols.py
shared_symbol_table
def shared_symbol_table(name, version, symbols, imports=None): """Constructs a shared symbol table. Args: name (unicode): The name of the shared symbol table. version (int): The version of the shared symbol table. symbols (Iterable[unicode]): The symbols to associate with the table. imports (Optional[Iterable[SymbolTable]): The shared symbol tables to inject into this one. Returns: SymbolTable: The constructed table. """ return SymbolTable( table_type=SHARED_TABLE_TYPE, symbols=symbols, name=name, version=version, imports=imports )
python
def shared_symbol_table(name, version, symbols, imports=None): """Constructs a shared symbol table. Args: name (unicode): The name of the shared symbol table. version (int): The version of the shared symbol table. symbols (Iterable[unicode]): The symbols to associate with the table. imports (Optional[Iterable[SymbolTable]): The shared symbol tables to inject into this one. Returns: SymbolTable: The constructed table. """ return SymbolTable( table_type=SHARED_TABLE_TYPE, symbols=symbols, name=name, version=version, imports=imports )
[ "def", "shared_symbol_table", "(", "name", ",", "version", ",", "symbols", ",", "imports", "=", "None", ")", ":", "return", "SymbolTable", "(", "table_type", "=", "SHARED_TABLE_TYPE", ",", "symbols", "=", "symbols", ",", "name", "=", "name", ",", "version", "=", "version", ",", "imports", "=", "imports", ")" ]
Constructs a shared symbol table. Args: name (unicode): The name of the shared symbol table. version (int): The version of the shared symbol table. symbols (Iterable[unicode]): The symbols to associate with the table. imports (Optional[Iterable[SymbolTable]): The shared symbol tables to inject into this one. Returns: SymbolTable: The constructed table.
[ "Constructs", "a", "shared", "symbol", "table", "." ]
0b21fa3ba7755f55f745e4aa970d86343b82449d
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/symbols.py#L397-L415
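A small construction example. The table name and symbols are arbitrary, and passing a list of shared tables as imports to local_symbol_table is an assumption based on the plural wording of its docstring.

shared = shared_symbol_table(u'com.example.symbols', 1, [u'red', u'green', u'blue'])
print(shared.name, shared.max_id)    # expected: com.example.symbols 3

# Assumption: a list of shared tables is accepted for the imports argument.
local = local_symbol_table(imports=[shared], symbols=[u'extra'])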
898
amzn/ion-python
amazon/ion/symbols.py
placeholder_symbol_table
def placeholder_symbol_table(name, version, max_id):
    """Constructs a shared symbol table that consists symbols that all have no known text.

    This is generally used for cases where a shared symbol table is not available by the
    application.

    Args:
        name (unicode): The name of the shared symbol table.
        version (int): The version of the shared symbol table.
        max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``

    Returns:
        SymbolTable: The synthesized table.
    """
    if version <= 0:
        raise ValueError('Version must be grater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)

    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=repeat(None, max_id),
        name=name,
        version=version,
        is_substitute=True
    )
python
def placeholder_symbol_table(name, version, max_id):
    """Constructs a shared symbol table that consists symbols that all have no known text.

    This is generally used for cases where a shared symbol table is not available by the
    application.

    Args:
        name (unicode): The name of the shared symbol table.
        version (int): The version of the shared symbol table.
        max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``

    Returns:
        SymbolTable: The synthesized table.
    """
    if version <= 0:
        raise ValueError('Version must be grater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)

    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=repeat(None, max_id),
        name=name,
        version=version,
        is_substitute=True
    )
[ "def", "placeholder_symbol_table", "(", "name", ",", "version", ",", "max_id", ")", ":", "if", "version", "<=", "0", ":", "raise", "ValueError", "(", "'Version must be grater than or equal to 1: %s'", "%", "version", ")", "if", "max_id", "<", "0", ":", "raise", "ValueError", "(", "'Max ID must be zero or positive: %s'", "%", "max_id", ")", "return", "SymbolTable", "(", "table_type", "=", "SHARED_TABLE_TYPE", ",", "symbols", "=", "repeat", "(", "None", ",", "max_id", ")", ",", "name", "=", "name", ",", "version", "=", "version", ",", "is_substitute", "=", "True", ")" ]
Constructs a shared symbol table that consists symbols that all have no known text.

This is generally used for cases where a shared symbol table is not available by the
application.

Args:
    name (unicode): The name of the shared symbol table.
    version (int): The version of the shared symbol table.
    max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``

Returns:
    SymbolTable: The synthesized table.
[ "Constructs", "a", "shared", "symbol", "table", "that", "consists", "symbols", "that", "all", "have", "no", "known", "text", "." ]
0b21fa3ba7755f55f745e4aa970d86343b82449d
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/symbols.py#L418-L443
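A hedged sketch of how the placeholder_symbol_table record above might be used when the real shared table cannot be resolved. The import path is assumed from the record's path field.

# Hypothetical usage sketch; import path assumed from the record's path field.
from amazon.ion.symbols import placeholder_symbol_table

# Stand in for an unavailable shared table: ten symbol slots, all with no known
# text (the record's code builds them via repeat(None, max_id)).
placeholder = placeholder_symbol_table(u'com.example.missing', 2, 10)

# Per the guards in the record's code, a non-positive version or a negative
# max_id would raise ValueError instead of producing a table.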
899
amzn/ion-python
amazon/ion/symbols.py
substitute_symbol_table
def substitute_symbol_table(table, version, max_id):
    """Substitutes a given shared symbol table for another version.

    * If the given table has **more** symbols than the requested substitute,
      then the generated symbol table will be a subset of the given table.
    * If the given table has **less** symbols than the requested substitute,
      then the generated symbol table will have symbols with unknown text
      generated for the difference.

    Args:
        table (SymbolTable): The shared table to derive from.
        version (int): The version to target.
        max_id (int): The maximum ID allocated by the substitute, must be ``>= 0``.

    Returns:
        SymbolTable: The synthesized table.
    """
    if not table.table_type.is_shared:
        raise ValueError('Symbol table to substitute from must be a shared table')
    if version <= 0:
        raise ValueError('Version must be grater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)

    # TODO Recycle the symbol tokens from the source table into the substitute.
    if max_id <= table.max_id:
        symbols = (token.text for token in islice(table, max_id))
    else:
        symbols = chain(
            (token.text for token in table),
            repeat(None, max_id - table.max_id)
        )

    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=symbols,
        name=table.name,
        version=version,
        is_substitute=True
    )
python
def substitute_symbol_table(table, version, max_id):
    """Substitutes a given shared symbol table for another version.

    * If the given table has **more** symbols than the requested substitute,
      then the generated symbol table will be a subset of the given table.
    * If the given table has **less** symbols than the requested substitute,
      then the generated symbol table will have symbols with unknown text
      generated for the difference.

    Args:
        table (SymbolTable): The shared table to derive from.
        version (int): The version to target.
        max_id (int): The maximum ID allocated by the substitute, must be ``>= 0``.

    Returns:
        SymbolTable: The synthesized table.
    """
    if not table.table_type.is_shared:
        raise ValueError('Symbol table to substitute from must be a shared table')
    if version <= 0:
        raise ValueError('Version must be grater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)

    # TODO Recycle the symbol tokens from the source table into the substitute.
    if max_id <= table.max_id:
        symbols = (token.text for token in islice(table, max_id))
    else:
        symbols = chain(
            (token.text for token in table),
            repeat(None, max_id - table.max_id)
        )

    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=symbols,
        name=table.name,
        version=version,
        is_substitute=True
    )
[ "def", "substitute_symbol_table", "(", "table", ",", "version", ",", "max_id", ")", ":", "if", "not", "table", ".", "table_type", ".", "is_shared", ":", "raise", "ValueError", "(", "'Symbol table to substitute from must be a shared table'", ")", "if", "version", "<=", "0", ":", "raise", "ValueError", "(", "'Version must be grater than or equal to 1: %s'", "%", "version", ")", "if", "max_id", "<", "0", ":", "raise", "ValueError", "(", "'Max ID must be zero or positive: %s'", "%", "max_id", ")", "# TODO Recycle the symbol tokens from the source table into the substitute.", "if", "max_id", "<=", "table", ".", "max_id", ":", "symbols", "=", "(", "token", ".", "text", "for", "token", "in", "islice", "(", "table", ",", "max_id", ")", ")", "else", ":", "symbols", "=", "chain", "(", "(", "token", ".", "text", "for", "token", "in", "table", ")", ",", "repeat", "(", "None", ",", "max_id", "-", "table", ".", "max_id", ")", ")", "return", "SymbolTable", "(", "table_type", "=", "SHARED_TABLE_TYPE", ",", "symbols", "=", "symbols", ",", "name", "=", "table", ".", "name", ",", "version", "=", "version", ",", "is_substitute", "=", "True", ")" ]
Substitutes a given shared symbol table for another version.

* If the given table has **more** symbols than the requested substitute,
  then the generated symbol table will be a subset of the given table.
* If the given table has **less** symbols than the requested substitute,
  then the generated symbol table will have symbols with unknown text
  generated for the difference.

Args:
    table (SymbolTable): The shared table to derive from.
    version (int): The version to target.
    max_id (int): The maximum ID allocated by the substitute, must be ``>= 0``.

Returns:
    SymbolTable: The synthesized table.
[ "Substitutes", "a", "given", "shared", "symbol", "table", "for", "another", "version", "." ]
0b21fa3ba7755f55f745e4aa970d86343b82449d
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/symbols.py#L446-L484
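A hedged sketch pairing the shared_symbol_table and substitute_symbol_table records. The import path is assumed from the records' path field, and iterating a table to get tokens with a .text attribute mirrors what the substitute_symbol_table code itself does.

# Hypothetical usage sketch; import path assumed from the records' path field.
from amazon.ion.symbols import shared_symbol_table, substitute_symbol_table

# Version 1 of a shared table with three known symbol texts.
v1 = shared_symbol_table(u'com.example.test', 1, [u'a', u'b', u'c'])

# Ask for a version-2 substitute with max_id 5: the known texts are kept and
# the remaining slots get unknown text (None), per the chain/repeat branch in
# the record's code.
v2 = substitute_symbol_table(v1, 2, 5)
print([token.text for token in v2])  # expected: [u'a', u'b', u'c', None, None]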