Dataset columns: code (string, 26 to 870k chars), docstring (string, 1 to 65.6k chars), func_name (string, 1 to 194 chars), language (string, 1 class), repo (string, 8 to 68 chars), path (string, 5 to 182 chars), url (string, 46 to 251 chars), license (string, 4 classes).
def test_build_from_template(temp_with_override, cli):
    """Test building the book template and a few test configs."""
    # Create the book from the template
    book = temp_with_override / "new_book"
    _ = cli.invoke(commands.create, book.as_posix())
    build_result = cli.invoke(
        commands.build, [book.as_posix(), "-n", "-W", "--keep-going"]
    )
    assert build_result.exit_code == 0, build_result.output
    html = book.joinpath("_build", "html")
    assert html.joinpath("index.html").exists()
    assert html.joinpath("intro.html").exists()
Test building the book template and a few test configs.
test_build_from_template
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_build_dirhtml_from_template(temp_with_override, cli):
    """Test building the book template with dirhtml."""
    # Create the book from the template
    book = temp_with_override / "new_book"
    _ = cli.invoke(commands.create, book.as_posix())
    build_result = cli.invoke(
        commands.build, [book.as_posix(), "-n", "-W", "--builder", "dirhtml"]
    )
    assert build_result.exit_code == 0, build_result.output
    html = book.joinpath("_build", "dirhtml")
    assert html.joinpath("index.html").exists()
    assert html.joinpath("intro", "index.html").exists()
Test building the book template with dirhtml.
test_build_dirhtml_from_template
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_build_singlehtml_from_template(temp_with_override, cli):
    """Test building the book template with singlehtml."""
    # Create the book from the template
    book = temp_with_override / "new_book"
    _ = cli.invoke(commands.create, book.as_posix())
    build_result = cli.invoke(
        commands.build, [book.as_posix(), "-n", "-W", "--builder", "singlehtml"]
    )
    # TODO: Remove when docutils>=0.20 is pinned in jupyter-book
    # https://github.com/mcmtroffaes/sphinxcontrib-bibtex/issues/322
    if (0, 18) <= docutils.__version_info__ < (0, 20):
        assert build_result.exit_code == 1, build_result.output
    else:
        assert build_result.exit_code == 0, build_result.output
        html = book.joinpath("_build", "singlehtml")
        assert html.joinpath("index.html").exists()
        assert html.joinpath("intro.html").exists()
Test building the book template with singlehtml.
test_build_singlehtml_from_template
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_custom_config(cli, build_resources):
    """Test a variety of custom configuration values."""
    books, _ = build_resources
    config = books.joinpath("config")
    result = cli.invoke(commands.build, [config.as_posix(), "-n", "-W", "--keep-going"])
    assert result.exit_code == 0, result.output
    html = config.joinpath("_build", "html", "index.html").read_text(encoding="utf8")
    soup = BeautifulSoup(html, "html.parser")
    assert '<p class="title logo__title">TEST PROJECT NAME</p>' in html
    assert '<div class="tab-set docutils">' in html
    assert '<link rel="stylesheet" type="text/css" href="_static/mycss.css" />' in html
    assert '<script src="_static/js/myjs.js"></script>' in html
    # Check that our comments engines were correctly added
    assert soup.find("script", attrs={"kind": "hypothesis"})
    assert soup.find("script", attrs={"kind": "utterances"})
Test a variety of custom configuration values.
test_custom_config
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_toc_builds(cli, build_resources, toc):
    """Test building the book template with several different TOC files."""
    books, tocs = build_resources
    result = cli.invoke(
        commands.build,
        [tocs.as_posix(), "--toc", (tocs / toc).as_posix(), "-n", "-W", "--keep-going"],
    )
    assert result.exit_code == 0, result.output
Test building the book template with several different TOC files.
test_toc_builds
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_toc_rebuild(cli, build_resources):
    """Changes to the TOC should force a re-build of pages. Also tests for
    changes to the relative ordering of content pages.
    """
    _, tocs = build_resources
    toc = tocs / "_toc_simple.yml"
    index_html = tocs.joinpath("_build", "html", "index.html")
    # Not using -W because we expect warnings for pages not listed in TOC
    result = cli.invoke(
        commands.build,
        [tocs.as_posix(), "--toc", toc.as_posix(), "-n"],
    )
    html = BeautifulSoup(index_html.read_text(encoding="utf8"), "html.parser")
    tags = html.find_all("a", "reference internal")
    assert result.exit_code == 0, result.output
    assert tags[1].attrs["href"] == "content1.html"
    assert tags[2].attrs["href"] == "content2.html"
    # Clean build manually (to avoid caching of sidebar)
    build_path = tocs.joinpath("_build")
    shutil.rmtree(build_path)
    # Build with secondary ToC
    toc = tocs / "_toc_simple_changed.yml"
    result = cli.invoke(
        commands.build,
        [tocs.as_posix(), "--toc", toc.as_posix(), "-n"],
    )
    assert result.exit_code == 0, result.output
    html = BeautifulSoup(index_html.read_text(encoding="utf8"), "html.parser")
    tags = html.find_all("a", "reference internal")
    # The rendered TOC should reflect the order in the modified _toc.yml
    assert tags[1].attrs["href"] == "content2.html"
    assert tags[2].attrs["href"] == "content1.html"
Changes to the TOC should force a re-build of pages. Also tests for changes to the relative ordering of content pages.
test_toc_rebuild
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_build_page(pages, cli):
    """Test building a page."""
    page = pages.joinpath("single_page.ipynb")
    html = pages.joinpath("_build", "_page", "single_page", "html")
    index = html.joinpath("index.html")
    result = cli.invoke(commands.build, [page.as_posix(), "-n", "-W", "--keep-going"])
    assert result.exit_code == 0, result.output
    assert html.joinpath("single_page.html").exists()
    assert not html.joinpath("extra_page.html").exists()
    assert 'url=single_page.html" />' in index.read_text(encoding="utf8")
Test building a page.
test_build_page
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_execution_timeout(pages, build_resources, cli):
    """Testing timeout execution for a page."""
    books, _ = build_resources
    path_page = pages.joinpath("loop_unrun.ipynb")
    path_c = books.joinpath("config", "_config_timeout.yml")
    path_html = pages.joinpath("_build", "_page", "loop_unrun", "html")
    result = cli.invoke(
        commands.build,
        [
            path_page.as_posix(),
            "--config",
            path_c.as_posix(),
            "-n",
            "-W",
            "--keep-going",
        ],
    )
    assert "Executing notebook failed:" in result.stdout
    assert path_html.joinpath("reports", "loop_unrun.err.log").exists()
Testing timeout execution for a page.
test_execution_timeout
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_build_using_custom_builder(cli, build_resources):
    """Test building the book template using a custom builder"""
    books, _ = build_resources
    config = books.joinpath("config_custombuilder")
    result = cli.invoke(
        commands.build,
        [
            config.as_posix(),
            "--builder=custom",
            "--custom-builder=mycustombuilder",
            "-n",
            "-W",
            "--keep-going",
        ],
    )
    assert result.exit_code == 0, result.output
    html = config.joinpath("_build", "mycustombuilder", "index.html").read_text(
        encoding="utf8"
    )
    assert '<p class="title logo__title">TEST PROJECT NAME</p>' in html
    assert '<link rel="stylesheet" type="text/css" href="_static/mycss.css" />' in html
    assert '<script src="_static/js/myjs.js"></script>' in html
Test building the book template using a custom builder
test_build_using_custom_builder
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_toc_numbered(
    toc_file: str, cli: CliRunner, temp_with_override, file_regression
):
    """Testing that numbers make it into the sidebar"""
    path_output = temp_with_override.joinpath("book1").absolute()
    p_toc = PATH_BOOKS.joinpath("toc")
    path_toc = p_toc.joinpath(toc_file)
    result = cli.invoke(
        commands.build,
        [
            p_toc.as_posix(),
            "--path-output",
            path_output.as_posix(),
            "--toc",
            path_toc.as_posix(),
            "-W",
        ],
    )
    assert result.exit_code == 0, result.output

    path_toc_directive = path_output.joinpath("_build", "html", "index.html")
    # get the tableofcontents markup
    soup = BeautifulSoup(path_toc_directive.read_text(encoding="utf8"), "html.parser")
    toc = soup.select("nav.bd-links")[0]
    file_regression.check(
        toc.prettify(),
        basename=toc_file.split(".")[0],
        extension=f"{SPHINX_VERSION}.html",
    )
Testing that numbers make it into the sidebar
test_toc_numbered
python
jupyter-book/jupyter-book
tests/test_build.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_build.py
BSD-3-Clause
def test_toc_numbered_multitoc_numbering_false(
    toc_file, cli, build_resources, file_regression
):
    """Testing use_multitoc_numbering: false"""
    books, tocs = build_resources
    config = books.joinpath("config").joinpath("_config_sphinx_multitoc_numbering.yml")
    toc = tocs.joinpath(toc_file)

    # TODO: commented out because of the issue described below. Uncomment when it is resolved.
    # Issue #1339: There is an issue when using CliRunner and building projects
    # that make use of --config. The internal state of Sphinx appears to
    # be correct, but the written outputs (i.e. html) are not correct
    # suggesting some type of caching is going on.
    # result = cli.invoke(
    #     commands.build,
    #     [
    #         tocs.as_posix(),
    #         "--path-output",
    #         books.as_posix(),
    #         "--toc",
    #         toc.as_posix(),
    #         "--config",
    #         config.as_posix(),
    #         "-W",
    #     ],
    # )
    # assert result.exit_code == 0, result.output
    process = subprocess.Popen(
        [
            "jb",
            "build",
            tocs.as_posix(),
            "--path-output",
            books.as_posix(),
            "--toc",
            toc.as_posix(),
            "--config",
            config.as_posix(),
            "-W",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = process.communicate()
    assert process.returncode == 0, stderr

    path_toc_directive = books.joinpath("_build", "html", "index.html")
    # get the tableofcontents markup
    soup = BeautifulSoup(path_toc_directive.read_text(encoding="utf8"), "html.parser")
    toc = soup.select("nav.bd-links")[0]
    file_regression.check(
        toc.prettify(),
        basename=toc_file.split(".")[0] + "_multitoc_numbering_false",
        extension=f"{SPHINX_VERSION}.html",
    )
Testing use_multitoc_numbering: false
test_toc_numbered_multitoc_numbering_false
python
jupyter-book/jupyter-book
tests/test_sphinx_multitoc_numbering.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_sphinx_multitoc_numbering.py
BSD-3-Clause
def test_toc_fail(cli: CliRunner, build_resources):
    """A folder with no content should fail with a helpful error."""
    books, tocs = build_resources
    p_empty = tocs.parent
    result = cli.invoke(create_toc, [p_empty.as_posix()])
    assert result.exit_code != 0
    assert isinstance(result.exception, OSError)
    assert "path does not contain a root file" in str(result.exception)
A folder with no content should fail with a helpful error.
test_toc_fail
python
jupyter-book/jupyter-book
tests/test_toc.py
https://github.com/jupyter-book/jupyter-book/blob/master/tests/test_toc.py
BSD-3-Clause
def init_myst_file(path, kernel, verbose=True):
    """Initialize a file with a Jupytext header that marks it as MyST markdown.

    Parameters
    ----------
    path : string
        A path to a markdown file to be initialized for Jupytext
    kernel : string
        A kernel name to add to the markdown file. See a list of kernel names
        with `jupyter kernelspec list`.
    verbose : bool
        Whether to print a confirmation message once the file is initialized.
    """
    try:
        from jupytext.cli import jupytext
    except ImportError:
        raise ImportError(
            "In order to use myst markdown features, "
            "please install jupytext first."
        )
    if not Path(path).exists():
        raise FileNotFoundError(f"Markdown file not found: {path}")

    kernels = list(find_kernel_specs().keys())
    kernels_text = "\n".join(kernels)
    if kernel is None:
        if len(kernels) > 1:
            _error(
                "There are multiple kernel options, so you must give one manually"
                " with `--kernel`.\nPlease specify one of the following kernels.\n\n"
                f"{kernels_text}"
            )
        else:
            kernel = kernels[0]

    if kernel not in kernels:
        raise ValueError(
            f"Did not find kernel: {kernel}\nPlease specify one of the "
            f"installed kernels:\n\n{kernels_text}"
        )

    args = (str(path), "-q", "--set-kernel", kernel, "--set-formats", "myst")
    jupytext(args)

    if verbose:
        print(f"Initialized file: {path}\nWith kernel: {kernel}")
Initialize a file with a Jupytext header that marks it as MyST markdown.

Parameters
----------
path : string
    A path to a markdown file to be initialized for Jupytext
kernel : string
    A kernel name to add to the markdown file. See a list of kernel names
    with `jupyter kernelspec list`.
verbose : bool
    Whether to print a confirmation message once the file is initialized.
init_myst_file
python
jupyter-book/jupyter-book
jupyter_book/utils.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/utils.py
BSD-3-Clause
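A minimal usage sketch for `init_myst_file`, assuming jupytext and a `python3` kernel are installed; the file path and content here are hypothetical:

from pathlib import Path

from jupyter_book.utils import init_myst_file

# Create a markdown page to initialize (hypothetical path and content)
page = Path("notes.md")
page.write_text("# My page\n\nSome content.\n", encoding="utf8")

# Attach a Jupytext MyST header that binds the page to the python3 kernel
init_myst_file(page, kernel="python3", verbose=True)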
def get_default_sphinx_config():
    """Some configuration values that are really sphinx-specific."""
    return dict(
        extensions=[
            "sphinx_togglebutton",
            "sphinx_copybutton",
            "myst_nb",
            "jupyter_book",
            "sphinx_thebe",
            "sphinx_comments",
            "sphinx_external_toc",
            "sphinx.ext.intersphinx",
            "sphinx_design",
            "sphinx_book_theme",
        ],
        pygments_style="sphinx",
        html_theme="sphinx_book_theme",
        html_theme_options={"search_bar_text": "Search this book..."},
        html_sourcelink_suffix="",
        numfig=True,
        recursive_update=False,
        suppress_warnings=["myst.domains"],
    )
Some configuration values that are really sphinx-specific.
get_default_sphinx_config
python
jupyter-book/jupyter-book
jupyter_book/config.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/config.py
BSD-3-Clause
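Since `get_default_sphinx_config` returns a plain dict, the defaults are easy to inspect before any user configuration is merged in; a quick sketch:

from jupyter_book.config import get_default_sphinx_config

defaults = get_default_sphinx_config()
print(defaults["html_theme"])      # sphinx_book_theme
print(defaults["extensions"][:3])  # first few bundled Sphinx extensions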
def validate_yaml(yaml: dict, raise_on_errors=False, print_func=print):
    """Validate the YAML configuration against a JSON schema."""
    errors = sorted(get_validator().iter_errors(yaml), key=lambda e: e.path)
    error_msg = "\n".join(
        [
            "- {} [key path: '{}']".format(
                error.message, "/".join([str(p) for p in error.path])
            )
            for error in errors
        ]
    )
    if not errors:
        return
    if raise_on_errors:
        raise jsonschema.ValidationError(error_msg)
    return _message_box(
        f"Warning: Validation errors in config:\n{error_msg}",
        color="orange",
        print_func=print_func,
    )
Validate the YAML configuration against a JSON schema.
validate_yaml
python
jupyter-book/jupyter-book
jupyter_book/config.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/config.py
BSD-3-Clause
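A short sketch of `validate_yaml` with a deliberately invalid config; this assumes the bundled schema requires `title` to be a string:

from jupyter_book.config import validate_yaml

bad_config = {"title": 123}  # assumed invalid: title should be a string

# Default behaviour: print a warning box and continue
validate_yaml(bad_config)

# Strict behaviour: raise jsonschema.ValidationError instead
validate_yaml(bad_config, raise_on_errors=True)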
def get_final_config(
    *,
    user_yaml: Optional[Union[dict, Path]] = None,
    cli_config: Optional[dict] = None,
    sourcedir: Optional[Path] = None,
    validate: bool = True,
    raise_on_invalid: bool = False,
    use_external_toc: bool = True,
):
    """Create the final configuration dictionary, to pass to Sphinx.

    :param user_yaml: A path to a YAML file written by the user,
        or an already-loaded configuration dict
    :param cli_config: Configuration coming directly from the CLI
    :param sourcedir: path to source directory.
        If it contains a `_static` folder, we add that to the final `html_static_path`
    :param validate: Validate user yaml against the data schema
    :param raise_on_invalid: Raise a ValidationError, or only log a warning

    Order of precedence is:

    1. CLI Sphinx Configuration
    2. User JB (YAML) Configuration
    3. Default JB (YAML) Configuration
    4. Default Sphinx Configuration
    """
    # get the default sphinx configuration
    sphinx_config = get_default_sphinx_config()

    # get the default yaml configuration
    yaml_config, default_yaml_update, add_paths = yaml_to_sphinx(
        yaml.safe_load(PATH_YAML_DEFAULT.read_text(encoding="utf8"))
    )
    yaml_config.update(default_yaml_update)

    # if available, get the user defined configuration
    user_yaml_recurse, user_yaml_update = {}, {}
    user_yaml_path = None
    if user_yaml:
        if isinstance(user_yaml, Path):
            user_yaml_path = user_yaml
            user_yaml = yaml.safe_load(user_yaml.read_text(encoding="utf8"))
        if validate:
            validate_yaml(user_yaml, raise_on_errors=raise_on_invalid)
        user_yaml_recurse, user_yaml_update, add_paths = yaml_to_sphinx(user_yaml)

    # add paths from yaml config
    if user_yaml_path:
        for path in add_paths:
            path = (user_yaml_path.parent / path).resolve()
            sys.path.append(path.as_posix())

    # first merge the user yaml into the default yaml
    _recursive_update(yaml_config, user_yaml_recurse)
    # then merge this into the default sphinx config
    _recursive_update(sphinx_config, yaml_config)

    # TODO: deprecate this in version 0.14
    # https://github.com/executablebooks/jupyter-book/issues/1502
    if "mathjax_config" in user_yaml_update:
        # Switch off warning if user has specified mathjax v2
        if (
            "mathjax_path" in user_yaml_update
            and "@2" in user_yaml_update["mathjax_path"]
        ):
            # use mathjax2_config so not to trigger deprecation warning in future
            user_yaml_update["mathjax2_config"] = user_yaml_update.pop("mathjax_config")
        else:
            _message_box(
                (
                    f"[Warning] Mathjax configuration has changed for sphinx>=4.0 [Using sphinx: {sphinx.__version__}]\n"  # noqa: E501
                    "Your _config.yml needs to be updated:\n"
                    "mathjax_config -> mathjax3_config\n"
                    "To continue using `mathjax v2` you will need to use the `mathjax_path` configuration\n"  # noqa: E501
                    "\n"
                    "See Sphinx Documentation:\n"
                    "https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-sphinx.ext.mathjax"  # noqa: E501
                ),
                color="orange",
                print_func=print,
            )
            # Automatically make the configuration name substitution so older projects build
            user_yaml_update["mathjax3_config"] = user_yaml_update.pop("mathjax_config")

    # Recursively update sphinx config if option is specified,
    # otherwise forcefully override options non-recursively
    if sphinx_config.pop("recursive_update") is True:
        _recursive_update(sphinx_config, user_yaml_update)
    else:
        sphinx_config.update(user_yaml_update)

    # This is to deal with a special case, where the override needs to be applied after
    # the sphinx app is initialised (since the default is a function)
    # TODO I'm not sure if there is a better way to deal with this?
    config_meta = {
        "latex_doc_overrides": sphinx_config.pop("latex_doc_overrides"),
        "latex_individualpages": cli_config.pop("latex_individualpages"),
    }

    if sphinx_config.get("use_jupyterbook_latex"):
        sphinx_config["extensions"].append("sphinx_jupyterbook_latex")

    # Add sphinx_multitoc_numbering extension if necessary
    if sphinx_config.get("use_multitoc_numbering"):
        sphinx_config["extensions"].append("sphinx_multitoc_numbering")

    # finally merge in CLI configuration
    _recursive_update(sphinx_config, cli_config or {})

    # Initialize static files
    if sourcedir and Path(sourcedir).joinpath("_static").is_dir():
        # Add the `_static` folder to html_static_path, only if it exists
        paths_static = sphinx_config.get("html_static_path", [])
        paths_static.append("_static")
        sphinx_config["html_static_path"] = paths_static
        # Search the static files paths and initialize any CSS or JS files.
        for path in paths_static:
            path = Path(sourcedir).joinpath(path)
            for path_css in path.rglob("*.css"):
                css_files = sphinx_config.get("html_css_files", [])
                css_files.append((path_css.relative_to(path)).as_posix())
                sphinx_config["html_css_files"] = css_files
            for path_js in path.rglob("*.js"):
                js_files = sphinx_config.get("html_js_files", [])
                js_files.append((path_js.relative_to(path)).as_posix())
                sphinx_config["html_js_files"] = js_files

    if not use_external_toc:
        # TODO perhaps a better logic for this?
        # remove all configuration related to sphinx_external_toc
        try:
            idx = sphinx_config["extensions"].index("sphinx_external_toc")
        except ValueError:
            pass
        else:
            sphinx_config["extensions"].pop(idx)
        sphinx_config.pop("external_toc_path", None)
        sphinx_config.pop("external_toc_exclude_missing", None)

    return sphinx_config, config_meta
Create the final configuration dictionary, to pass to Sphinx.

:param user_yaml: A path to a YAML file written by the user, or an already-loaded configuration dict
:param cli_config: Configuration coming directly from the CLI
:param sourcedir: path to source directory. If it contains a `_static` folder, we add that to the final `html_static_path`
:param validate: Validate user yaml against the data schema
:param raise_on_invalid: Raise a ValidationError, or only log a warning

Order of precedence is:

1. CLI Sphinx Configuration
2. User JB (YAML) Configuration
3. Default JB (YAML) Configuration
4. Default Sphinx Configuration
get_final_config
python
jupyter-book/jupyter-book
jupyter_book/config.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/config.py
BSD-3-Clause
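The precedence order in the docstring can be checked directly. Note that `get_final_config` pops `latex_individualpages` from `cli_config`, so callers (normally the `build` command) must supply it; a sketch with hypothetical values, assuming a minimal config like this validates against the bundled schema:

from jupyter_book.config import get_final_config

sphinx_config, config_meta = get_final_config(
    user_yaml={"title": "From user _config.yml"},  # precedence 2
    cli_config={
        "html_title": "From the CLI",              # precedence 1, wins
        "latex_individualpages": False,            # always set by the CLI
    },
)
print(sphinx_config["html_title"])           # "From the CLI"
print(config_meta["latex_individualpages"])  # False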
def yaml_to_sphinx(yaml: dict):
    """Convert a Jupyter Book style config structure into a Sphinx config dict.

    :returns: (recursive_updates, override_updates, add_paths)

    add_paths collects paths that are specified in the _config.yml (such as those
    provided in local_extensions) and returns them for adding to sys.path in
    a context where the _config.yml path is known
    """
    sphinx_config = {}

    # top-level, string type
    YAML_TRANSLATIONS = {
        "title": "html_title",
        "author": "author",
        "copyright": "copyright",
        "logo": "html_logo",
        "project": "project",
    }
    for key, newkey in YAML_TRANSLATIONS.items():
        if key in yaml:
            val = yaml.get(key)
            if val is None:
                val = ""
            sphinx_config[newkey] = val

    # exclude patterns
    if "exclude_patterns" in yaml:
        # we always include these excludes, so as not to break back-compatibility
        defaults = {"_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"}
        defaults.update(yaml["exclude_patterns"])
        sphinx_config["exclude_patterns"] = list(sorted(defaults))

    if "only_build_toc_files" in yaml:
        sphinx_config["external_toc_exclude_missing"] = yaml["only_build_toc_files"]

    # Theme
    sphinx_config["html_theme_options"] = theme_options = {}
    if "launch_buttons" in yaml:
        theme_options["launch_buttons"] = yaml["launch_buttons"]

    repository_config = yaml.get("repository", {})
    for spx_key, yml_key in [
        ("path_to_docs", "path_to_book"),
        ("repository_url", "url"),
        ("repository_branch", "branch"),
    ]:
        if yml_key in repository_config:
            theme_options[spx_key] = repository_config[yml_key]

    # HTML
    html = yaml.get("html")
    if html:
        for spx_key, yml_key in [
            ("html_favicon", "favicon"),
            ("html_baseurl", "baseurl"),
            ("comments_config", "comments"),
            ("use_multitoc_numbering", "use_multitoc_numbering"),
        ]:
            if yml_key in html:
                sphinx_config[spx_key] = html[yml_key]

        for spx_key, yml_key in [
            ("navbar_footer_text", "navbar_footer_text"),
            # Deprecate navbar_footer_text after a release cycle
            ("extra_footer", "extra_footer"),
            ("home_page_in_toc", "home_page_in_navbar"),
            ("announcement", "announcement"),
        ]:
            if yml_key in html:
                theme_options[spx_key] = html[yml_key]

        # Fix for renamed field
        spx_analytics = theme_options["analytics"] = {}
        google_analytics_id = html.get("google_analytics_id")
        if google_analytics_id is not None:
            _message_box(
                (
                    "[Warning] The `html.google_analytics_id` configuration value has moved to `html.analytics.google_analytics_id`"  # noqa: E501
                ),
                color="orange",
                print_func=print,
            )
            spx_analytics["google_analytics_id"] = google_analytics_id

        # Analytics
        yml_analytics = html.get("analytics", {})
        for spx_key, yml_key in [
            ("google_analytics_id", "google_analytics_id"),
            ("plausible_analytics_domain", "plausible_analytics_domain"),
            ("plausible_analytics_url", "plausible_analytics_url"),
        ]:
            if yml_key in yml_analytics:
                spx_analytics[spx_key] = yml_analytics[yml_key]

        # Pass through the buttons
        btns = ["use_repository_button", "use_edit_page_button", "use_issues_button"]
        use_buttons = {btn: html.get(btn) for btn in btns if btn in html}
        if any(use_buttons.values()):
            if not repository_config.get("url"):
                raise ValueError(
                    "To use 'repository' buttons, you must specify the repository URL"
                )
        # Update our config
        theme_options.update(use_buttons)

    # Parse and Rendering
    parse = yaml.get("parse")
    if parse:
        # Enable extra extensions
        extensions = sphinx_config.get("myst_enable_extensions", [])
        # TODO: deprecate this in v0.11.0
        if parse.get("myst_extended_syntax") is True:
            extensions.extend(
                [
                    "colon_fence",
                    "dollarmath",
                    "amsmath",
                    "deflist",
                    "html_image",
                ]
            )
            _message_box(
                (
                    "myst_extended_syntax is deprecated, instead specify extensions "
                    "you wish to be enabled. See https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html"  # noqa: E501
                ),
                color="orange",
                print_func=print,
            )
        for ext in parse.get("myst_enable_extensions", []):
            if ext not in extensions:
                extensions.append(ext)
        if extensions:
            sphinx_config["myst_enable_extensions"] = extensions

        # Configuration values we'll just pass-through
        for ikey in ["myst_substitutions", "myst_url_schemes"]:
            if ikey in parse:
                sphinx_config[ikey] = parse.get(ikey)

    # Execution
    execute = yaml.get("execute")
    if execute:
        for spx_key, yml_key in [
            ("nb_execution_allow_errors", "allow_errors"),
            ("nb_execution_raise_on_error", "raise_on_error"),
            ("nb_eval_name_regex", "eval_regex"),
            ("nb_execution_show_tb", "show_tb"),
            ("nb_execution_in_temp", "run_in_temp"),
            ("nb_output_stderr", "stderr_output"),
            ("nb_execution_timeout", "timeout"),
            ("nb_execution_cache_path", "cache"),
            ("nb_execution_mode", "execute_notebooks"),
            ("nb_execution_excludepatterns", "exclude_patterns"),
        ]:
            if yml_key in execute:
                sphinx_config[spx_key] = execute[yml_key]

        if sphinx_config.get("nb_execution_mode") is False:
            # Special case because YAML treats `off` as "False".
            sphinx_config["nb_execution_mode"] = "off"

    # LaTeX
    latex = yaml.get("latex")
    if latex:
        for spx_key, yml_key in [
            ("latex_engine", "latex_engine"),
            ("use_jupyterbook_latex", "use_jupyterbook_latex"),
        ]:
            if yml_key in latex:
                sphinx_config[spx_key] = latex[yml_key]

    sphinx_config["latex_doc_overrides"] = {}
    if "title" in yaml:
        sphinx_config["latex_doc_overrides"]["title"] = yaml["title"]
    for key, val in yaml.get("latex", {}).get("latex_documents", {}).items():
        sphinx_config["latex_doc_overrides"][key] = val

    # Sphinx Configuration
    extra_extensions = yaml.get("sphinx", {}).get("extra_extensions")
    if extra_extensions:
        sphinx_config["extensions"] = get_default_sphinx_config()["extensions"]
        if not isinstance(extra_extensions, list):
            extra_extensions = [extra_extensions]
        for extension in extra_extensions:
            if extension not in sphinx_config["extensions"]:
                sphinx_config["extensions"].append(extension)

    local_extensions = yaml.get("sphinx", {}).get("local_extensions")
    # add_paths collects additional paths for sys.path
    add_paths = []
    if local_extensions:
        if "extensions" not in sphinx_config:
            sphinx_config["extensions"] = get_default_sphinx_config()["extensions"]
        for extension, path in local_extensions.items():
            if extension not in sphinx_config["extensions"]:
                sphinx_config["extensions"].append(extension)
            if path not in sys.path:
                add_paths.append(path)

    # Overwrite sphinx config or not
    if "recursive_update" in yaml.get("sphinx", {}):
        sphinx_config["recursive_update"] = yaml.get("sphinx", {}).get(
            "recursive_update"
        )

    # Citations
    sphinxcontrib_bibtex_configs = ["bibtex_bibfiles", "bibtex_reference_style"]
    if any(bibtex_config in yaml for bibtex_config in sphinxcontrib_bibtex_configs):
        # Load sphinxcontrib-bibtex
        if "extensions" not in sphinx_config:
            sphinx_config["extensions"] = get_default_sphinx_config()["extensions"]
        sphinx_config["extensions"].append("sphinxcontrib.bibtex")

        # Report Bug in Specific Docutils Versions
        # TODO: Remove when docutils>=0.20 is pinned in jupyter-book
        # https://github.com/mcmtroffaes/sphinxcontrib-bibtex/issues/322
        if (0, 18) <= docutils.__version_info__ < (0, 20):
            logger.warning(
                "[sphinxcontrib-bibtex] Beware that docutils versions 0.18 and 0.19 "
                "(you are running {}) are known to generate invalid html for citations. "
                "If this issue affects you, please use docutils<0.18 or >=0.20 instead. "
                "For more details, see https://sourceforge.net/p/docutils/patches/195/".format(
                    docutils.__version__
                )
            )

        # Pass through configuration
        if yaml.get("bibtex_bibfiles"):
            if isinstance(yaml.get("bibtex_bibfiles"), str):
                yaml["bibtex_bibfiles"] = [yaml["bibtex_bibfiles"]]
            sphinx_config["bibtex_bibfiles"] = yaml["bibtex_bibfiles"]

    # items in sphinx.config will override defaults,
    # rather than recursively updating them
    return sphinx_config, yaml.get("sphinx", {}).get("config") or {}, add_paths
Convert a Jupyter Book style config structure into a Sphinx config dict.

:returns: (recursive_updates, override_updates, add_paths)

add_paths collects paths that are specified in the _config.yml (such as those provided in local_extensions) and returns them for adding to sys.path in a context where the _config.yml path is known
yaml_to_sphinx
python
jupyter-book/jupyter-book
jupyter_book/config.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/config.py
BSD-3-Clause
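A sketch of the three return values for a small config dict (values here are illustrative):

from jupyter_book.config import yaml_to_sphinx

config = {
    "title": "Demo Book",
    "execute": {"execute_notebooks": "off", "timeout": 120},
    "sphinx": {"config": {"html_theme": "alabaster"}},
}
recursive, overrides, add_paths = yaml_to_sphinx(config)
print(recursive["html_title"])         # "Demo Book"
print(recursive["nb_execution_mode"])  # "off"
print(overrides)                       # {'html_theme': 'alabaster'}
print(add_paths)                       # [] (no local_extensions given)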
def _recursive_update(config, update, list_extend=False):
    """Update the dict `config` with `update` recursively.
    This *updates* nested dicts / lists instead of replacing them.
    """
    for key, val in update.items():
        if isinstance(config.get(key), dict):
            # if a dict value update is set to None,
            # then the entire dict will be "wiped",
            # otherwise it is recursively updated.
            if isinstance(val, dict):
                _recursive_update(config[key], val, list_extend)
            else:
                config[key] = val
        elif isinstance(config.get(key), list):
            if isinstance(val, list) and list_extend:
                config[key].extend(val)
            else:
                config[key] = val
        else:
            config[key] = val
Update the dict `config` with `update` recursively. This *updates* nested dicts / lists instead of replacing them.
_recursive_update
python
jupyter-book/jupyter-book
jupyter_book/config.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/config.py
BSD-3-Clause
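A small demonstration of the merge semantics: nested dicts are merged key by key, while lists are replaced unless `list_extend=True`:

from jupyter_book.config import _recursive_update

config = {
    "html_theme_options": {"logo": "a.png", "navbar": True},
    "exclude_patterns": ["_build"],
}
update = {
    "html_theme_options": {"logo": "b.png"},
    "exclude_patterns": ["Thumbs.db"],
}
_recursive_update(config, update)
print(config["html_theme_options"])  # {'logo': 'b.png', 'navbar': True}
print(config["exclude_patterns"])    # ['Thumbs.db'], replaced not extended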
def build_sphinx(
    sourcedir,
    outputdir,
    *,
    use_external_toc=True,
    confdir=None,
    path_config=None,
    noconfig=False,
    confoverrides=None,
    doctreedir=None,
    filenames=None,
    force_all=False,
    quiet=False,
    really_quiet=False,
    builder="html",
    freshenv=False,
    warningiserror=False,
    tags=None,
    verbosity=0,
    jobs=None,
    keep_going=False,
) -> Union[int, Exception]:
    """Sphinx build "main" command-line entry.

    This is a slightly modified version of
    https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/cmd/build.py#L198.
    """
    #######################
    # Configuration creation
    sphinx_config, config_meta = get_final_config(
        user_yaml=Path(path_config) if path_config else None,
        cli_config=confoverrides or {},
        sourcedir=Path(sourcedir),
        use_external_toc=use_external_toc,
    )

    ##################################
    # Preparing Sphinx build arguments

    # Configuration directory
    if noconfig:
        confdir = None
    elif not confdir:
        confdir = sourcedir

    # Doctrees directory
    if not doctreedir:
        doctreedir = Path(outputdir).parent.joinpath(".doctrees")

    if jobs is None:
        jobs = 1

    # Manually re-building files in filenames
    if filenames is None:
        filenames = []
    missing_files = []
    for filename in filenames:
        if not op.isfile(filename):
            missing_files.append(filename)
    if missing_files:
        raise IOError("cannot find files %r" % missing_files)

    if force_all and filenames:
        raise ValueError("cannot combine -a option and filenames")

    # Debug args (hack to get this to pass through properly)
    def debug_args():
        pass

    debug_args.pdb = False
    debug_args.verbosity = False
    debug_args.traceback = False

    # Logging behavior
    status = sys.stdout
    warning = sys.stderr
    error = sys.stderr
    if quiet:
        status = None
    if really_quiet:
        status = warning = None

    ###################
    # Build with Sphinx
    app = None  # In case we fail, this allows us to handle the exception
    try:
        # These patches temporarily override docutils global variables,
        # such as the dictionaries of directives, roles and nodes
        # NOTE: this action is not thread-safe and not suitable for asynchronous use!
        with patch_docutils(confdir), docutils_namespace():
            app = Sphinx(
                srcdir=sourcedir,
                confdir=confdir,
                outdir=outputdir,
                doctreedir=doctreedir,
                buildername=builder,
                confoverrides=sphinx_config,
                status=status,
                warning=warning,
                freshenv=freshenv,
                warningiserror=warningiserror,
                tags=tags,
                verbosity=verbosity,
                parallel=jobs,
                keep_going=keep_going,
            )

            # We have to apply this update after the sphinx initialisation,
            # since default_latex_documents is dynamically generated
            # see sphinx/builders/latex/__init__.py:default_latex_documents
            new_latex_documents = update_latex_documents(
                app.config.latex_documents, config_meta["latex_doc_overrides"]
            )
            app.config.latex_documents = new_latex_documents

            # set the below flag to always to enable maths in singlehtml builder
            if app.builder.name == "singlehtml":
                app.set_html_assets_policy("always")

            # setting up sphinx-multitoc-numbering
            if app.config["use_multitoc_numbering"]:
                # if sphinx-external-toc is used
                if "external_toc_path" in app.config:
                    import yaml

                    site_map = app.config.external_site_map
                    site_map_str = yaml.dump(site_map.as_json())

                    # only if there is at least one numbered: true in the toc file
                    if "numbered: true" in site_map_str:
                        app.setup_extension("sphinx_multitoc_numbering")
                else:
                    app.setup_extension("sphinx_multitoc_numbering")

            # Build latex_doc tuples based on --individualpages option request
            if config_meta["latex_individualpages"]:
                from .pdf import autobuild_singlepage_latexdocs

                # Ask Builder to read the source files to fetch titles and documents
                app.builder.read()
                latex_documents = autobuild_singlepage_latexdocs(app)
                app.config.latex_documents = latex_documents

            app.build(force_all, filenames)
            return app.statuscode

    except (Exception, KeyboardInterrupt) as exc:
        handle_exception(app, debug_args, exc, error)
        return exc
Sphinx build "main" command-line entry. This is a slightly modified version of https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/cmd/build.py#L198.
build_sphinx
python
jupyter-book/jupyter-book
jupyter_book/sphinx.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/sphinx.py
BSD-3-Clause
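A minimal invocation sketch, assuming `mybook/` is an existing book source with a `_config.yml` and `_toc.yml` (hypothetical paths); as noted for `get_final_config` above, `latex_individualpages` must be present in the overrides because it is popped from them:

from jupyter_book.sphinx import build_sphinx

status = build_sphinx(
    "mybook",              # hypothetical source directory
    "mybook/_build/html",  # output directory
    path_config="mybook/_config.yml",
    confoverrides={"latex_individualpages": False},
    builder="html",
)
# Returns the Sphinx status code, or the exception if the build failed
print(status)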
def html_to_pdf(html_file, pdf_file):
    """
    Convert arbitrary HTML file to PDF using playwright.

    Parameters
    ----------
    html_file : str
        A path to an HTML file to convert to PDF
    pdf_file : str
        A path to an output PDF file that will be created
    """
    asyncio.run(_html_to_pdf(html_file, pdf_file))
Convert arbitrary HTML file to PDF using playwright.

Parameters
----------
html_file : str
    A path to an HTML file to convert to PDF
pdf_file : str
    A path to an output PDF file that will be created
html_to_pdf
python
jupyter-book/jupyter-book
jupyter_book/pdf.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/pdf.py
BSD-3-Clause
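Usage is a single call; this assumes playwright and a browser binary are installed (e.g. `pip install playwright` followed by `playwright install chromium`), and the paths are hypothetical:

from jupyter_book.pdf import html_to_pdf

html_to_pdf("mybook/_build/html/index.html", "mybook/_build/pdf/book.pdf")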
def update_latex_documents(latex_documents, latexoverrides):
    """Apply latexoverrides from _config.yml to latex_documents tuple"""
    if len(latex_documents) > 1:
        _message_box(
            "Latex documents specified as a multi element list in the _config",
            "This suggests the user has made custom settings to their build",
            "[Skipping] processing of automatic latex overrides",
        )
        return latex_documents

    # Extract latex document tuple
    latex_document = latex_documents[0]
    # Apply single overrides from _config.yml
    updated_latexdocs = []
    for loc, item in enumerate(LATEX_DOCUMENTS):
        # the last element toctree_only seems optionally included
        if loc >= len(latex_document):
            break
        if item in latexoverrides.keys():
            updated_latexdocs.append(latexoverrides[item])
        else:
            updated_latexdocs.append(latex_document[loc])
    return [tuple(updated_latexdocs)]
Apply latexoverrides from _config.yml to latex_documents tuple
update_latex_documents
python
jupyter-book/jupyter-book
jupyter_book/pdf.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/pdf.py
BSD-3-Clause
def latex_document_components(latex_documents):
    """Return a dictionary of latex_document components by name"""
    latex_tuple_components = {}
    for idx, item in enumerate(LATEX_DOCUMENTS):
        # skip if latex_documents doesn't contain all elements
        # of the LATEX_DOCUMENT specification tuple
        if idx >= len(latex_documents):
            continue
        latex_tuple_components[item] = latex_documents[idx]
    return latex_tuple_components
Return a dictionary of latex_document components by name
latex_document_components
python
jupyter-book/jupyter-book
jupyter_book/pdf.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/pdf.py
BSD-3-Clause
def latex_document_tuple(components):
    """Return a tuple for latex_documents from named components dictionary"""
    latex_doc = []
    for item in LATEX_DOCUMENTS:
        if item not in components.keys():
            continue
        latex_doc.append(components[item])
    return tuple(latex_doc)
Return a tuple for latex_documents from named components dictionary
latex_document_tuple
python
jupyter-book/jupyter-book
jupyter_book/pdf.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/pdf.py
BSD-3-Clause
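`latex_document_components` and `latex_document_tuple` are inverses, which makes it easy to edit one field of a `latex_documents` entry by name; a sketch with an illustrative tuple:

from jupyter_book.pdf import latex_document_components, latex_document_tuple

# A Sphinx latex_documents entry:
# (startdocname, targetname, title, author, theme, toctree_only)
doc = ("index", "book.tex", "My Book", "Jane Doe", "manual", False)

components = latex_document_components(doc)
components["title"] = "Renamed Book"  # edit one field by name
print(latex_document_tuple(components))
# ('index', 'book.tex', 'Renamed Book', 'Jane Doe', 'manual', False)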
def autobuild_singlepage_latexdocs(app):
    """
    Build list of tuples for each document in the Project

    [(startdocname, targetname, title, author, theme, toctree_only)]

    https://www.sphinx-doc.org/en/3.x/usage/configuration.html#confval-latex_documents
    """
    latex_documents = app.config.latex_documents
    if len(latex_documents) > 1:
        _message_box(
            "Latex documents specified as a multi element list in the _config",
            "This suggests the user has made custom settings to their build",
            "[Skipping] --individualpages option",
        )
        return latex_documents

    # Extract latex_documents updated tuple
    latex_documents = latex_documents[0]

    titles = app.env.titles
    master_doc = app.config.master_doc
    sourcedir = os.path.dirname(master_doc)

    # Construct Tuples
    DEFAULT_VALUES = latex_document_components(latex_documents)

    latex_documents = []
    for doc, title in titles.items():
        latex_doc = copy(DEFAULT_VALUES)
        # if doc has a subdir relative to src dir
        docname = None
        parts = Path(doc).parts
        latex_doc["startdocname"] = doc
        if DEFAULT_VALUES["startdocname"] == doc:
            targetdoc = DEFAULT_VALUES["targetname"]
        else:
            if sourcedir in parts:
                parts = list(parts)
                # assuming we need to remove only the first instance
                parts.remove(sourcedir)
            docname = "-".join(parts)
            targetdoc = docname + ".tex"
        latex_doc["targetname"] = targetdoc
        latex_doc["title"] = title.astext()
        latex_doc = latex_document_tuple(latex_doc)
        latex_documents.append(latex_doc)

    return latex_documents
Build list of tuples for each document in the Project

[(startdocname, targetname, title, author, theme, toctree_only)]

https://www.sphinx-doc.org/en/3.x/usage/configuration.html#confval-latex_documents
autobuild_singlepage_latexdocs
python
jupyter-book/jupyter-book
jupyter_book/pdf.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/pdf.py
BSD-3-Clause
def version_callback(ctx, param, value):
    """Callback for supplying version information"""
    if not value or ctx.resilient_parsing:
        return

    from jupyter_cache import __version__ as jcv
    from myst_nb import __version__ as mnbv
    from myst_parser import __version__ as mpv
    from nbclient import __version__ as ncv
    from sphinx_book_theme import __version__ as sbtv
    from sphinx_external_toc import __version__ as etoc

    from jupyter_book import __version__ as jbv

    versions = {
        "Jupyter Book": jbv,
        "External ToC": etoc,
        "MyST-Parser": mpv,
        "MyST-NB": mnbv,
        "Sphinx Book Theme": sbtv,
        "Jupyter-Cache": jcv,
        "NbClient": ncv,
    }
    versions_string = "\n".join(f"{tt:<18}: {vv}" for tt, vv in versions.items())
    click.echo(versions_string)
    ctx.exit()
Callback for supplying version information
version_callback
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def main():
    """Build and manage books with Jupyter."""
    pass
Build and manage books with Jupyter.
main
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def build(
    path_source,
    path_output,
    config,
    toc,
    warningiserror,
    nitpick,
    keep_going,
    freshenv,
    builder,
    custom_builder,
    verbose,
    quiet,
    individualpages,
    get_config_only=False,
):
    """Convert your book's or page's content to HTML or a PDF."""
    from sphinx_external_toc.parsing import MalformedError, parse_toc_yaml

    from jupyter_book import __version__ as jbv
    from jupyter_book.sphinx import build_sphinx

    if not get_config_only:
        click.secho(f"Running Jupyter-Book v{jbv}", bold=True, fg="green")

    # Paths for the notebooks
    PATH_SRC_FOLDER = Path(path_source).absolute()

    config_overrides = {}
    use_external_toc = True
    found_config = find_config_path(PATH_SRC_FOLDER)
    BUILD_PATH = path_output if path_output is not None else found_config[0]

    # Set config for --individualpages option (pages, documents)
    if individualpages:
        if builder != "pdflatex":
            _error(
                """
                Specified option --individualpages only works with the
                following builders:

                pdflatex
                """
            )

    # Build Page
    if not PATH_SRC_FOLDER.is_dir():
        # it is a single file
        build_type = "page"
        use_external_toc = False
        subdir = None
        PATH_SRC = Path(path_source)
        PATH_SRC_FOLDER = PATH_SRC.parent.absolute()
        PAGE_NAME = PATH_SRC.with_suffix("").name

        # checking if the page is inside a sub directory
        # then changing the build_path accordingly
        if str(BUILD_PATH) in str(PATH_SRC_FOLDER):
            subdir = str(PATH_SRC_FOLDER.relative_to(BUILD_PATH))
        if subdir and subdir != ".":
            subdir = subdir.replace("/", "-")
            subdir = subdir + "-" + PAGE_NAME
            BUILD_PATH = Path(BUILD_PATH).joinpath("_build", "_page", subdir)
        else:
            BUILD_PATH = Path(BUILD_PATH).joinpath("_build", "_page", PAGE_NAME)

        # Find all files that *aren't* the page we're building and exclude them
        to_exclude = [
            op.relpath(ifile, PATH_SRC_FOLDER)
            for ifile in iglob(str(PATH_SRC_FOLDER.joinpath("**", "*")), recursive=True)
            if ifile != str(PATH_SRC.absolute())
        ]
        to_exclude.extend(["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"])

        # Now call the Sphinx commands to build
        config_overrides = {
            "master_doc": PAGE_NAME,
            "exclude_patterns": to_exclude,
            # --individualpages option set to True for page call
            "latex_individualpages": True,
        }
    # Build Project
    else:
        build_type = "book"
        PAGE_NAME = None
        BUILD_PATH = Path(BUILD_PATH).joinpath("_build")

        # Table of contents
        toc = PATH_SRC_FOLDER.joinpath("_toc.yml") if toc is None else Path(toc)

        if not get_config_only:
            if not toc.exists():
                _error(
                    "Couldn't find a Table of Contents file. "
                    "To auto-generate one, run:"
                    f"\n\n\tjupyter-book toc from-project {path_source}"
                )

            # we don't need to read the toc here, but do so to control the error message
            try:
                parse_toc_yaml(toc)
            except MalformedError as exc:
                _error(
                    f"The Table of Contents file is malformed: {exc}\n"
                    "You may need to migrate from the old format, using:"
                    f"\n\n\tjupyter-book toc migrate {toc} -o {toc}"
                )
            # TODO could also check/warn if the format is not set to jb-article/jb-book?

        config_overrides["external_toc_path"] = (
            toc.relative_to(PATH_SRC_FOLDER).as_posix()
            if get_config_only
            else toc.as_posix()
        )

        # --individualpages option passthrough
        config_overrides["latex_individualpages"] = individualpages

    # Use the specified configuration file, or one found in the root directory
    path_config = config or (
        found_config[0].joinpath("_config.yml") if found_config[1] else None
    )
    if path_config and not Path(path_config).exists():
        raise IOError(f"Config file path given, but not found: {path_config}")

    if builder in ["html", "pdfhtml", "linkcheck"]:
        OUTPUT_PATH = BUILD_PATH.joinpath("html")
    elif builder in ["latex", "pdflatex"]:
        OUTPUT_PATH = BUILD_PATH.joinpath("latex")
    elif builder in ["dirhtml"]:
        OUTPUT_PATH = BUILD_PATH.joinpath("dirhtml")
    elif builder in ["singlehtml"]:
        OUTPUT_PATH = BUILD_PATH.joinpath("singlehtml")
    elif builder in ["custom"]:
        OUTPUT_PATH = BUILD_PATH.joinpath(custom_builder)
        BUILDER_OPTS["custom"] = custom_builder

    if nitpick:
        config_overrides["nitpicky"] = True

    # If we only want config (e.g. for printing/validation), stop here
    if get_config_only:
        return (path_config, PATH_SRC_FOLDER, config_overrides)

    # print information about the build
    click.echo(
        click.style("Source Folder: ", bold=True, fg="blue")
        + click.format_filename(f"{PATH_SRC_FOLDER}")
    )
    click.echo(
        click.style("Config Path: ", bold=True, fg="blue")
        + click.format_filename(f"{path_config}")
    )
    click.echo(
        click.style("Output Path: ", bold=True, fg="blue")
        + click.format_filename(f"{OUTPUT_PATH}")
    )

    # Now call the Sphinx commands to build
    result = build_sphinx(
        PATH_SRC_FOLDER,
        OUTPUT_PATH,
        use_external_toc=use_external_toc,
        noconfig=True,
        path_config=path_config,
        confoverrides=config_overrides,
        builder=BUILDER_OPTS[builder],
        warningiserror=warningiserror,
        keep_going=keep_going,
        freshenv=freshenv,
        verbosity=verbose,
        quiet=quiet > 0,
        really_quiet=quiet > 1,
    )

    builder_specific_actions(
        result, builder, OUTPUT_PATH, build_type, PAGE_NAME, click.echo
    )
Convert your book's or page's content to HTML or a PDF.
build
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def create(path_book, cookiecutter, no_input):
    """Create a Jupyter Book template that you can customize."""
    book = Path(path_book)
    if not cookiecutter:  # this will be the more common option
        template_path = Path(__file__).parent.parent.joinpath("book_template")
        sh.copytree(template_path, book)
    else:
        cc_url = "gh:executablebooks/cookiecutter-jupyter-book"
        try:
            from cookiecutter.main import cookiecutter
        except ModuleNotFoundError as e:
            _error(
                f"{e}. To install, run\n\n\tpip install cookiecutter",
                kind=e.__class__,
            )
        book = cookiecutter(cc_url, output_dir=Path(path_book), no_input=no_input)
    _message_box(f"Your book template can be found at\n\n    {book}{os.sep}")
Create a Jupyter Book template that you can customize.
create
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def remove_option(path, option, rm_both=False):
    """Remove folder specified under option. If rm_both is True, remove folder
    and skip message_box."""
    option_path = path.joinpath(option)
    if not option_path.is_dir():
        return
    sh.rmtree(option_path)
    if not rm_both:
        _message_box(f"Your {option} directory has been removed")
Remove folder specified under option. If rm_both is True, remove folder and skip message_box.
clean.remove_option
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def remove_html_latex(path):
    """Remove both html and latex folders."""
    print_msg = False
    for opt in ["html", "latex"]:
        if path.joinpath(opt).is_dir():
            print_msg = True
            remove_option(path, opt, True)
    if print_msg:
        _message_box("Your html and latex directories have been removed")
Remove both html and latex folders.
clean.remove_html_latex
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def remove_all(path):
    """Remove _build directory entirely."""
    sh.rmtree(path)
    _message_box("Your _build directory has been removed")
Remove _build directory entirely.
clean.remove_all
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def remove_default(path):
    """Remove all subfolders in _build except .jupyter_cache."""
    to_remove = [
        dd for dd in path.iterdir() if dd.is_dir() and dd.name != ".jupyter_cache"
    ]
    for dd in to_remove:
        sh.rmtree(path.joinpath(dd.name))
    _message_box("Your _build directory has been emptied except for .jupyter_cache")
Remove all subfolders in _build except .jupyter_cache.
clean.remove_default
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def clean(path_book, all_, html, latex):
    """Empty the _build directory except jupyter_cache.
    If the all option has been flagged, it will remove the entire _build.
    If html/latex option is flagged, it will remove the html/latex subdirectories.
    """

    def remove_option(path, option, rm_both=False):
        """Remove folder specified under option. If rm_both is True, remove folder
        and skip message_box."""
        option_path = path.joinpath(option)
        if not option_path.is_dir():
            return
        sh.rmtree(option_path)
        if not rm_both:
            _message_box(f"Your {option} directory has been removed")

    def remove_html_latex(path):
        """Remove both html and latex folders."""
        print_msg = False
        for opt in ["html", "latex"]:
            if path.joinpath(opt).is_dir():
                print_msg = True
                remove_option(path, opt, True)
        if print_msg:
            _message_box("Your html and latex directories have been removed")

    def remove_all(path):
        """Remove _build directory entirely."""
        sh.rmtree(path)
        _message_box("Your _build directory has been removed")

    def remove_default(path):
        """Remove all subfolders in _build except .jupyter_cache."""
        to_remove = [
            dd for dd in path.iterdir() if dd.is_dir() and dd.name != ".jupyter_cache"
        ]
        for dd in to_remove:
            sh.rmtree(path.joinpath(dd.name))
        _message_box("Your _build directory has been emptied except for .jupyter_cache")

    PATH_OUTPUT = Path(path_book).absolute()
    if not PATH_OUTPUT.is_dir():
        _error(f"Path to book isn't a directory: {PATH_OUTPUT}")

    build_path = PATH_OUTPUT.joinpath("_build")
    if not build_path.is_dir():
        return

    if all_:
        remove_all(build_path)
    elif html and latex:
        remove_html_latex(build_path)
    elif html:
        remove_option(build_path, "html")
    elif latex:
        remove_option(build_path, "latex")
    else:
        remove_default(build_path)
Empty the _build directory except jupyter_cache. If the all option has been flagged, it will remove the entire _build. If html/latex option is flagged, it will remove the html/latex subdirectories.
clean
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def myst():
    """Manipulate MyST markdown files."""
    pass
Manipulate MyST markdown files.
myst
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def init(path, kernel):
    """Add Jupytext metadata for your markdown file(s), with optional Kernel name."""
    from jupyter_book.utils import init_myst_file

    for ipath in path:
        init_myst_file(ipath, kernel, verbose=True)
Add Jupytext metadata for your markdown file(s), with optional Kernel name.
init
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def config():
    """Inspect your _config.yml file."""
    pass
Inspect your _config.yml file.
config
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def sphinx(ctx, path_source, config, toc):
    """Generate a Sphinx conf.py representation of the build configuration."""
    from jupyter_book.config import get_final_config

    path_config, full_path_source, config_overrides = ctx.invoke(
        build, path_source=path_source, config=config, toc=toc, get_config_only=True
    )
    sphinx_config, _ = get_final_config(
        user_yaml=Path(path_config) if path_config else None,
        sourcedir=Path(full_path_source),
        cli_config=config_overrides,
    )
    lines = [
        "###############################################################################",
        "# Auto-generated by `jupyter-book config`",
        "# If you wish to continue using _config.yml, make edits to that file and",
        "# re-generate this one.",
        "###############################################################################",
    ]
    for key in sorted(sphinx_config):
        lines.append(f"{key} = {sphinx_config[key]!r}")
    content = "\n".join(lines).rstrip() + "\n"
    out_folder = Path(path_config).parent if path_config else Path(full_path_source)
    out_folder.joinpath("conf.py").write_text(content, encoding="utf8")
    click.secho(f"Wrote conf.py to {out_folder}", fg="green")
Generate a Sphinx conf.py representation of the build configuration.
sphinx
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def find_config_path(path: Path) -> Tuple[Path, bool]:
    """Check for a _config.yml file in the current and parent directories.

    If one is found, return the directory containing _config.yml;
    otherwise return the given directory itself.
    """
    if path.is_dir():
        current_dir = path
    else:
        current_dir = path.parent

    if (current_dir / "_config.yml").is_file():
        return (current_dir, True)

    while current_dir != current_dir.parent:
        if (current_dir / "_config.yml").is_file():
            return (current_dir, True)
        current_dir = current_dir.parent

    if not path.is_dir():
        return (path.parent, False)
    return (path, False)
Check for a _config.yml file in the current and parent directories. If one is found, return the directory containing _config.yml; otherwise return the given directory itself.
find_config_path
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
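A usage sketch with a hypothetical project layout, showing both outcomes of the boolean flag:

from pathlib import Path

from jupyter_book.cli.main import find_config_path

root, found = find_config_path(Path("mybook/content/page.md"))
if found:
    print(f"_config.yml found in: {root}")
else:
    print(f"No _config.yml; defaulting to: {root}")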
def builder_specific_actions(
    result, builder, output_path, cmd_type, page_name=None, print_func=print
):
    """Run post-sphinx-build actions.

    :param result: the result of the build execution; a status code or an exception
    """
    from jupyter_book.pdf import html_to_pdf
    from jupyter_book.sphinx import REDIRECT_TEXT

    if isinstance(result, Exception):
        msg = (
            f"There was an error in building your {cmd_type}. "
            "Look above for the cause."
        )
        # TODO ideally we probably only want the original traceback here
        raise RuntimeError(_message_box(msg, color="red", doprint=False)) from result
    elif result:
        msg = (
            f"Building your {cmd_type}, returns a non-zero exit code ({result}). "
            "Look above for the cause."
        )
        _message_box(msg, color="red", print_func=click.echo)
        sys.exit(result)

    # Builder-specific options
    if builder == "html":
        path_output_rel = Path(op.relpath(output_path, Path()))
        if cmd_type == "page":
            path_page = path_output_rel.joinpath(f"{page_name}.html")
            # Write an index file if it doesn't exist so we get redirects
            path_index = path_output_rel.joinpath("index.html")
            if not path_index.exists():
                path_index.write_text(REDIRECT_TEXT.format(first_page=path_page.name))

            _message_box(
                dedent(
                    f"""
                    Page build finished.

                    Your page folder is: {path_page.parent}{os.sep}
                    Open your page at: {path_page}
                    """
                )
            )

        elif cmd_type == "book":
            path_output_rel = Path(op.relpath(output_path, Path()))
            path_index = path_output_rel.joinpath("index.html")
            _message_box(
                f"""\
                Finished generating HTML for {cmd_type}.

                Your book's HTML pages are here:
                    {path_output_rel}{os.sep}

                You can look at your book by opening this file in a browser:
                    {path_index}

                Or paste this line directly into your browser bar:
                    file://{path_index.resolve()}\
                """
            )

    if builder == "pdfhtml":
        print_func(f"Finished generating HTML for {cmd_type}...")
        print_func(f"Converting {cmd_type} HTML into PDF...")
        path_pdf_output = output_path.parent.joinpath("pdf")
        path_pdf_output.mkdir(exist_ok=True)
        if cmd_type == "book":
            path_pdf_output = path_pdf_output.joinpath("book.pdf")
            html_to_pdf(output_path.joinpath("index.html"), path_pdf_output)
        elif cmd_type == "page":
            path_pdf_output = path_pdf_output.joinpath(page_name + ".pdf")
            html_to_pdf(output_path.joinpath(page_name + ".html"), path_pdf_output)
        path_pdf_output_rel = Path(op.relpath(path_pdf_output, Path()))
        _message_box(
            f"""\
            Finished generating PDF via HTML for {cmd_type}. Your PDF is here:

                {path_pdf_output_rel}\
            """
        )

    if builder == "pdflatex":
        print_func(f"Finished generating latex for {cmd_type}...")
        print_func(f"Converting {cmd_type} latex into PDF...")
        # Convert to PDF via tex and template built Makefile and make.bat
        if sys.platform == "win32":
            makecmd = os.environ.get("MAKE", "make.bat")
        else:
            makecmd = os.environ.get("MAKE", "make")
        try:
            output = subprocess.run([makecmd, "all-pdf"], cwd=output_path)
            if output.returncode != 0:
                _error("Error: Failed to build pdf")
                return output.returncode
            _message_box(
                f"""\
                A PDF of your {cmd_type} can be found at:

                    {output_path}
                """
            )
        except OSError:
            _error("Error: Failed to run: %s" % makecmd)
            return 1
Run post-sphinx-build actions.

:param result: the result of the build execution; a status code or an exception
builder_specific_actions
python
jupyter-book/jupyter-book
jupyter_book/cli/main.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/main.py
BSD-3-Clause
def __init__(self, *, entry_point_group: str, **kwargs: Any):
    """Initialize with entry point group."""
    self.exclude_external_plugins = False
    self._entry_point_group: str = entry_point_group
    self._use_internal: Set[str] = kwargs.pop("use_internal", set())
    super().__init__(**kwargs)
Initialize with entry point group.
__init__
python
jupyter-book/jupyter-book
jupyter_book/cli/pluggable.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/pluggable.py
BSD-3-Clause
def list_commands(self, ctx: click.Context) -> Iterable[str]:
    """Add entry point names of available plugins to the command list."""
    subcommands = super().list_commands(ctx)
    if not self.exclude_external_plugins:
        subcommands.extend(get_entry_point_names(self._entry_point_group))
    return subcommands
Add entry point names of available plugins to the command list.
list_commands
python
jupyter-book/jupyter-book
jupyter_book/cli/pluggable.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/pluggable.py
BSD-3-Clause
def get_command(self, ctx: click.Context, name: str) -> click.BaseCommand: """Try to load a subcommand from entry points, else defer to super.""" command = None if self.exclude_external_plugins or name in self._use_internal: command = super().get_command(ctx, name) else: try: command = load_entry_point(self._entry_point_group, name) except KeyError: command = super().get_command(ctx, name) return command
Try to load a subcommand from entry points, else defer to super.
get_command
python
jupyter-book/jupyter-book
jupyter_book/cli/pluggable.py
https://github.com/jupyter-book/jupyter-book/blob/master/jupyter_book/cli/pluggable.py
BSD-3-Clause
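The pluggable group above leans on two small helpers, get_entry_point_names and load_entry_point. A minimal sketch of how they could be written with importlib.metadata follows; the actual jupyter-book implementations may differ, and entry_points(group=...) requires Python 3.10+.

# Hypothetical sketch of the two entry-point helpers used by the
# pluggable group above; the real jupyter-book code may differ.
# Requires Python 3.10+ for selectable entry points.
from importlib.metadata import entry_points


def get_entry_point_names(group):
    """Names of all commands registered under the entry-point group."""
    return [ep.name for ep in entry_points(group=group)]


def load_entry_point(group, name):
    """Load the command registered under group/name (KeyError if absent)."""
    matches = {ep.name: ep for ep in entry_points(group=group)}
    return matches[name].load()

A third-party package would then advertise a click command under the group in its packaging metadata, and list_commands/get_command pick it up automatically.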
def check_source(source_name): """Chooses C or pyx source files, and raises if C is needed but missing""" source_ext = ".pyx" if not HAS_CYTHON: source_name = source_name.replace(".pyx.in", ".c") source_name = source_name.replace(".pyx", ".c") source_ext = ".c" if not os.path.exists(source_name): msg = ( "C source not found. You must have Cython installed to " "build if the C source files have not been generated." ) raise OSError(msg) return source_name, source_ext
Chooses C or pyx source files, and raises if C is needed but missing
check_source
python
statsmodels/statsmodels
setup.py
https://github.com/statsmodels/statsmodels/blob/master/setup.py
BSD-3-Clause
def process_tempita(source_name):
    """Runs pyx.in files through tempita if needed"""
    if source_name.endswith("pyx.in"):
        with open(source_name, encoding="utf-8") as templated:
            pyx_template = templated.read()
        pyx = Tempita.sub(pyx_template)
        pyx_filename = source_name[:-3]
        with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
            pyx_file.write(pyx)
        file_stats = os.stat(source_name)
        try:
            os.utime(
                pyx_filename,
                ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
            )
        except AttributeError:
            os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
        source_name = pyx_filename
    return source_name
Runs pyx.in files through tempita if needed
process_tempita
python
statsmodels/statsmodels
setup.py
https://github.com/statsmodels/statsmodels/blob/master/setup.py
BSD-3-Clause
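For context, a minimal sketch of the kind of expansion Tempita.sub performs above; the template below is invented for illustration (Cython vendors a copy of Tempita, hence the import).

# Invented .pyx.in-style template showing what Tempita.sub does:
# the {{py: ...}} block defines variables, and {{for}} loops expand
# one function definition per dtype.
from Cython import Tempita

template = """\
{{py: dtypes = ['float64', 'float32']}}
{{for dt in dtypes}}
def scale_{{dt}}(x):
    return 2 * x
{{endfor}}
"""

print(Tempita.sub(template))  # emits scale_float64 and scale_float32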
def close_figures(): """ Fixture that closes all figures after a test function has completed Returns ------- closer : callable Function that will close all figures when called. Notes ----- Used by passing as an argument to the function that produces a plot, for example def test_some_plot(close_figures): <test code> If a function creates many figures, then these can be destroyed within a test function by calling close_figures to ensure that the number of figures does not become too large. def test_many_plots(close_figures): for i in range(100000): plt.plot(x,y) close_figures() """ try: import matplotlib.pyplot def close(): matplotlib.pyplot.close("all") except ImportError: def close(): pass yield close close()
Fixture that closes all figures after a test function has completed Returns ------- closer : callable Function that will close all figures when called. Notes ----- Used by passing as an argument to the function that produces a plot, for example def test_some_plot(close_figures): <test code> If a function creates many figures, then these can be destroyed within a test function by calling close_figures to ensure that the number of figures does not become too large. def test_many_plots(close_figures): for i in range(100000): plt.plot(x,y) close_figures()
close_figures
python
statsmodels/statsmodels
statsmodels/conftest.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/conftest.py
BSD-3-Clause
def reset_randomstate():
    """
    Fixture that sets the global RandomState to the fixed seed 1

    Notes
    -----
    Used by passing as an argument to the function that uses the global
    RandomState

    def test_some_plot(reset_randomstate):
        <test code>

    Restores the previous state after the test function exits
    """
    state = np.random.get_state()
    np.random.seed(1)
    yield
    np.random.set_state(state)
Fixture that sets the global RandomState to the fixed seed 1

Notes
-----
Used by passing as an argument to the function that uses the global
RandomState

def test_some_plot(reset_randomstate):
    <test code>

Restores the previous state after the test function exits
reset_randomstate
python
statsmodels/statsmodels
statsmodels/conftest.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/conftest.py
BSD-3-Clause
def test(extra_args=None, exit=False):
    """
    Run the test suite

    Parameters
    ----------
    extra_args : list[str]
        List of arguments to pass to pytest when running the test suite.
        The default is ['--tb=short', '--disable-pytest-warnings'].
    exit : bool
        Flag indicating whether the test runner should exit when finished.

    Returns
    -------
    int
        The status code from the test run if exit is False.
    """
    from .tools._test_runner import PytestTester

    tst = PytestTester(package_path=__file__)
    return tst(extra_args=extra_args, exit=exit)
Run the test suite

Parameters
----------
extra_args : list[str]
    List of arguments to pass to pytest when running the test suite.
    The default is ['--tb=short', '--disable-pytest-warnings'].
exit : bool
    Flag indicating whether the test runner should exit when finished.

Returns
-------
int
    The status code from the test run if exit is False.
test
python
statsmodels/statsmodels
statsmodels/__init__.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/__init__.py
BSD-3-Clause
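A quick usage sketch for the runner above; the extra pytest arguments shown are illustrative.

# Run part of the suite without exiting the interpreter.
import statsmodels

status = statsmodels.test(["--tb=short", "-k", "not slow"], exit=False)
print("pytest returned", status)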
def fit(self, slice_n=20, **kwargs): """ Estimate the EDR space using Sliced Inverse Regression. Parameters ---------- slice_n : int, optional Target number of observations per slice """ # Sample size per slice if len(kwargs) > 0: msg = "SIR.fit does not take any extra keyword arguments" warnings.warn(msg) # Number of slices n_slice = self.exog.shape[0] // slice_n self._prep(n_slice) mn = [z.mean(0) for z in self._split_wexog] n = [z.shape[0] for z in self._split_wexog] mn = np.asarray(mn) n = np.asarray(n) # Estimate Cov E[X | Y=y] mnc = np.dot(mn.T, n[:, None] * mn) / n.sum() a, b = np.linalg.eigh(mnc) jj = np.argsort(-a) a = a[jj] b = b[:, jj] params = np.linalg.solve(self._covxr.T, b) results = DimReductionResults(self, params, eigs=a) return DimReductionResultsWrapper(results)
Estimate the EDR space using Sliced Inverse Regression. Parameters ---------- slice_n : int, optional Target number of observations per slice
fit
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
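A small synthetic sanity check of the estimator above; the data, seed, and slice size are illustrative, and the recovered direction is only identified up to sign and scale.

# Synthetic check: y depends on x only through the direction b, which
# the leading SIR direction should recover up to sign/scale.
import numpy as np
from statsmodels.regression.dimred import SlicedInverseReg

rng = np.random.default_rng(0)
x = rng.standard_normal((500, 4))
b = np.array([1.0, -1.0, 0.0, 0.0])
y = x @ b + 0.1 * rng.standard_normal(500)

res = SlicedInverseReg(y, x).fit(slice_n=25)
print(res.params[:, 0])  # leading EDR direction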
def fit_regularized(self, ndim=1, pen_mat=None, slice_n=20, maxiter=100,
                    gtol=1e-3, start_params=None, **kwargs):
    """
    Estimate the EDR space using regularized SIR.

    Parameters
    ----------
    ndim : int
        The number of EDR directions to estimate
    pen_mat : array_like
        A 2d array such that the squared Frobenius norm of
        ``dot(pen_mat, dirs)`` is added to the objective function, where
        `dirs` is an orthogonal array whose columns span the estimated
        EDR space.
    slice_n : int, optional
        Target number of observations per slice
    maxiter : int
        The maximum number of iterations for estimating the EDR space.
    gtol : float
        If the norm of the gradient of the objective function falls
        below this value, the algorithm has converged.
    start_params : array_like, optional
        Starting value for the EDR directions.

    Returns
    -------
    A results class instance.

    Notes
    -----
    If each row of `exog` can be viewed as containing the values of a
    function evaluated at equally-spaced locations, then setting the
    rows of `pen_mat` to [[1, -2, 1, ...], [0, 1, -2, 1, ..], ...] will
    give smooth EDR coefficients.  This is a form of "functional SIR"
    using the squared second derivative as a penalty.

    References
    ----------
    L. Ferre, A.F. Yao (2003).  Functional sliced inverse regression
    analysis.  Statistics: a journal of theoretical and applied
    statistics 37(6) 475-488.
    """

    if len(kwargs) > 0:
        msg = "SIR.fit_regularized does not take keyword arguments"
        warnings.warn(msg)

    if pen_mat is None:
        raise ValueError("pen_mat is a required argument")

    # Number of slices
    n_slice = self.exog.shape[0] // slice_n

    # Sort the data by endog
    ii = np.argsort(self.endog)
    x = self.exog[ii, :]
    x -= x.mean(0)

    covx = np.cov(x.T)

    # Split the data into slices
    split_exog = np.array_split(x, n_slice)

    mn = [z.mean(0) for z in split_exog]
    n = [z.shape[0] for z in split_exog]
    mn = np.asarray(mn)
    n = np.asarray(n)
    self._slice_props = n / n.sum()

    self.ndim = ndim
    self.k_vars = covx.shape[0]
    self.pen_mat = pen_mat
    self._covx = covx
    self.n_slice = n_slice

    self._slice_means = mn

    if start_params is None:
        params = np.zeros((self.k_vars, ndim))
        params[0:ndim, 0:ndim] = np.eye(ndim)
    else:
        if start_params.shape[1] != ndim:
            msg = "Shape of start_params is not compatible with ndim"
            raise ValueError(msg)
        params = start_params

    params, _, cnvrg = _grass_opt(params, self._regularized_objective,
                                  self._regularized_grad, maxiter, gtol)

    if not cnvrg:
        g = self._regularized_grad(params.ravel())
        gn = np.sqrt(np.dot(g, g))
        msg = "SIR.fit_regularized did not converge, |g|=%f" % gn
        warnings.warn(msg)

    results = DimReductionResults(self, params, eigs=None)
    return DimReductionResultsWrapper(results)
Estimate the EDR space using regularized SIR.

Parameters
----------
ndim : int
    The number of EDR directions to estimate
pen_mat : array_like
    A 2d array such that the squared Frobenius norm of
    ``dot(pen_mat, dirs)`` is added to the objective function, where
    `dirs` is an orthogonal array whose columns span the estimated EDR
    space.
slice_n : int, optional
    Target number of observations per slice
maxiter : int
    The maximum number of iterations for estimating the EDR space.
gtol : float
    If the norm of the gradient of the objective function falls below
    this value, the algorithm has converged.
start_params : array_like, optional
    Starting value for the EDR directions.

Returns
-------
A results class instance.

Notes
-----
If each row of `exog` can be viewed as containing the values of a
function evaluated at equally-spaced locations, then setting the rows
of `pen_mat` to [[1, -2, 1, ...], [0, 1, -2, 1, ..], ...] will give
smooth EDR coefficients.  This is a form of "functional SIR" using the
squared second derivative as a penalty.

References
----------
L. Ferre, A.F. Yao (2003).  Functional sliced inverse regression
analysis.  Statistics: a journal of theoretical and applied statistics
37(6) 475-488.
fit_regularized
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
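The Notes above describe a second-difference penalty; a short sketch of how that pen_mat can be built (the number of covariates p is illustrative):

# Build the second-difference penalty from the Notes: rows of the form
# [..., 1, -2, 1, ...], so the penalty is the squared curvature of the
# coefficient vector ("functional SIR").
import numpy as np

p = 10
pen_mat = np.zeros((p - 2, p))
for i in range(p - 2):
    pen_mat[i, i:i + 3] = [1.0, -2.0, 1.0]
# Scale pen_mat by a tuning constant to control the degree of smoothing.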
def fit(self, **kwargs): """ Estimate the EDR space using PHD. Parameters ---------- resid : bool, optional If True, use least squares regression to remove the linear relationship between each covariate and the response, before conducting PHD. Returns ------- A results instance which can be used to access the estimated parameters. """ resid = kwargs.get("resid", False) y = self.endog - self.endog.mean() x = self.exog - self.exog.mean(0) if resid: from statsmodels.regression.linear_model import OLS r = OLS(y, x).fit() y = r.resid cm = np.einsum('i,ij,ik->jk', y, x, x) cm /= len(y) cx = np.cov(x.T) cb = np.linalg.solve(cx, cm) a, b = np.linalg.eig(cb) jj = np.argsort(-np.abs(a)) a = a[jj] params = b[:, jj] results = DimReductionResults(self, params, eigs=a) return DimReductionResultsWrapper(results)
Estimate the EDR space using PHD. Parameters ---------- resid : bool, optional If True, use least squares regression to remove the linear relationship between each covariate and the response, before conducting PHD. Returns ------- A results instance which can be used to access the estimated parameters.
fit
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
def fit(self, **kwargs): """ Estimate the EDR space. Parameters ---------- slice_n : int Number of observations per slice """ # Sample size per slice slice_n = kwargs.get("slice_n", 50) # Number of slices n_slice = self.exog.shape[0] // slice_n self._prep(n_slice) cv = [np.cov(z.T) for z in self._split_wexog] ns = [z.shape[0] for z in self._split_wexog] p = self.wexog.shape[1] if not self.bc: # Cook's original approach vm = 0 for w, cvx in zip(ns, cv): icv = np.eye(p) - cvx vm += w * np.dot(icv, icv) vm /= len(cv) else: # The bias-corrected approach of Li and Zhu # \Lambda_n in Li, Zhu av = 0 for c in cv: av += np.dot(c, c) av /= len(cv) # V_n in Li, Zhu vn = 0 for x in self._split_wexog: r = x - x.mean(0) for i in range(r.shape[0]): u = r[i, :] m = np.outer(u, u) vn += np.dot(m, m) vn /= self.exog.shape[0] c = np.mean(ns) k1 = c * (c - 1) / ((c - 1)**2 + 1) k2 = (c - 1) / ((c - 1)**2 + 1) av2 = k1 * av - k2 * vn vm = np.eye(p) - 2 * sum(cv) / len(cv) + av2 a, b = np.linalg.eigh(vm) jj = np.argsort(-a) a = a[jj] b = b[:, jj] params = np.linalg.solve(self._covxr.T, b) results = DimReductionResults(self, params, eigs=a) return DimReductionResultsWrapper(results)
Estimate the EDR space. Parameters ---------- slice_n : int Number of observations per slice
fit
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
def _grass_opt(params, fun, grad, maxiter, gtol): """ Minimize a function on a Grassmann manifold. Parameters ---------- params : array_like Starting value for the optimization. fun : function The function to be minimized. grad : function The gradient of fun. maxiter : int The maximum number of iterations. gtol : float Convergence occurs when the gradient norm falls below this value. Returns ------- params : array_like The minimizing value for the objective function. fval : float The smallest achieved value of the objective function. cnvrg : bool True if the algorithm converged to a limit point. Notes ----- `params` is 2-d, but `fun` and `grad` should take 1-d arrays `params.ravel()` as arguments. Reference --------- A Edelman, TA Arias, ST Smith (1998). The geometry of algorithms with orthogonality constraints. SIAM J Matrix Anal Appl. http://math.mit.edu/~edelman/publications/geometry_of_algorithms.pdf """ p, d = params.shape params = params.ravel() f0 = fun(params) cnvrg = False for _ in range(maxiter): # Project the gradient to the tangent space g = grad(params) g -= np.dot(g, params) * params / np.dot(params, params) if np.sqrt(np.sum(g * g)) < gtol: cnvrg = True break gm = g.reshape((p, d)) u, s, vt = np.linalg.svd(gm, 0) paramsm = params.reshape((p, d)) pa0 = np.dot(paramsm, vt.T) def geo(t): # Parameterize the geodesic path in the direction # of the gradient as a function of a real value t. pa = pa0 * np.cos(s * t) + u * np.sin(s * t) return np.dot(pa, vt).ravel() # Try to find a downhill step along the geodesic path. step = 2. while step > 1e-10: pa = geo(-step) f1 = fun(pa) if f1 < f0: params = pa f0 = f1 break step /= 2 params = params.reshape((p, d)) return params, f0, cnvrg
Minimize a function on a Grassmann manifold. Parameters ---------- params : array_like Starting value for the optimization. fun : function The function to be minimized. grad : function The gradient of fun. maxiter : int The maximum number of iterations. gtol : float Convergence occurs when the gradient norm falls below this value. Returns ------- params : array_like The minimizing value for the objective function. fval : float The smallest achieved value of the objective function. cnvrg : bool True if the algorithm converged to a limit point. Notes ----- `params` is 2-d, but `fun` and `grad` should take 1-d arrays `params.ravel()` as arguments. Reference --------- A Edelman, TA Arias, ST Smith (1998). The geometry of algorithms with orthogonality constraints. SIAM J Matrix Anal Appl. http://math.mit.edu/~edelman/publications/geometry_of_algorithms.pdf
_grass_opt
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
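A rough sanity check for this private optimizer: minimizing f(P) = -tr(P'AP) over p x d frames should land (approximately) in the dominant eigenspace of a symmetric A. The toy problem below is illustrative, and convergence is not guaranteed for every seed.

# Toy use of _grass_opt: minimize -trace(P' A P) over 6 x 2 frames.
import numpy as np

rng = np.random.default_rng(1)
a = rng.standard_normal((6, 6))
a = a + a.T  # symmetric

p, d = 6, 2

def fun(v):
    pm = v.reshape(p, d)
    return -np.trace(pm.T @ a @ pm)

def grad(v):
    return (-2.0 * a @ v.reshape(p, d)).ravel()

start = np.zeros((p, d))
start[:d, :d] = np.eye(d)
params, fval, cnvrg = _grass_opt(start, fun, grad, 500, 1e-8)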
def loglike(self, params):
    """
    Evaluate the log-likelihood

    Parameters
    ----------
    params : array_like
        The projection matrix used to reduce the covariances,
        flattened to 1d.

    Returns the log-likelihood.
    """

    p = self.covm.shape[0]
    proj = params.reshape((p, self.dim))

    c = np.dot(proj.T, np.dot(self.covm, proj))
    _, ldet = np.linalg.slogdet(c)
    f = self.nobs * ldet / 2

    for j, cov in enumerate(self.covs):
        c = np.dot(proj.T, np.dot(cov, proj))
        _, ldet = np.linalg.slogdet(c)
        f -= self.ns[j] * ldet / 2

    return f
Evaluate the log-likelihood Parameters ---------- params : array_like The projection matrix used to reduce the covariances, flattened to 1d. Returns the log-likelihood.
loglike
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
def score(self, params): """ Evaluate the score function. Parameters ---------- params : array_like The projection matrix used to reduce the covariances, flattened to 1d. Returns the score function evaluated at 'params'. """ p = self.covm.shape[0] proj = params.reshape((p, self.dim)) c0 = np.dot(proj.T, np.dot(self.covm, proj)) cP = np.dot(self.covm, proj) g = self.nobs * np.linalg.solve(c0, cP.T).T for j, c in enumerate(self.covs): c0 = np.dot(proj.T, np.dot(c, proj)) cP = np.dot(c, proj) g -= self.ns[j] * np.linalg.solve(c0, cP.T).T return g.ravel()
Evaluate the score function. Parameters ---------- params : array_like The projection matrix used to reduce the covariances, flattened to 1d. Returns the score function evaluated at 'params'.
score
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
def fit(self, start_params=None, maxiter=200, gtol=1e-4): """ Fit the covariance reduction model. Parameters ---------- start_params : array_like Starting value for the projection matrix. May be rectangular, or flattened. maxiter : int The maximum number of gradient steps to take. gtol : float Convergence criterion for the gradient norm. Returns ------- A results instance that can be used to access the fitted parameters. """ p = self.covm.shape[0] d = self.dim # Starting value for params if start_params is None: params = np.zeros((p, d)) params[0:d, 0:d] = np.eye(d) params = params else: params = start_params # _grass_opt is designed for minimization, we are doing maximization # here so everything needs to be flipped. params, llf, cnvrg = _grass_opt(params, lambda x: -self.loglike(x), lambda x: -self.score(x), maxiter, gtol) llf *= -1 if not cnvrg: g = self.score(params.ravel()) gn = np.sqrt(np.sum(g * g)) msg = "CovReduce optimization did not converge, |g|=%f" % gn warnings.warn(msg, ConvergenceWarning) results = DimReductionResults(self, params, eigs=None) results.llf = llf return DimReductionResultsWrapper(results)
Fit the covariance reduction model. Parameters ---------- start_params : array_like Starting value for the projection matrix. May be rectangular, or flattened. maxiter : int The maximum number of gradient steps to take. gtol : float Convergence criterion for the gradient norm. Returns ------- A results instance that can be used to access the fitted parameters.
fit
python
statsmodels/statsmodels
statsmodels/regression/dimred.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/dimred.py
BSD-3-Clause
def get_cov(self, time, sc, sm):
    """
    Returns the covariance matrix for given time values.

    Parameters
    ----------
    time : array_like
        The time points for the observations.  If len(time) = p,
        a pxp covariance matrix is returned.
    sc : array_like
        The scaling parameters for the observations.
    sm : array_like
        The smoothness parameters for the observations.  See class
        docstring for details.
    """
    raise NotImplementedError
Returns the covariance matrix for given time values.

Parameters
----------
time : array_like
    The time points for the observations.  If len(time) = p, a pxp
    covariance matrix is returned.
sc : array_like
    The scaling parameters for the observations.
sm : array_like
    The smoothness parameters for the observations.  See class
    docstring for details.
get_cov
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def jac(self, time, sc, sm): """ The Jacobian of the covariance with respect to the parameters. See get_cov for parameters. Returns ------- jsc : list-like jsc[i] is the derivative of the covariance matrix with respect to the i^th scaling parameter. jsm : list-like jsm[i] is the derivative of the covariance matrix with respect to the i^th smoothness parameter. """ raise NotImplementedError
The Jacobian of the covariance with respect to the parameters. See get_cov for parameters. Returns ------- jsc : list-like jsc[i] is the derivative of the covariance matrix with respect to the i^th scaling parameter. jsm : list-like jsm[i] is the derivative of the covariance matrix with respect to the i^th smoothness parameter.
jac
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def unpack(self, z): """ Split the packed parameter vector into blocks. """ # Mean parameters pm = self.exog.shape[1] mnpar = z[0:pm] # Standard deviation parameters pv = self.exog_scale.shape[1] scpar = z[pm:pm + pv] # Smoothness parameters ps = self.exog_smooth.shape[1] smpar = z[pm + pv:pm + pv + ps] # Observation white noise standard deviation. # Empty if has_noise = False. nopar = z[pm + pv + ps:] return mnpar, scpar, smpar, nopar
Split the packed parameter vector into blocks.
unpack
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
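The packed layout unpack() assumes, in miniature; the block sizes here are invented for illustration.

# Miniature of the packed layout: mean block, then scale, smoothness,
# and (if present) noise parameters.
import numpy as np

pm, pv, ps = 3, 2, 2
z = np.arange(float(pm + pv + ps + 1))
mnpar = z[0:pm]
scpar = z[pm:pm + pv]
smpar = z[pm + pv:pm + pv + ps]
nopar = z[pm + pv + ps:]  # empty when the model has no noise term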
def loglike(self, params): """ Calculate the log-likelihood function for the model. Parameters ---------- params : array_like The packed parameters for the model. Returns ------- The log-likelihood value at the given parameter point. Notes ----- The mean, scaling, and smoothing parameters are packed into a vector. Use `unpack` to access the component vectors. """ mnpar, scpar, smpar, nopar = self.unpack(params) # Residuals resid = self.endog - np.dot(self.exog, mnpar) # Scaling parameters sc = np.exp(np.dot(self.exog_scale, scpar)) # Smoothness parameters sm = np.exp(np.dot(self.exog_smooth, smpar)) # White noise standard deviation if self._has_noise: no = np.exp(np.dot(self.exog_noise, nopar)) # Get the log-likelihood ll = 0. for _, ix in self._groups_ix.items(): # Get the covariance matrix for this person. cm = self.cov.get_cov(self.time[ix], sc[ix], sm[ix]) # The variance of the additive noise, if present. if self._has_noise: cm.flat[::cm.shape[0] + 1] += no[ix]**2 re = resid[ix] ll -= 0.5 * np.linalg.slogdet(cm)[1] ll -= 0.5 * np.dot(re, np.linalg.solve(cm, re)) if self.verbose: print("L=", ll) return ll
Calculate the log-likelihood function for the model. Parameters ---------- params : array_like The packed parameters for the model. Returns ------- The log-likelihood value at the given parameter point. Notes ----- The mean, scaling, and smoothing parameters are packed into a vector. Use `unpack` to access the component vectors.
loglike
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def score(self, params): """ Calculate the score function for the model. Parameters ---------- params : array_like The packed parameters for the model. Returns ------- The score vector at the given parameter point. Notes ----- The mean, scaling, and smoothing parameters are packed into a vector. Use `unpack` to access the component vectors. """ mnpar, scpar, smpar, nopar = self.unpack(params) pm, pv, ps = len(mnpar), len(scpar), len(smpar) # Residuals resid = self.endog - np.dot(self.exog, mnpar) # Scaling sc = np.exp(np.dot(self.exog_scale, scpar)) # Smoothness sm = np.exp(np.dot(self.exog_smooth, smpar)) # White noise standard deviation if self._has_noise: no = np.exp(np.dot(self.exog_noise, nopar)) # Get the log-likelihood score = np.zeros(len(mnpar) + len(scpar) + len(smpar) + len(nopar)) for _, ix in self._groups_ix.items(): sc_i = sc[ix] sm_i = sm[ix] resid_i = resid[ix] time_i = self.time[ix] exog_i = self.exog[ix, :] exog_scale_i = self.exog_scale[ix, :] exog_smooth_i = self.exog_smooth[ix, :] # Get the covariance matrix for this person. cm = self.cov.get_cov(time_i, sc_i, sm_i) if self._has_noise: no_i = no[ix] exog_noise_i = self.exog_noise[ix, :] cm.flat[::cm.shape[0] + 1] += no[ix]**2 cmi = np.linalg.inv(cm) jacv, jacs = self.cov.jac(time_i, sc_i, sm_i) # The derivatives for the mean parameters. dcr = np.linalg.solve(cm, resid_i) score[0:pm] += np.dot(exog_i.T, dcr) # The derivatives for the scaling parameters. rx = np.outer(resid_i, resid_i) qm = np.linalg.solve(cm, rx) qm = 0.5 * np.linalg.solve(cm, qm.T) scx = sc_i[:, None] * exog_scale_i for i, _ in enumerate(ix): jq = np.sum(jacv[i] * qm) score[pm:pm + pv] += jq * scx[i, :] score[pm:pm + pv] -= 0.5 * np.sum(jacv[i] * cmi) * scx[i, :] # The derivatives for the smoothness parameters. smx = sm_i[:, None] * exog_smooth_i for i, _ in enumerate(ix): jq = np.sum(jacs[i] * qm) score[pm + pv:pm + pv + ps] += jq * smx[i, :] score[pm + pv:pm + pv + ps] -= ( 0.5 * np.sum(jacs[i] * cmi) * smx[i, :]) # The derivatives with respect to the standard deviation parameters if self._has_noise: sno = no_i[:, None]**2 * exog_noise_i score[pm + pv + ps:] -= np.dot(cmi.flat[::cm.shape[0] + 1], sno) bm = np.dot(cmi, np.dot(rx, cmi)) score[pm + pv + ps:] += np.dot(bm.flat[::bm.shape[0] + 1], sno) if self.verbose: print("|G|=", np.sqrt(np.sum(score * score))) return score
Calculate the score function for the model. Parameters ---------- params : array_like The packed parameters for the model. Returns ------- The score vector at the given parameter point. Notes ----- The mean, scaling, and smoothing parameters are packed into a vector. Use `unpack` to access the component vectors.
score
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def fit(self, start_params=None, method=None, maxiter=None,
        **kwargs):
    """
    Fit a grouped Gaussian process regression using MLE.

    Parameters
    ----------
    start_params : array_like
        Optional starting values.
    method : str or array of str
        Method or sequence of methods for scipy optimize.
    maxiter : int
        The maximum number of iterations in the optimization.

    Returns
    -------
    An instance of ProcessMLEResults.
    """

    if "verbose" in kwargs:
        self.verbose = kwargs["verbose"]

    minim_opts = {}
    if "minim_opts" in kwargs:
        minim_opts = kwargs["minim_opts"]

    if start_params is None:
        start_params = self._get_start()

    if isinstance(method, str):
        method = [method]
    elif method is None:
        method = ["powell", "bfgs"]

    for j, meth in enumerate(method):

        if meth not in ("powell",):
            def jac(x):
                return -self.score(x)
        else:
            jac = None

        if maxiter is not None:
            if np.isscalar(maxiter):
                minim_opts["maxiter"] = maxiter
            else:
                minim_opts["maxiter"] = maxiter[j % len(maxiter)]

        f = minimize(
            lambda x: -self.loglike(x),
            method=meth,
            x0=start_params,
            jac=jac,
            options=minim_opts)

        if not f.success:
            msg = "Fitting did not converge"
            if jac is not None:
                msg += ", |gradient|=%.6f" % np.sqrt(np.sum(f.jac**2))
            if j < len(method) - 1:
                msg += ", trying %s next..." % method[j + 1]
            warnings.warn(msg)

        if np.isfinite(f.x).all():
            start_params = f.x

    hess = self.hessian(f.x)
    try:
        cov_params = -np.linalg.inv(hess)
    except Exception:
        cov_params = None

    # Simple container holding the values ProcessMLEResults expects.
    class _Holder:
        pass

    r = _Holder()
    r.params = f.x
    r.normalized_cov_params = cov_params
    r.optim_retvals = f
    r.scale = 1

    return ProcessMLEResults(self, r)
Fit a grouped Gaussian process regression using MLE. Parameters ---------- start_params : array_like Optional starting values. method : str or array of str Method or sequence of methods for scipy optimize. maxiter : int The maximum number of iterations in the optimization. Returns ------- An instance of ProcessMLEResults.
fit
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def covariance(self, time, scale_params, smooth_params, scale_data,
               smooth_data):
    """
    Returns a Gaussian process covariance matrix.

    Parameters
    ----------
    time : array_like
        The time points at which the fitted covariance matrix is
        calculated.
    scale_params : array_like
        The regression parameters for the scaling part of the
        covariance structure.
    smooth_params : array_like
        The regression parameters for the smoothing part of the
        covariance structure.
    scale_data : DataFrame
        The data used to determine the scale parameter,
        must have len(time) rows.
    smooth_data : DataFrame
        The data used to determine the smoothness parameter,
        must have len(time) rows.

    Returns
    -------
    A covariance matrix.

    Notes
    -----
    If the model was fit using formulas, `scale_data` and `smooth_data`
    should be DataFrames containing all variables that were present in
    the respective scaling and smoothing formulas used to fit the
    model.  Otherwise, `scale_data` and `smooth_data` should contain
    data arrays whose columns align with the fitted scaling and
    smoothing parameters.

    The covariance is only for the Gaussian process and does not
    include the white noise variance.
    """

    if not hasattr(self.data, "scale_model_spec"):
        sca = np.dot(scale_data, scale_params)
        smo = np.dot(smooth_data, smooth_params)
    else:
        mgr = FormulaManager()
        sc = mgr.get_matrices(self.data.scale_model_spec, scale_data,
                              pandas=False)
        sm = mgr.get_matrices(self.data.smooth_model_spec, smooth_data,
                              pandas=False)
        sca = np.exp(np.dot(sc, scale_params))
        smo = np.exp(np.dot(sm, smooth_params))

    return self.cov.get_cov(time, sca, smo)
Returns a Gaussian process covariance matrix.

Parameters
----------
time : array_like
    The time points at which the fitted covariance matrix is
    calculated.
scale_params : array_like
    The regression parameters for the scaling part of the covariance
    structure.
smooth_params : array_like
    The regression parameters for the smoothing part of the covariance
    structure.
scale_data : DataFrame
    The data used to determine the scale parameter, must have
    len(time) rows.
smooth_data : DataFrame
    The data used to determine the smoothness parameter, must have
    len(time) rows.

Returns
-------
A covariance matrix.

Notes
-----
If the model was fit using formulas, `scale_data` and `smooth_data`
should be DataFrames containing all variables that were present in the
respective scaling and smoothing formulas used to fit the model.
Otherwise, `scale_data` and `smooth_data` should contain data arrays
whose columns align with the fitted scaling and smoothing parameters.

The covariance is only for the Gaussian process and does not include
the white noise variance.
covariance
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def predict(self, params, exog=None, *args, **kwargs): """ Obtain predictions of the mean structure. Parameters ---------- params : array_like The model parameters, may be truncated to include only mean parameters. exog : array_like The design matrix for the mean structure. If not provided, the model's design matrix is used. """ if exog is None: exog = self.exog elif hasattr(self.data, "model_spec"): # Run the provided data through the formula if present mgr = FormulaManager() exog = mgr.get_matrices(self.data.model_spec, exog) if len(params) > exog.shape[1]: params = params[0:exog.shape[1]] return np.dot(exog, params)
Obtain predictions of the mean structure. Parameters ---------- params : array_like The model parameters, may be truncated to include only mean parameters. exog : array_like The design matrix for the mean structure. If not provided, the model's design matrix is used.
predict
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def covariance(self, time, scale, smooth):
    """
    Returns a fitted covariance matrix.

    Parameters
    ----------
    time : array_like
        The time points at which the fitted covariance matrix is
        calculated.
    scale : array_like
        The data used to determine the scale parameter,
        must have len(time) rows.
    smooth : array_like
        The data used to determine the smoothness parameter,
        must have len(time) rows.

    Returns
    -------
    A covariance matrix.

    Notes
    -----
    If the model was fit using formulas, `scale` and `smooth` should
    be DataFrames containing all variables that were present in the
    respective scaling and smoothing formulas used to fit the model.
    Otherwise, `scale` and `smooth` should be data arrays whose
    columns align with the fitted scaling and smoothing parameters.
    """

    return self.model.covariance(time, self.scale_params,
                                 self.smooth_params, scale, smooth)
Returns a fitted covariance matrix.

Parameters
----------
time : array_like
    The time points at which the fitted covariance matrix is
    calculated.
scale : array_like
    The data used to determine the scale parameter, must have
    len(time) rows.
smooth : array_like
    The data used to determine the smoothness parameter, must have
    len(time) rows.

Returns
-------
A covariance matrix.

Notes
-----
If the model was fit using formulas, `scale` and `smooth` should be
DataFrames containing all variables that were present in the
respective scaling and smoothing formulas used to fit the model.
Otherwise, `scale` and `smooth` should be data arrays whose columns
align with the fitted scaling and smoothing parameters.
covariance
python
statsmodels/statsmodels
statsmodels/regression/process_regression.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/process_regression.py
BSD-3-Clause
def _reset(self, idx): """Compute xpx and xpy using a single dot product""" _, wy, wx, _, not_missing = self._get_data(idx) nobs = not_missing.sum() xpx = wx.T @ wx xpy = wx.T @ wy return xpx, xpy, nobs
Compute xpx and xpy using a single dot product
_reset
python
statsmodels/statsmodels
statsmodels/regression/rolling.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/rolling.py
BSD-3-Clause
def fit(
    self,
    method="inv",
    cov_type="nonrobust",
    cov_kwds=None,
    reset=None,
    use_t=False,
    params_only=False,
):
    """
    Estimate model parameters.

    Parameters
    ----------
    method : {'inv', 'lstsq', 'pinv'}
        Method to use when computing the model parameters.

        * 'inv' - use moving windows inner-products and matrix inversion.
          This method is the fastest, but may be less accurate than the
          other methods.
        * 'lstsq' - Use numpy.linalg.lstsq
        * 'pinv' - Use numpy.linalg.pinv. This method matches the default
          estimator in non-moving regression estimators.
    cov_type : {'nonrobust', 'HCCM', 'HC0'}
        Covariance estimator:

        * nonrobust - The classic OLS covariance estimator
        * HCCM, HC0 - White heteroskedasticity robust covariance
    cov_kwds : dict
        Unused
    reset : int, optional
        Interval to recompute the moving window inner products used to
        estimate the model parameters. Smaller values improve accuracy,
        although in practice this setting is not required to be set.
    use_t : bool, optional
        Flag indicating to use the Student's t distribution when computing
        p-values.
    params_only : bool, optional
        Flag indicating that only parameters should be computed. Avoids
        calculating all other statistics or performing inference.

    Returns
    -------
    RollingRegressionResults
        Estimation results where all pre-sample values are nan-filled.
    """
    method = string_like(
        method, "method", options=("inv", "lstsq", "pinv")
    )
    reset = int_like(reset, "reset", optional=True)
    reset = self._y.shape[0] if reset is None else reset
    if reset < 1:
        raise ValueError("reset must be a positive integer")

    nobs, k = self._x.shape
    store = RollingStore(
        params=np.full((nobs, k), np.nan),
        ssr=np.full(nobs, np.nan),
        llf=np.full(nobs, np.nan),
        nobs=np.zeros(nobs, dtype=int),
        s2=np.full(nobs, np.nan),
        xpxi=np.full((nobs, k, k), np.nan),
        xeex=np.full((nobs, k, k), np.nan),
        centered_tss=np.full(nobs, np.nan),
        uncentered_tss=np.full(nobs, np.nan),
    )
    w = self._window
    first = self._min_nobs if self._expanding else w
    xpx, xpy, nobs = self._reset(first)
    if not (self._has_nan[first - 1] and self._skip_missing):
        self._fit_single(first, xpx, xpy, nobs, store, params_only, method)
    wx, wy = self._wx, self._wy
    for i in range(first + 1, self._x.shape[0] + 1):
        if self._has_nan[i - 1] and self._skip_missing:
            continue
        if i % reset == 0:
            xpx, xpy, nobs = self._reset(i)
        else:
            if not self._is_nan[i - w - 1] and i > w:
                remove_x = wx[i - w - 1 : i - w]
                xpx -= remove_x.T @ remove_x
                xpy -= remove_x.T @ wy[i - w - 1 : i - w]
                nobs -= 1
            if not self._is_nan[i - 1]:
                add_x = wx[i - 1 : i]
                xpx += add_x.T @ add_x
                xpy += add_x.T @ wy[i - 1 : i]
                nobs += 1
        self._fit_single(i, xpx, xpy, nobs, store, params_only, method)

    return RollingRegressionResults(
        self, store, self.k_constant, use_t, cov_type
    )
Estimate model parameters.

Parameters
----------
method : {'inv', 'lstsq', 'pinv'}
    Method to use when computing the model parameters.

    * 'inv' - use moving windows inner-products and matrix inversion.
      This method is the fastest, but may be less accurate than the
      other methods.
    * 'lstsq' - Use numpy.linalg.lstsq
    * 'pinv' - Use numpy.linalg.pinv. This method matches the default
      estimator in non-moving regression estimators.
cov_type : {'nonrobust', 'HCCM', 'HC0'}
    Covariance estimator:

    * nonrobust - The classic OLS covariance estimator
    * HCCM, HC0 - White heteroskedasticity robust covariance
cov_kwds : dict
    Unused
reset : int, optional
    Interval to recompute the moving window inner products used to
    estimate the model parameters. Smaller values improve accuracy,
    although in practice this setting is not required to be set.
use_t : bool, optional
    Flag indicating to use the Student's t distribution when computing
    p-values.
params_only : bool, optional
    Flag indicating that only parameters should be computed. Avoids
    calculating all other statistics or performing inference.

Returns
-------
RollingRegressionResults
    Estimation results where all pre-sample values are nan-filled.
fit
python
statsmodels/statsmodels
statsmodels/regression/rolling.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/rolling.py
BSD-3-Clause
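Typical use of this fit() via RollingOLS; the data and window length below are illustrative.

# Usage sketch: rolling OLS with a 60-observation window; results are
# nan-filled before the first complete window.
import numpy as np
import statsmodels.api as sm
from statsmodels.regression.rolling import RollingOLS

rng = np.random.default_rng(0)
x = sm.add_constant(rng.standard_normal((200, 2)))
y = x @ np.array([1.0, 2.0, -1.0]) + rng.standard_normal(200)

res = RollingOLS(y, x, window=60).fit(method="inv")
print(res.params[-3:])  # one coefficient row per window end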
def _wrap(self, val): """Wrap output as pandas Series or DataFrames as needed""" if not self._use_pandas: return val col_names = self.model.data.param_names row_names = self.model.data.row_labels if val.ndim == 1: return Series(val, index=row_names) if val.ndim == 2: return DataFrame(val, columns=col_names, index=row_names) else: # ndim == 3 mi = MultiIndex.from_product((row_names, col_names)) val = np.reshape(val, (-1, val.shape[-1])) return DataFrame(val, columns=col_names, index=mi)
Wrap output as pandas Series or DataFrames as needed
_wrap
python
statsmodels/statsmodels
statsmodels/regression/rolling.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/rolling.py
BSD-3-Clause
def params(self): """Estimated model parameters""" return self._wrap(self._params)
Estimated model parameters
params
python
statsmodels/statsmodels
statsmodels/regression/rolling.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/rolling.py
BSD-3-Clause
def k_constant(self): """Flag indicating whether the model contains a constant""" return self._k_constant
Flag indicating whether the model contains a constant
k_constant
python
statsmodels/statsmodels
statsmodels/regression/rolling.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/rolling.py
BSD-3-Clause
def cov_params(self): """ Estimated parameter covariance Returns ------- array_like The estimated model covariances. If the original input is a numpy array, the returned covariance is a 3-d array with shape (nobs, nvar, nvar). If the original inputs are pandas types, then the returned covariance is a DataFrame with a MultiIndex with key (observation, variable), so that the covariance for observation with index i is cov.loc[i]. """ return self._wrap(self._cov_params)
Estimated parameter covariance Returns ------- array_like The estimated model covariances. If the original input is a numpy array, the returned covariance is a 3-d array with shape (nobs, nvar, nvar). If the original inputs are pandas types, then the returned covariance is a DataFrame with a MultiIndex with key (observation, variable), so that the covariance for observation with index i is cov.loc[i].
cov_params
python
statsmodels/statsmodels
statsmodels/regression/rolling.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/rolling.py
BSD-3-Clause
def cov_type(self): """Name of covariance estimator""" return self._cov_type
Name of covariance estimator
cov_type
python
statsmodels/statsmodels
statsmodels/regression/rolling.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/rolling.py
BSD-3-Clause
def iterative_fit(self, maxiter=3):
    """
    Perform an iterative two-step procedure to estimate a WLS model.

    The model is assumed to have heteroskedastic errors.
    The variance is estimated by OLS regression of the link transformed
    squared residuals on Z, i.e.::

       link(sigma_i) = x_i*gamma.

    Parameters
    ----------
    maxiter : int, optional
        the number of iterations

    Notes
    -----
    maxiter=1: returns the estimate based on given weights
    maxiter=2: performs a second estimation with the updated weights,
               this is 2-step estimation
    maxiter>2: iteratively estimate and update the weights

    TODO: possible extension: stop iteration if change in parameter
    estimates is smaller than x_tol

    Repeated calls to iterative_fit will do one redundant pinv_wexog
    calculation.  Calling iterative_fit(maxiter) once does not do any
    redundant recalculations (whitening or calculating pinv_wexog).
    """
    import collections

    self.history = collections.defaultdict(list)  # not really necessary
    res_resid = None  # if maxiter < 2, no updating
    for i in range(maxiter):
        # pinv_wexog is cached, so drop it before refitting
        if hasattr(self, 'pinv_wexog'):
            del self.pinv_wexog
        results = self.fit()
        self.history['self_params'].append(results.params)
        if not i == maxiter - 1:  # skip for last iteration, could break instead
            self.results_old = results  # for debugging
            # estimate heteroscedasticity
            res_resid = OLS(self.link(results.resid**2), self.exog_var).fit()
            self.history['ols_params'].append(res_resid.params)
            # update weights
            self.weights = 1. / self.linkinv(res_resid.fittedvalues)
            self.weights /= self.weights.max()  # not required
            self.weights[self.weights < 1e-14] = 1e-14  # clip
            self.initialize()

    # note: results is the wrapper, results._results is the results instance
    results._results.results_residual_regression = res_resid
    return results
Perform an iterative two-step procedure to estimate a WLS model.

The model is assumed to have heteroskedastic errors.
The variance is estimated by OLS regression of the link transformed
squared residuals on Z, i.e.::

   link(sigma_i) = x_i*gamma.

Parameters
----------
maxiter : int, optional
    the number of iterations

Notes
-----
maxiter=1: returns the estimate based on given weights
maxiter=2: performs a second estimation with the updated weights, this
           is 2-step estimation
maxiter>2: iteratively estimate and update the weights

TODO: possible extension: stop iteration if change in parameter
estimates is smaller than x_tol

Repeated calls to iterative_fit will do one redundant pinv_wexog
calculation.  Calling iterative_fit(maxiter) once does not do any
redundant recalculations (whitening or calculating pinv_wexog).
iterative_fit
python
statsmodels/statsmodels
statsmodels/regression/feasible_gls.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/feasible_gls.py
BSD-3-Clause
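A sketch of the two-step FGLS loop above on synthetic data, assuming the enclosing class is GLSHet from statsmodels.regression.feasible_gls (an assumption based on the file path; check the module if the name differs). The variance is driven by z through the default link.

# FGLS usage sketch: error variance grows with z, which exog_var models.
import numpy as np
from statsmodels.regression.feasible_gls import GLSHet

rng = np.random.default_rng(0)
n = 500
x = np.column_stack([np.ones(n), rng.standard_normal(n)])
z = np.abs(x[:, 1])
y = x @ np.array([1.0, 2.0]) + np.exp(0.5 * z) * rng.standard_normal(n)

mod = GLSHet(y, x, exog_var=np.column_stack([np.ones(n), z]))
res = mod.iterative_fit(maxiter=3)
print(res.params)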
def _dot(x, y): """ Returns the dot product of the arrays, works for sparse and dense. """ if isinstance(x, np.ndarray) and isinstance(y, np.ndarray): return np.dot(x, y) elif sparse.issparse(x): return x.dot(y) elif sparse.issparse(y): return y.T.dot(x.T).T
Returns the dot product of the arrays, works for sparse and dense.
_dot
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def _multi_dot_three(A, B, C):
    """
    Find the best ordering for three arrays and do the multiplication.

    Doing it manually instead of using dynamic programming is
    approximately 15 times faster.
    """
    # cost1 = cost((AB)C)
    cost1 = (A.shape[0] * A.shape[1] * B.shape[1] +  # (AB)
             A.shape[0] * B.shape[1] * C.shape[1])   # (--)C
    # cost2 = cost(A(BC))
    cost2 = (B.shape[0] * B.shape[1] * C.shape[1] +  # (BC)
             A.shape[0] * A.shape[1] * C.shape[1])   # A(--)

    if cost1 < cost2:
        return _dot(_dot(A, B), C)
    else:
        return _dot(A, _dot(B, C))
Find the best ordering for three arrays and do the multiplication.

Doing it manually instead of using dynamic programming is
approximately 15 times faster.
_multi_dot_three
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
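A worked instance of the cost comparison above (the shapes are invented):

# With these shapes,
#   cost((AB)C) = 10*100*5 + 10*5*50   =  7,500 multiplies
#   cost(A(BC)) = 100*5*50 + 10*100*50 = 75,000 multiplies
# so the first ordering is chosen.
import numpy as np

A = np.ones((10, 100))
B = np.ones((100, 5))
C = np.ones((5, 50))
out = _multi_dot_three(A, B, C)  # evaluates (A @ B) @ C here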
def _dotsum(x, y): """ Returns sum(x * y), where '*' is the pointwise product, computed efficiently for dense and sparse matrices. """ if sparse.issparse(x): return x.multiply(y).sum() else: # This way usually avoids allocating a temporary. return np.dot(x.ravel(), y.ravel())
Returns sum(x * y), where '*' is the pointwise product, computed efficiently for dense and sparse matrices.
_dotsum
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def _get_exog_re_names(self, exog_re): """ Passes through if given a list of names. Otherwise, gets pandas names or creates some generic variable names as needed. """ if self.k_re == 0: return [] if isinstance(exog_re, pd.DataFrame): return exog_re.columns.tolist() elif isinstance(exog_re, pd.Series) and exog_re.name is not None: return [exog_re.name] elif isinstance(exog_re, list): return exog_re # Default names defnames = [f"x_re{k + 1:1d}" for k in range(exog_re.shape[1])] return defnames
Passes through if given a list of names. Otherwise, gets pandas names or creates some generic variable names as needed.
_get_exog_re_names
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def from_packed(params, k_fe, k_re, use_sqrt, has_fe):
    """
    Create a MixedLMParams object from packed parameter vector.

    Parameters
    ----------
    params : array_like
        The model parameters packed into a single vector.
    k_fe : int
        The number of covariates with fixed effects
    k_re : int
        The number of covariates with random effects (excluding
        variance components).
    use_sqrt : bool
        If True, the random effects covariance matrix is provided
        as its Cholesky factor, otherwise the lower triangle of the
        covariance matrix is stored.
    has_fe : bool
        If True, `params` contains fixed effects parameters.
        Otherwise, the fixed effects parameters are set to zero.

    Returns
    -------
    A MixedLMParams object.
    """
    k_re2 = int(k_re * (k_re + 1) / 2)  # The number of covariance parameters.

    if has_fe:
        k_vc = len(params) - k_fe - k_re2
    else:
        k_vc = len(params) - k_re2

    pa = MixedLMParams(k_fe, k_re, k_vc)

    cov_re = np.zeros((k_re, k_re))
    ix = pa._ix
    if has_fe:
        pa.fe_params = params[0:k_fe]
        cov_re[ix] = params[k_fe:k_fe + k_re2]
    else:
        pa.fe_params = np.zeros(k_fe)
        cov_re[ix] = params[0:k_re2]

    if use_sqrt:
        cov_re = np.dot(cov_re, cov_re.T)
    else:
        cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))

    pa.cov_re = cov_re
    if k_vc > 0:
        if use_sqrt:
            pa.vcomp = params[-k_vc:]**2
        else:
            pa.vcomp = params[-k_vc:]
    else:
        pa.vcomp = np.array([])

    return pa
Create a MixedLMParams object from packed parameter vector.

Parameters
----------
params : array_like
    The model parameters packed into a single vector.
k_fe : int
    The number of covariates with fixed effects
k_re : int
    The number of covariates with random effects (excluding variance
    components).
use_sqrt : bool
    If True, the random effects covariance matrix is provided as its
    Cholesky factor, otherwise the lower triangle of the covariance
    matrix is stored.
has_fe : bool
    If True, `params` contains fixed effects parameters.  Otherwise,
    the fixed effects parameters are set to zero.

Returns
-------
A MixedLMParams object.
from_packed
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def from_components(fe_params=None, cov_re=None, cov_re_sqrt=None, vcomp=None): """ Create a MixedLMParams object from each parameter component. Parameters ---------- fe_params : array_like The fixed effects parameter (a 1-dimensional array). If None, there are no fixed effects. cov_re : array_like The random effects covariance matrix (a square, symmetric 2-dimensional array). cov_re_sqrt : array_like The Cholesky (lower triangular) square root of the random effects covariance matrix. vcomp : array_like The variance component parameters. If None, there are no variance components. Returns ------- A MixedLMParams object. """ if vcomp is None: vcomp = np.empty(0) if fe_params is None: fe_params = np.empty(0) if cov_re is None and cov_re_sqrt is None: cov_re = np.empty((0, 0)) k_fe = len(fe_params) k_vc = len(vcomp) k_re = cov_re.shape[0] if cov_re is not None else cov_re_sqrt.shape[0] pa = MixedLMParams(k_fe, k_re, k_vc) pa.fe_params = fe_params if cov_re_sqrt is not None: pa.cov_re = np.dot(cov_re_sqrt, cov_re_sqrt.T) elif cov_re is not None: pa.cov_re = cov_re pa.vcomp = vcomp return pa
Create a MixedLMParams object from each parameter component. Parameters ---------- fe_params : array_like The fixed effects parameter (a 1-dimensional array). If None, there are no fixed effects. cov_re : array_like The random effects covariance matrix (a square, symmetric 2-dimensional array). cov_re_sqrt : array_like The Cholesky (lower triangular) square root of the random effects covariance matrix. vcomp : array_like The variance component parameters. If None, there are no variance components. Returns ------- A MixedLMParams object.
from_components
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def copy(self): """ Returns a copy of the object. """ obj = MixedLMParams(self.k_fe, self.k_re, self.k_vc) obj.fe_params = self.fe_params.copy() obj.cov_re = self.cov_re.copy() obj.vcomp = self.vcomp.copy() return obj
Returns a copy of the object.
copy
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def get_packed(self, use_sqrt, has_fe=False): """ Return the model parameters packed into a single vector. Parameters ---------- use_sqrt : bool If True, the Cholesky square root of `cov_re` is included in the packed result. Otherwise the lower triangle of `cov_re` is included. has_fe : bool If True, the fixed effects parameters are included in the packed result, otherwise they are omitted. """ if self.k_re > 0: if use_sqrt: try: L = np.linalg.cholesky(self.cov_re) except np.linalg.LinAlgError: L = np.diag(np.sqrt(np.diag(self.cov_re))) cpa = L[self._ix] else: cpa = self.cov_re[self._ix] else: cpa = np.zeros(0) if use_sqrt: vcomp = np.sqrt(self.vcomp) else: vcomp = self.vcomp if has_fe: pa = np.concatenate((self.fe_params, cpa, vcomp)) else: pa = np.concatenate((cpa, vcomp)) return pa
Return the model parameters packed into a single vector. Parameters ---------- use_sqrt : bool If True, the Cholesky square root of `cov_re` is included in the packed result. Otherwise the lower triangle of `cov_re` is included. has_fe : bool If True, the fixed effects parameters are included in the packed result, otherwise they are omitted.
get_packed
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
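A round-trip sketch tying get_packed and from_packed together; the shapes and values are illustrative (2 fixed effects, 2 random effects, no variance components).

# Pack and recover a MixedLMParams; cov_re must be positive definite
# for the Cholesky round trip to be exact.
import numpy as np
from statsmodels.regression.mixed_linear_model import MixedLMParams

fe = np.array([1.0, -0.5])
cov_re = np.array([[1.0, 0.3], [0.3, 2.0]])
pa = MixedLMParams.from_components(fe_params=fe, cov_re=cov_re)

z = pa.get_packed(use_sqrt=True, has_fe=True)
pb = MixedLMParams.from_packed(z, k_fe=2, k_re=2, use_sqrt=True,
                               has_fe=True)
assert np.allclose(pb.cov_re, cov_re)
assert np.allclose(pb.fe_params, fe)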
def _make_param_names(self, exog_re):
    """
    Returns the full parameter names list, just the exogenous random
    effects variables, and the exogenous random effects variables with
    the interaction terms.
    """
    exog_names = list(self.exog_names)
    exog_re_names = _get_exog_re_names(self, exog_re)
    param_names = []

    for i in range(len(exog_re_names)):
        for j in range(i + 1):
            if i == j:
                param_names.append(exog_re_names[i] + " Var")
            else:
                param_names.append(exog_re_names[j] + " x " +
                                   exog_re_names[i] + " Cov")

    vc_names = [x + " Var" for x in self.exog_vc.names]

    return exog_names + param_names + vc_names, exog_re_names, param_names
Returns the full parameter names list, just the exogenous random effects variables, and the exogenous random effects variables with the interaction terms.
_make_param_names
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def from_formula(cls, formula, data, re_formula=None, vc_formula=None,
                 subset=None, use_sparse=False, missing='none', *args,
                 **kwargs):
    """
    Create a Model from a formula and dataframe.

    Parameters
    ----------
    formula : str or generic Formula object
        The formula specifying the model
    data : array_like
        The data for the model. See Notes.
    re_formula : str
        A one-sided formula defining the variance structure of the
        model.  The default gives a random intercept for each group.
    vc_formula : dict-like
        Formulas describing variance components.  `vc_formula[vc]` is
        the formula for the component with variance parameter named
        `vc`.  The formula is processed into a matrix, and the columns
        of this matrix are linearly combined with independent random
        coefficients having mean zero and a common variance.
    subset : array_like
        An array-like object of booleans, integers, or index
        values that indicate the subset of df to use in the
        model.  Assumes df is a `pandas.DataFrame`
    missing : str
        Either 'none' or 'drop'
    args : extra arguments
        These are passed to the model
    kwargs : extra keyword arguments
        These are passed to the model with one exception. The
        ``eval_env`` keyword is passed to patsy. It can be either a
        :class:`patsy:patsy.EvalEnvironment` object or an integer
        indicating the depth of the namespace to use. For example, the
        default ``eval_env=0`` uses the calling namespace. If you wish
        to use a "clean" environment set ``eval_env=-1``.

    Returns
    -------
    model : Model instance

    Notes
    -----
    `data` must define __getitem__ with the keys in the formula
    terms args and kwargs are passed on to the model
    instantiation. E.g., a numpy structured or rec array, a
    dictionary, or a pandas DataFrame.

    If the variance component is intended to produce random
    intercepts for disjoint subsets of a group, specified by string
    labels or a categorical data value, always use '0 +' in the
    formula so that no overall intercept is included.

    If the variance components specify random slopes and you do
    not also want a random group-level intercept in the model,
    then use '0 +' in the formula to exclude the intercept.

    The variance components formulas are processed separately for
    each group.  If a variable is categorical the results will not
    be affected by whether the group labels are distinct or
    re-used over the top-level groups.

    Examples
    --------
    Suppose we have data from an educational study with students
    nested in classrooms nested in schools.  The students take a
    test, and we want to relate the test scores to the students'
    ages, while accounting for the effects of classrooms and
    schools.  The school will be the top-level group, and the
    classroom is a nested group that is specified as a variance
    component.  Note that the schools may have different numbers of
    classrooms, and the classroom labels may (but need not be)
    different across the schools.

    >>> vc = {'classroom': '0 + C(classroom)'}
    >>> MixedLM.from_formula('test_score ~ age', vc_formula=vc, \
                             re_formula='1', groups='school', data=data)

    Now suppose we also have a previous test score called
    'pretest'.  If we want the relationship between pretest
    scores and the current test to vary by classroom, we can
    specify a random slope for the pretest score

    >>> vc = {'classroom': '0 + C(classroom)', 'pretest': '0 + pretest'}
    >>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \
                             re_formula='1', groups='school', data=data)

    The following model is almost equivalent to the previous one,
    but here the classroom random intercept and pretest slope may
    be correlated.

    >>> vc = {'classroom': '0 + C(classroom)'}
    >>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \
                             re_formula='1 + pretest', groups='school', \
                             data=data)
    """

    if "groups" not in kwargs.keys():
        raise AttributeError("'groups' is a required keyword argument " +
                             "in MixedLM.from_formula")
    groups = kwargs["groups"]

    # If `groups` is a variable name, retrieve the data for the
    # groups variable.
    group_name = "Group"
    if isinstance(groups, str):
        group_name = groups
        groups = np.asarray(data[groups])
    else:
        groups = np.asarray(groups)
    del kwargs["groups"]

    # Bypass all upstream missing data handling to properly handle
    # variance components
    if missing == 'drop':
        data, groups = _handle_missing(data, groups, formula, re_formula,
                                       vc_formula)
        missing = 'none'

    if re_formula is not None:
        if re_formula.strip() == "1":
            # Work around Patsy bug, fixed by 0.3.
            exog_re = np.ones((data.shape[0], 1))
            exog_re_names = [group_name]
        else:
            eval_env = kwargs.get('eval_env', None)
            if eval_env is None:
                eval_env = 1
            elif eval_env == -1:
                mgr = FormulaManager()
                eval_env = mgr.get_empty_eval_env()
            mgr = FormulaManager()
            exog_re = mgr.get_matrices(re_formula, data, eval_env=eval_env)
            exog_re_names = mgr.get_column_names(exog_re)
            exog_re_names = [x.replace("Intercept", group_name)
                             for x in exog_re_names]
            exog_re = np.asarray(exog_re)
            if exog_re.ndim == 1:
                exog_re = exog_re[:, None]
    else:
        exog_re = None
        if vc_formula is None:
            exog_re_names = [group_name]
        else:
            exog_re_names = []

    if vc_formula is not None:
        eval_env = kwargs.get('eval_env', None)
        if eval_env is None:
            eval_env = 1
        elif eval_env == -1:
            mgr = FormulaManager()
            eval_env = mgr.get_empty_eval_env()

        vc_mats = []
        vc_colnames = []
        vc_names = []
        gb = data.groupby(groups)
        kylist = sorted(gb.groups.keys())
        vcf = sorted(vc_formula.keys())
        mgr = FormulaManager()
        for vc_name in vcf:
            model_spec = mgr.get_spec(vc_formula[vc_name])
            vc_names.append(vc_name)
            evc_mats, evc_colnames = [], []
            for group_ix, group in enumerate(kylist):
                ii = gb.groups[group]
                mat = mgr.get_matrices(
                    model_spec,
                    data.loc[ii, :],
                    eval_env=eval_env,
                    pandas=True
                )
                evc_colnames.append(mat.columns.tolist())
                if use_sparse:
                    evc_mats.append(sparse.csr_matrix(mat))
                else:
                    evc_mats.append(np.asarray(mat))
            vc_mats.append(evc_mats)
            vc_colnames.append(evc_colnames)
        exog_vc = VCSpec(vc_names, vc_colnames, vc_mats)
    else:
        exog_vc = VCSpec([], [], [])

    kwargs["subset"] = None
    kwargs["exog_re"] = exog_re
    kwargs["exog_vc"] = exog_vc
    kwargs["groups"] = groups
    advance_eval_env(kwargs)
    mod = super().from_formula(
        formula, data, *args, **kwargs)

    # expand re names to account for pairs of RE
    (param_names, exog_re_names, exog_re_names_full) = \
        mod._make_param_names(exog_re_names)

    mod.data.param_names = param_names
    mod.data.exog_re_names = exog_re_names
    mod.data.exog_re_names_full = exog_re_names_full

    if vc_formula is not None:
        mod.data.vcomp_names = mod.exog_vc.names

    return mod
Create a Model from a formula and dataframe. Parameters ---------- formula : str or generic Formula object The formula specifying the model data : array_like The data for the model. See Notes. re_formula : str A one-sided formula defining the variance structure of the model. The default gives a random intercept for each group. vc_formula : dict-like Formulas describing variance components. `vc_formula[vc]` is the formula for the component with variance parameter named `vc`. The formula is processed into a matrix, and the columns of this matrix are linearly combined with independent random coefficients having mean zero and a common variance. subset : array_like An array-like object of booleans, integers, or index values that indicate the subset of df to use in the model. Assumes df is a `pandas.DataFrame` missing : str Either 'none' or 'drop' args : extra arguments These are passed to the model kwargs : extra keyword arguments These are passed to the model with one exception. The ``eval_env`` keyword is passed to patsy. It can be either a :class:`patsy:patsy.EvalEnvironment` object or an integer indicating the depth of the namespace to use. For example, the default ``eval_env=0`` uses the calling namespace. If you wish to use a "clean" environment set ``eval_env=-1``. Returns ------- model : Model instance Notes ----- `data` must define __getitem__ with the keys in the formula terms args and kwargs are passed on to the model instantiation. E.g., a numpy structured or rec array, a dictionary, or a pandas DataFrame. If the variance component is intended to produce random intercepts for disjoint subsets of a group, specified by string labels or a categorical data value, always use '0 +' in the formula so that no overall intercept is included. If the variance components specify random slopes and you do not also want a random group-level intercept in the model, then use '0 +' in the formula to exclude the intercept. The variance components formulas are processed separately for each group. If a variable is categorical the results will not be affected by whether the group labels are distinct or re-used over the top-level groups. Examples -------- Suppose we have data from an educational study with students nested in classrooms nested in schools. The students take a test, and we want to relate the test scores to the students' ages, while accounting for the effects of classrooms and schools. The school will be the top-level group, and the classroom is a nested group that is specified as a variance component. Note that the schools may have different numbers of classrooms, and the classroom labels may (but need not be) different across the schools. >>> vc = {'classroom': '0 + C(classroom)'} >>> MixedLM.from_formula('test_score ~ age', vc_formula=vc, \ re_formula='1', groups='school', data=data) Now suppose we also have a previous test score called 'pretest'. If we want the relationship between pretest scores and the current test to vary by classroom, we can specify a random slope for the pretest score >>> vc = {'classroom': '0 + C(classroom)', 'pretest': '0 + pretest'} >>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \ re_formula='1', groups='school', data=data) The following model is almost equivalent to the previous one, but here the classroom random intercept and pretest slope may be correlated. >>> vc = {'classroom': '0 + C(classroom)'} >>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \ re_formula='1 + pretest', groups='school', \ data=data)
from_formula
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
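A minimal usage sketch, not from the statsmodels source: it exercises the nested variance-components pattern described in the docstring above on simulated data. The column names (school, classroom, age, test_score) and all numeric values are hypothetical.

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 400
df = pd.DataFrame({
    "school": rng.integers(0, 10, size=n),     # top-level group
    "classroom": rng.integers(0, 4, size=n),   # nested; labels reused across schools
    "age": rng.uniform(10, 15, size=n),
})
df["test_score"] = 50 + 2 * df["age"] + rng.normal(scale=5, size=n)

# Random school intercept plus a classroom variance component; note the
# '0 +' in the variance-component formula, as advised in the Notes.
vc = {"classroom": "0 + C(classroom)"}
model = sm.MixedLM.from_formula("test_score ~ age", groups="school",
                                vc_formula=vc, re_formula="1", data=df)
result = model.fit()
print(result.summary())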
def predict(self, params, exog=None):
    """
    Return predicted values from a design matrix.

    Parameters
    ----------
    params : array_like
        Parameters of a mixed linear model.  Can be either a
        MixedLMParams instance, or a vector containing the packed
        model parameters in which the fixed effects parameters are
        at the beginning of the vector, or a vector containing
        only the fixed effects parameters.
    exog : array_like, optional
        Design / exogenous data for the fixed effects. Model exog
        is used if None.

    Returns
    -------
    An array of fitted values.  Note that these predicted values
    only reflect the fixed effects mean structure of the model.
    """
    if exog is None:
        exog = self.exog

    if isinstance(params, MixedLMParams):
        params = params.fe_params
    else:
        params = params[0:self.k_fe]

    return np.dot(exog, params)
Return predicted values from a design matrix. Parameters ---------- params : array_like Parameters of a mixed linear model. Can be either a MixedLMParams instance, or a vector containing the packed model parameters in which the fixed effects parameters are at the beginning of the vector, or a vector containing only the fixed effects parameters. exog : array_like, optional Design / exogenous data for the fixed effects. Model exog is used if None. Returns ------- An array of fitted values. Note that these predicted values only reflect the fixed effects mean structure of the model.
predict
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
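A sketch, not from the source, continuing from the `from_formula` example above: `predict` reflects only the fixed-effects mean structure, so it differs from `fittedvalues`, which also adds the predicted random effects.

fixed_only = model.predict(np.asarray(result.params))  # X @ fe_params only
gap = np.max(np.abs(fixed_only - np.asarray(result.fittedvalues)))
print(gap)  # nonzero: the contribution of the predicted random effects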
def group_list(self, array):
    """
    Returns `array` split into subarrays corresponding to the
    grouping structure.
    """

    if array is None:
        return None

    if array.ndim == 1:
        return [np.array(array[self.row_indices[k]])
                for k in self.group_labels]
    else:
        return [np.array(array[self.row_indices[k], :])
                for k in self.group_labels]
Returns `array` split into subarrays corresponding to the grouping structure.
group_list
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
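A sketch, not from the source, continuing from the example above: `group_list` splits any model-length array by the grouping structure.

endog_by_group = model.group_list(model.endog)
print(len(endog_by_group))      # number of schools
print(endog_by_group[0].shape)  # observations in the first school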
def fit_regularized(self, start_params=None, method='l1', alpha=0,
                    ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):
    """
    Fit a model in which the fixed effects parameters are
    penalized.  The dependence parameters are held fixed at their
    estimated values in the unpenalized model.

    Parameters
    ----------
    method : str or Penalty object
        Method for regularization.  If a string, must be 'l1'.
    alpha : array_like
        Scalar or vector of penalty weights.  If a scalar, the same
        weight is applied to all coefficients; if a vector, it
        contains a weight for each coefficient.  If method is a
        Penalty object, the weights are scaled by alpha.  For L1
        regularization, the weights are used directly.
    ceps : positive real scalar
        Fixed effects parameters smaller than this value
        in magnitude are treated as being zero.
    ptol : positive real scalar
        Convergence occurs when the sup norm difference
        between successive values of `fe_params` is less than
        `ptol`.
    maxit : int
        The maximum number of iterations.
    **fit_kwargs
        Additional keyword arguments passed to fit.

    Returns
    -------
    A MixedLMResults instance containing the results.

    Notes
    -----
    The covariance structure is not updated as the fixed effects
    parameters are varied.

    The algorithm used here for L1 regularization is a "shooting"
    or cyclic coordinate descent algorithm.

    If method is 'l1', then `fe_pen` and `cov_pen` are used to
    obtain the covariance structure, but are ignored during the
    L1-penalized fitting.

    References
    ----------
    Friedman, J. H., Hastie, T. and Tibshirani, R. Regularized
    Paths for Generalized Linear Models via Coordinate
    Descent. Journal of Statistical Software, 33(1) (2008)
    http://www.jstatsoft.org/v33/i01/paper

    http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
    """

    if isinstance(method, str) and (method.lower() != 'l1'):
        raise ValueError("Invalid regularization method")

    # If method is a smooth penalty just optimize directly.
    if isinstance(method, Penalty):
        # Scale the penalty weights by alpha
        method.alpha = alpha
        fit_kwargs.update({"fe_pen": method})
        return self.fit(**fit_kwargs)

    if np.isscalar(alpha):
        alpha = alpha * np.ones(self.k_fe, dtype=np.float64)

    # Fit the unpenalized model to get the dependence structure.
    mdf = self.fit(**fit_kwargs)
    fe_params = mdf.fe_params
    cov_re = mdf.cov_re
    vcomp = mdf.vcomp
    scale = mdf.scale
    try:
        cov_re_inv = np.linalg.inv(cov_re)
    except np.linalg.LinAlgError:
        cov_re_inv = None

    for itr in range(maxit):

        fe_params_s = fe_params.copy()
        for j in range(self.k_fe):

            if abs(fe_params[j]) < ceps:
                continue

            # The residuals
            fe_params[j] = 0.
            expval = np.dot(self.exog, fe_params)
            resid_all = self.endog - expval

            # The loss function has the form
            # a*x^2 + b*x + pwt*|x|
            a, b = 0., 0.
            for group_ix, group in enumerate(self.group_labels):

                vc_var = self._expand_vcomp(vcomp, group_ix)

                exog = self.exog_li[group_ix]
                ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]

                resid = resid_all[self.row_indices[group]]
                solver = _smw_solver(scale, ex_r, ex2_r, cov_re_inv,
                                     1 / vc_var)

                x = exog[:, j]
                u = solver(x)
                a += np.dot(u, x)
                b -= 2 * np.dot(u, resid)

            pwt1 = alpha[j]
            if b > pwt1:
                fe_params[j] = -(b - pwt1) / (2 * a)
            elif b < -pwt1:
                fe_params[j] = -(b + pwt1) / (2 * a)

        if np.abs(fe_params_s - fe_params).max() < ptol:
            break

    # Replace the fixed effects estimates with their penalized
    # values, leave the dependence parameters in their unpenalized
    # state.
    params_prof = mdf.params.copy()
    params_prof[0:self.k_fe] = fe_params

    scale = self.get_scale(fe_params, mdf.cov_re_unscaled, mdf.vcomp)

    # Get the Hessian including only the nonzero fixed effects,
    # then blow back up to the full size after inverting.
    hess, sing = self.hessian(params_prof)
    if sing:
        warnings.warn(_warn_cov_sing)

    pcov = np.nan * np.ones_like(hess)
    ii = np.abs(params_prof) > ceps
    ii[self.k_fe:] = True
    ii = np.flatnonzero(ii)
    hess1 = hess[ii, :][:, ii]
    pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)

    params_object = MixedLMParams.from_components(fe_params, cov_re=cov_re)

    results = MixedLMResults(self, params_prof, pcov / scale)
    results.params_object = params_object
    results.fe_params = fe_params
    results.cov_re = cov_re
    results.vcomp = vcomp
    results.scale = scale
    results.cov_re_unscaled = mdf.cov_re_unscaled
    results.method = mdf.method
    results.converged = True
    results.cov_pen = self.cov_pen
    results.k_fe = self.k_fe
    results.k_re = self.k_re
    results.k_re2 = self.k_re2
    results.k_vc = self.k_vc

    return MixedLMResultsWrapper(results)
Fit a model in which the fixed effects parameters are penalized. The dependence parameters are held fixed at their estimated values in the unpenalized model. Parameters ---------- method : str or Penalty object Method for regularization. If a string, must be 'l1'. alpha : array_like Scalar or vector of penalty weights. If a scalar, the same weight is applied to all coefficients; if a vector, it contains a weight for each coefficient. If method is a Penalty object, the weights are scaled by alpha. For L1 regularization, the weights are used directly. ceps : positive real scalar Fixed effects parameters smaller than this value in magnitude are treated as being zero. ptol : positive real scalar Convergence occurs when the sup norm difference between successive values of `fe_params` is less than `ptol`. maxit : int The maximum number of iterations. **fit_kwargs Additional keyword arguments passed to fit. Returns ------- A MixedLMResults instance containing the results. Notes ----- The covariance structure is not updated as the fixed effects parameters are varied. The algorithm used here for L1 regularization is a "shooting" or cyclic coordinate descent algorithm. If method is 'l1', then `fe_pen` and `cov_pen` are used to obtain the covariance structure, but are ignored during the L1-penalized fitting. References ---------- Friedman, J. H., Hastie, T. and Tibshirani, R. Regularized Paths for Generalized Linear Models via Coordinate Descent. Journal of Statistical Software, 33(1) (2008) http://www.jstatsoft.org/v33/i01/paper http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
fit_regularized
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
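A sketch, not from the source, continuing from the example above: L1-penalized fixed effects via the cyclic coordinate descent described in the Notes. Larger `alpha` shrinks more coefficients exactly to zero; the value used here is arbitrary.

result_l1 = model.fit_regularized(method="l1", alpha=1.0)
print(result_l1.fe_params)  # some entries may be exactly zero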
def get_fe_params(self, cov_re, vcomp, tol=1e-10):
    """
    Use GLS to update the fixed effects parameter estimates.

    Parameters
    ----------
    cov_re : array_like (2d)
        The covariance matrix of the random effects.
    vcomp : array_like (1d)
        The variance components.
    tol : float
        A tolerance parameter to determine when covariances
        are singular.

    Returns
    -------
    params : ndarray
        The GLS estimates of the fixed effects parameters.
    singular : bool
        True if the covariance is singular
    """

    if self.k_fe == 0:
        return np.array([]), False

    sing = False

    if self.k_re == 0:
        cov_re_inv = np.empty((0, 0))
    else:
        w, v = np.linalg.eigh(cov_re)
        if w.min() < tol:
            # Singular, use pseudo-inverse
            sing = True
            ii = np.flatnonzero(w >= tol)
            if len(ii) == 0:
                cov_re_inv = np.zeros_like(cov_re)
            else:
                vi = v[:, ii]
                wi = w[ii]
                cov_re_inv = np.dot(vi / wi, vi.T)
        else:
            cov_re_inv = np.linalg.inv(cov_re)

    # Cache these quantities that do not change.
    if not hasattr(self, "_endex_li"):
        self._endex_li = []
        for group_ix, _ in enumerate(self.group_labels):
            mat = np.concatenate(
                (self.exog_li[group_ix],
                 self.endog_li[group_ix][:, None]), axis=1)
            self._endex_li.append(mat)

    xtxy = 0.
    for group_ix, group in enumerate(self.group_labels):
        vc_var = self._expand_vcomp(vcomp, group_ix)
        if vc_var.size > 0:
            if vc_var.min() < tol:
                # Pseudo-inverse
                sing = True
                ii = np.flatnonzero(vc_var >= tol)
                vc_vari = np.zeros_like(vc_var)
                vc_vari[ii] = 1 / vc_var[ii]
            else:
                vc_vari = 1 / vc_var
        else:
            vc_vari = np.empty(0)
        exog = self.exog_li[group_ix]
        ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
        solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, vc_vari)
        u = solver(self._endex_li[group_ix])
        xtxy += np.dot(exog.T, u)

    if sing:
        fe_params = np.dot(np.linalg.pinv(xtxy[:, 0:-1]), xtxy[:, -1])
    else:
        fe_params = np.linalg.solve(xtxy[:, 0:-1], xtxy[:, -1])

    return fe_params, sing
Use GLS to update the fixed effects parameter estimates. Parameters ---------- cov_re : array_like (2d) The covariance matrix of the random effects. vcomp : array_like (1d) The variance components. tol : float A tolerance parameter to determine when covariances are singular. Returns ------- params : ndarray The GLS estimates of the fixed effects parameters. singular : bool True if the covariance is singular
get_fe_params
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
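A sketch, not from the source, continuing from the example above; it relies on result attributes (`cov_re_unscaled`, `vcomp`, `scale`) that `fit` sets, so treat it as a peek at semi-internal API. At the fitted (unscaled) covariance parameters, the GLS update reproduces the fitted fixed effects.

fe, sing = model.get_fe_params(result.cov_re_unscaled,
                               np.asarray(result.vcomp) / result.scale)
print(np.allclose(fe, np.asarray(result.fe_params)), sing)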
def _reparam(self):
    """
    Returns parameters of the map converting parameters from the
    form used in optimization to the form returned to the user.

    Returns
    -------
    lin : list-like
        Linear terms of the map
    quad : list-like
        Quadratic terms of the map

    Notes
    -----
    If P are the standard form parameters and R are the
    transformed parameters (i.e. with the Cholesky square root
    covariance and square root transformed variance components),
    then P[i] = lin[i] * R + R' * quad[i] * R
    """

    k_fe, k_re, k_re2, k_vc = self.k_fe, self.k_re, self.k_re2, self.k_vc
    k_tot = k_fe + k_re2 + k_vc
    ix = np.tril_indices(self.k_re)

    lin = []
    for k in range(k_fe):
        e = np.zeros(k_tot)
        e[k] = 1
        lin.append(e)
    for k in range(k_re2):
        lin.append(np.zeros(k_tot))
    for k in range(k_vc):
        lin.append(np.zeros(k_tot))

    quad = []
    # Quadratic terms for fixed effects.
    for k in range(k_tot):
        quad.append(np.zeros((k_tot, k_tot)))

    # Quadratic terms for random effects covariance.
    ii = np.tril_indices(k_re)
    ix = [(a, b) for a, b in zip(ii[0], ii[1])]
    for i1 in range(k_re2):
        for i2 in range(k_re2):
            ix1 = ix[i1]
            ix2 = ix[i2]
            if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):
                ii = (ix2[0], ix1[0])
                k = ix.index(ii)
                quad[k_fe+k][k_fe+i2, k_fe+i1] += 1
    for k in range(k_tot):
        quad[k] = 0.5*(quad[k] + quad[k].T)

    # Quadratic terms for variance components.
    km = k_fe + k_re2
    for k in range(km, km+k_vc):
        quad[k][k, k] = 1

    return lin, quad
Returns parameters of the map converting parameters from the form used in optimization to the form returned to the user. Returns ------- lin : list-like Linear terms of the map quad : list-like Quadratic terms of the map Notes ----- If P are the standard form parameters and R are the transformed parameters (i.e. with the Cholesky square root covariance and square root transformed variance components), then P[i] = lin[i] * R + R' * quad[i] * R
_reparam
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def _expand_vcomp(self, vcomp, group_ix):
    """
    Replicate variance parameters to match a group's design.

    Parameters
    ----------
    vcomp : array_like
        The variance parameters for the variance components.
    group_ix : int
        The group index

    Returns an expanded version of vcomp, in which each variance
    parameter is copied as many times as there are independent
    realizations of the variance component in the given group.
    """
    if len(vcomp) == 0:
        return np.empty(0)
    vc_var = []
    for j in range(len(self.exog_vc.names)):
        d = self.exog_vc.mats[j][group_ix].shape[1]
        vc_var.append(vcomp[j] * np.ones(d))
    if len(vc_var) > 0:
        return np.concatenate(vc_var)
    else:
        # Cannot reach here?
        return np.empty(0)
Replicate variance parameters to match a group's design. Parameters ---------- vcomp : array_like The variance parameters for the variance components. group_ix : int The group index Returns an expanded version of vcomp, in which each variance parameter is copied as many times as there are independent realizations of the variance component in the given group.
_expand_vcomp
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
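A sketch, not from the source, of the expansion rule implemented above, in plain numpy: each variance parameter is repeated once per column of its component's design matrix in the given group. The parameter values and column counts are hypothetical.

import numpy as np

vcomp = np.array([2.0, 5.0])   # one parameter per named component
ncols = [3, 2]                 # columns of each component's matrix in one group
vc_var = np.concatenate([v * np.ones(d) for v, d in zip(vcomp, ncols)])
print(vc_var)                  # [2. 2. 2. 5. 5.]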
def _augment_exog(self, group_ix):
    """
    Concatenate the columns for variance components to the columns
    for other random effects to obtain a single random effects
    exog matrix for a given group.
    """
    ex_r = self.exog_re_li[group_ix] if self.k_re > 0 else None
    if self.k_vc == 0:
        return ex_r

    ex = [ex_r] if self.k_re > 0 else []
    any_sparse = False
    for j, _ in enumerate(self.exog_vc.names):
        ex.append(self.exog_vc.mats[j][group_ix])
        any_sparse |= sparse.issparse(ex[-1])
    if any_sparse:
        for j, x in enumerate(ex):
            if not sparse.issparse(x):
                ex[j] = sparse.csr_matrix(x)
        ex = sparse.hstack(ex)
        ex = sparse.csr_matrix(ex)
    else:
        ex = np.concatenate(ex, axis=1)

    return ex
Concatenate the columns for variance components to the columns for other random effects to obtain a single random effects exog matrix for a given group.
_augment_exog
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def loglike(self, params, profile_fe=True):
    """
    Evaluate the (profile) log-likelihood of the linear mixed
    effects model.

    Parameters
    ----------
    params : MixedLMParams, or array_like.
        The parameter value.  If array-like, must be a packed
        parameter vector containing only the covariance
        parameters.
    profile_fe : bool
        If True, replace the provided value of `fe_params` with
        the GLS estimates.

    Returns
    -------
    The log-likelihood value at `params`.

    Notes
    -----
    The scale parameter `scale` is always profiled out of the
    log-likelihood.  In addition, if `profile_fe` is true the
    fixed effects parameters are also profiled out.
    """

    if type(params) is not MixedLMParams:
        params = MixedLMParams.from_packed(params, self.k_fe,
                                           self.k_re, self.use_sqrt,
                                           has_fe=False)

    cov_re = params.cov_re
    vcomp = params.vcomp

    # Move to the profile set
    if profile_fe:
        fe_params, sing = self.get_fe_params(cov_re, vcomp)
        if sing:
            self._cov_sing += 1
    else:
        fe_params = params.fe_params

    if self.k_re > 0:
        try:
            cov_re_inv = np.linalg.inv(cov_re)
        except np.linalg.LinAlgError:
            cov_re_inv = np.linalg.pinv(cov_re)
            self._cov_sing += 1
        _, cov_re_logdet = np.linalg.slogdet(cov_re)
    else:
        cov_re_inv = np.zeros((0, 0))
        cov_re_logdet = 0

    # The residuals
    expval = np.dot(self.exog, fe_params)
    resid_all = self.endog - expval

    likeval = 0.

    # Handle the covariance penalty
    if (self.cov_pen is not None) and (self.k_re > 0):
        likeval -= self.cov_pen.func(cov_re, cov_re_inv)

    # Handle the fixed effects penalty
    if (self.fe_pen is not None):
        likeval -= self.fe_pen.func(fe_params)

    xvx, qf = 0., 0.
    for group_ix, group in enumerate(self.group_labels):

        vc_var = self._expand_vcomp(vcomp, group_ix)

        cov_aug_logdet = cov_re_logdet + np.sum(np.log(vc_var))

        exog = self.exog_li[group_ix]
        ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
        solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)

        resid = resid_all[self.row_indices[group]]

        # Part 1 of the log likelihood (for both ML and REML)
        ld = _smw_logdet(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var,
                         cov_aug_logdet)
        likeval -= ld / 2.

        # Part 2 of the log likelihood (for both ML and REML)
        u = solver(resid)
        qf += np.dot(resid, u)

        # Adjustment for REML
        if self.reml:
            mat = solver(exog)
            xvx += np.dot(exog.T, mat)

    if self.reml:
        likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
        _, ld = np.linalg.slogdet(xvx)
        likeval -= ld / 2.
        likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.
        likeval += ((self.n_totobs - self.k_fe) *
                    np.log(self.n_totobs - self.k_fe) / 2.)
        likeval -= (self.n_totobs - self.k_fe) / 2.
    else:
        likeval -= self.n_totobs * np.log(qf) / 2.
        likeval -= self.n_totobs * np.log(2 * np.pi) / 2.
        likeval += self.n_totobs * np.log(self.n_totobs) / 2.
        likeval -= self.n_totobs / 2.

    return likeval
Evaluate the (profile) log-likelihood of the linear mixed effects model. Parameters ---------- params : MixedLMParams, or array_like. The parameter value. If array-like, must be a packed parameter vector containing only the covariance parameters. profile_fe : bool If True, replace the provided value of `fe_params` with the GLS estimates. Returns ------- The log-likelihood value at `params`. Notes ----- The scale parameter `scale` is always profiled out of the log-likelihood. In addition, if `profile_fe` is true the fixed effects parameters are also profiled out.
loglike
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
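A sketch, not from the source, continuing from the example above: evaluating the profiled log-likelihood at the fitted parameters. `params_object` is set on the results by `fit` but is not documented public API, so treat this as illustrative only.

print(model.loglike(result.params_object, profile_fe=False))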
def _gen_dV_dPar(self, ex_r, solver, group_ix, max_ix=None):
    """
    A generator that yields the element-wise derivative of the
    marginal covariance matrix with respect to the random effects
    variance and covariance parameters.

    ex_r : array_like
        The random effects design matrix
    solver : function
        A function that given x returns V^{-1}x, where V
        is the group's marginal covariance matrix.
    group_ix : int
        The group index
    max_ix : {int, None}
        If not None, the generator ends when this index
        is reached.
    """
    axr = solver(ex_r)

    # Regular random effects
    jj = 0
    for j1 in range(self.k_re):
        for j2 in range(j1 + 1):
            if max_ix is not None and jj > max_ix:
                return
            # Need 2d
            mat_l, mat_r = ex_r[:, j1:j1+1], ex_r[:, j2:j2+1]
            vsl, vsr = axr[:, j1:j1+1], axr[:, j2:j2+1]
            yield jj, mat_l, mat_r, vsl, vsr, j1 == j2
            jj += 1

    # Variance components
    for j, _ in enumerate(self.exog_vc.names):
        if max_ix is not None and jj > max_ix:
            return
        mat = self.exog_vc.mats[j][group_ix]
        axmat = solver(mat)
        yield jj, mat, mat, axmat, axmat, True
        jj += 1
A generator that yields the element-wise derivative of the marginal covariance matrix with respect to the random effects variance and covariance parameters. ex_r : array_like The random effects design matrix solver : function A function that given x returns V^{-1}x, where V is the group's marginal covariance matrix. group_ix : int The group index max_ix : {int, None} If not None, the generator ends when this index is reached.
_gen_dV_dPar
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def score(self, params, profile_fe=True):
    """
    Returns the score vector of the profile log-likelihood.

    Notes
    -----
    The score vector that is returned is computed with respect to
    the parameterization defined by this model instance's
    `use_sqrt` attribute.
    """

    if type(params) is not MixedLMParams:
        params = MixedLMParams.from_packed(
            params, self.k_fe, self.k_re, self.use_sqrt,
            has_fe=False)

    if profile_fe:
        params.fe_params, sing = \
            self.get_fe_params(params.cov_re, params.vcomp)

        if sing:
            msg = "Random effects covariance is singular"
            warnings.warn(msg)

    if self.use_sqrt:
        score_fe, score_re, score_vc = self.score_sqrt(
            params, calc_fe=not profile_fe)
    else:
        score_fe, score_re, score_vc = self.score_full(
            params, calc_fe=not profile_fe)

    if self._freepat is not None:
        score_fe *= self._freepat.fe_params
        score_re *= self._freepat.cov_re[self._freepat._ix]
        score_vc *= self._freepat.vcomp

    if profile_fe:
        return np.concatenate((score_re, score_vc))
    else:
        return np.concatenate((score_fe, score_re, score_vc))
Returns the score vector of the profile log-likelihood. Notes ----- The score vector that is returned is computed with respect to the parameterization defined by this model instance's `use_sqrt` attribute.
score
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
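A sketch, not from the source, continuing from the example above: cross-checking the analytic score against a finite-difference gradient of `loglike` in the packed covariance parameterization (`get_packed` is semi-internal API).

from scipy.optimize import approx_fprime

packed = result.params_object.get_packed(use_sqrt=model.use_sqrt,
                                         has_fe=False)
num_grad = approx_fprime(packed, model.loglike, 1e-6)
print(np.max(np.abs(num_grad - model.score(packed))))  # should be small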
def score_full(self, params, calc_fe):
    """
    Returns the score with respect to untransformed parameters.

    Calculates the score vector for the profiled log-likelihood of
    the mixed effects model with respect to the parameterization
    in which the random effects covariance matrix is represented
    in its full form (not using the Cholesky factor).

    Parameters
    ----------
    params : MixedLMParams or array_like
        The parameter at which the score function is evaluated.
        If array-like, must contain the packed random effects
        parameters (cov_re and vcomp) without fe_params.
    calc_fe : bool
        If True, calculate the score vector for the fixed effects
        parameters.  If False, this vector is not calculated, and
        a vector of zeros is returned in its place.

    Returns
    -------
    score_fe : array_like
        The score vector with respect to the fixed effects
        parameters.
    score_re : array_like
        The score vector with respect to the random effects
        parameters (excluding variance components parameters).
    score_vc : array_like
        The score vector with respect to variance components
        parameters.

    Notes
    -----
    `score_re` is taken with respect to the parameterization in
    which `cov_re` is represented through its lower triangle
    (without taking the Cholesky square root).
    """

    fe_params = params.fe_params
    cov_re = params.cov_re
    vcomp = params.vcomp

    try:
        cov_re_inv = np.linalg.inv(cov_re)
    except np.linalg.LinAlgError:
        cov_re_inv = np.linalg.pinv(cov_re)
        self._cov_sing += 1

    score_fe = np.zeros(self.k_fe)
    score_re = np.zeros(self.k_re2)
    score_vc = np.zeros(self.k_vc)

    # Handle the covariance penalty.
    if self.cov_pen is not None:
        score_re -= self.cov_pen.deriv(cov_re, cov_re_inv)

    # Handle the fixed effects penalty.
    if calc_fe and (self.fe_pen is not None):
        score_fe -= self.fe_pen.deriv(fe_params)

    # resid' V^{-1} resid, summed over the groups (a scalar)
    rvir = 0.

    # exog' V^{-1} resid, summed over the groups (a k_fe
    # dimensional vector)
    xtvir = 0.

    # exog' V^{-1} exog, summed over the groups (a k_fe x k_fe
    # matrix)
    xtvix = 0.

    # V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th
    # covariance parameter.
    xtax = [0., ] * (self.k_re2 + self.k_vc)

    # Temporary related to the gradient of log |V|
    dlv = np.zeros(self.k_re2 + self.k_vc)

    # resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)
    rvavr = np.zeros(self.k_re2 + self.k_vc)

    for group_ix, group in enumerate(self.group_labels):

        vc_var = self._expand_vcomp(vcomp, group_ix)

        exog = self.exog_li[group_ix]
        ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
        solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)

        # The residuals
        resid = self.endog_li[group_ix]
        if self.k_fe > 0:
            expval = np.dot(exog, fe_params)
            resid = resid - expval

        if self.reml:
            viexog = solver(exog)
            xtvix += np.dot(exog.T, viexog)

        # Contributions to the covariance parameter gradient
        vir = solver(resid)
        for (jj, matl, matr, vsl, vsr, sym) in \
                self._gen_dV_dPar(ex_r, solver, group_ix):
            dlv[jj] = _dotsum(matr, vsl)
            if not sym:
                dlv[jj] += _dotsum(matl, vsr)

            ul = _dot(vir, matl)
            ur = ul.T if sym else _dot(matr.T, vir)
            ulr = np.dot(ul, ur)
            rvavr[jj] += ulr
            if not sym:
                rvavr[jj] += ulr.T

            if self.reml:
                ul = _dot(viexog.T, matl)
                ur = ul.T if sym else _dot(matr.T, viexog)
                ulr = np.dot(ul, ur)
                xtax[jj] += ulr
                if not sym:
                    xtax[jj] += ulr.T

        # Contribution of log|V| to the covariance parameter
        # gradient.
        if self.k_re > 0:
            score_re -= 0.5 * dlv[0:self.k_re2]
        if self.k_vc > 0:
            score_vc -= 0.5 * dlv[self.k_re2:]

        rvir += np.dot(resid, vir)

        if calc_fe:
            xtvir += np.dot(exog.T, vir)

    fac = self.n_totobs
    if self.reml:
        fac -= self.k_fe

    if calc_fe and self.k_fe > 0:
        score_fe += fac * xtvir / rvir

    if self.k_re > 0:
        score_re += 0.5 * fac * rvavr[0:self.k_re2] / rvir
    if self.k_vc > 0:
        score_vc += 0.5 * fac * rvavr[self.k_re2:] / rvir

    if self.reml:
        xtvixi = np.linalg.inv(xtvix)
        for j in range(self.k_re2):
            score_re[j] += 0.5 * _dotsum(xtvixi.T, xtax[j])
        for j in range(self.k_vc):
            score_vc[j] += 0.5 * _dotsum(xtvixi.T, xtax[self.k_re2 + j])

    return score_fe, score_re, score_vc
Returns the score with respect to untransformed parameters. Calculates the score vector for the profiled log-likelihood of the mixed effects model with respect to the parameterization in which the random effects covariance matrix is represented in its full form (not using the Cholesky factor). Parameters ---------- params : MixedLMParams or array_like The parameter at which the score function is evaluated. If array-like, must contain the packed random effects parameters (cov_re and vcomp) without fe_params. calc_fe : bool If True, calculate the score vector for the fixed effects parameters. If False, this vector is not calculated, and a vector of zeros is returned in its place. Returns ------- score_fe : array_like The score vector with respect to the fixed effects parameters. score_re : array_like The score vector with respect to the random effects parameters (excluding variance components parameters). score_vc : array_like The score vector with respect to variance components parameters. Notes ----- `score_re` is taken with respect to the parameterization in which `cov_re` is represented through its lower triangle (without taking the Cholesky square root).
score_full
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def score_sqrt(self, params, calc_fe=True):
    """
    Returns the score with respect to transformed parameters.

    Calculates the score vector with respect to the
    parameterization in which the random effects covariance matrix
    is represented through its Cholesky square root.

    Parameters
    ----------
    params : MixedLMParams or array_like
        The model parameters.  If array-like must contain packed
        parameters that are compatible with this model instance.
    calc_fe : bool
        If True, calculate the score vector for the fixed effects
        parameters.  If False, this vector is not calculated, and
        a vector of zeros is returned in its place.

    Returns
    -------
    score_fe : array_like
        The score vector with respect to the fixed effects
        parameters.
    score_re : array_like
        The score vector with respect to the random effects
        parameters (excluding variance components parameters).
    score_vc : array_like
        The score vector with respect to variance components
        parameters.
    """

    score_fe, score_re, score_vc = self.score_full(params,
                                                   calc_fe=calc_fe)
    params_vec = params.get_packed(use_sqrt=True, has_fe=True)

    score_full = np.concatenate((score_fe, score_re, score_vc))
    scr = 0.
    for i in range(len(params_vec)):
        v = self._lin[i] + 2 * np.dot(self._quad[i], params_vec)
        scr += score_full[i] * v
    score_fe = scr[0:self.k_fe]
    score_re = scr[self.k_fe:self.k_fe + self.k_re2]
    score_vc = scr[self.k_fe + self.k_re2:]

    return score_fe, score_re, score_vc
Returns the score with respect to transformed parameters. Calculates the score vector with respect to the parameterization in which the random effects covariance matrix is represented through its Cholesky square root. Parameters ---------- params : MixedLMParams or array_like The model parameters. If array-like must contain packed parameters that are compatible with this model instance. calc_fe : bool If True, calculate the score vector for the fixed effects parameters. If False, this vector is not calculated, and a vector of zeros is returned in its place. Returns ------- score_fe : array_like The score vector with respect to the fixed effects parameters. score_re : array_like The score vector with respect to the random effects parameters (excluding variance components parameters). score_vc : array_like The score vector with respect to variance components parameters.
score_sqrt
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def hessian(self, params):
    """
    Returns the model's Hessian matrix.

    Calculates the Hessian matrix for the linear mixed effects
    model with respect to the parameterization in which the
    covariance matrix is represented directly (without square-root
    transformation).

    Parameters
    ----------
    params : MixedLMParams or array_like
        The model parameters at which the Hessian is calculated.
        If array-like, must contain the packed parameters in a
        form that is compatible with this model instance.

    Returns
    -------
    hess : 2d ndarray
        The Hessian matrix, evaluated at `params`.
    sing : boolean
        If True, the covariance matrix is singular and a
        pseudo-inverse is returned.
    """

    if type(params) is not MixedLMParams:
        params = MixedLMParams.from_packed(params, self.k_fe,
                                           self.k_re,
                                           use_sqrt=self.use_sqrt,
                                           has_fe=True)

    fe_params = params.fe_params
    vcomp = params.vcomp
    cov_re = params.cov_re
    sing = False

    if self.k_re > 0:
        try:
            cov_re_inv = np.linalg.inv(cov_re)
        except np.linalg.LinAlgError:
            cov_re_inv = np.linalg.pinv(cov_re)
            sing = True
    else:
        cov_re_inv = np.empty((0, 0))

    # Blocks for the fixed and random effects parameters.
    hess_fe = 0.
    hess_re = np.zeros((self.k_re2 + self.k_vc,
                        self.k_re2 + self.k_vc))
    hess_fere = np.zeros((self.k_re2 + self.k_vc, self.k_fe))

    fac = self.n_totobs
    if self.reml:
        fac -= self.exog.shape[1]

    rvir = 0.
    xtvix = 0.
    xtax = [0., ] * (self.k_re2 + self.k_vc)
    m = self.k_re2 + self.k_vc
    B = np.zeros(m)
    D = np.zeros((m, m))
    F = [[0.] * m for k in range(m)]
    for group_ix, group in enumerate(self.group_labels):

        vc_var = self._expand_vcomp(vcomp, group_ix)
        vc_vari = np.zeros_like(vc_var)
        ii = np.flatnonzero(vc_var >= 1e-10)
        if len(ii) > 0:
            vc_vari[ii] = 1 / vc_var[ii]
        if len(ii) < len(vc_var):
            sing = True

        exog = self.exog_li[group_ix]
        ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
        solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, vc_vari)

        # The residuals
        resid = self.endog_li[group_ix]
        if self.k_fe > 0:
            expval = np.dot(exog, fe_params)
            resid = resid - expval

        viexog = solver(exog)
        xtvix += np.dot(exog.T, viexog)
        vir = solver(resid)
        rvir += np.dot(resid, vir)

        for (jj1, matl1, matr1, vsl1, vsr1, sym1) in \
                self._gen_dV_dPar(ex_r, solver, group_ix):

            ul = _dot(viexog.T, matl1)
            ur = _dot(matr1.T, vir)
            hess_fere[jj1, :] += np.dot(ul, ur)
            if not sym1:
                ul = _dot(viexog.T, matr1)
                ur = _dot(matl1.T, vir)
                hess_fere[jj1, :] += np.dot(ul, ur)

            if self.reml:
                ul = _dot(viexog.T, matl1)
                ur = ul if sym1 else np.dot(viexog.T, matr1)
                ulr = _dot(ul, ur.T)
                xtax[jj1] += ulr
                if not sym1:
                    xtax[jj1] += ulr.T

            ul = _dot(vir, matl1)
            ur = ul if sym1 else _dot(vir, matr1)
            B[jj1] += np.dot(ul, ur) * (1 if sym1 else 2)

            # V^{-1} * dV/d_theta
            E = [(vsl1, matr1)]
            if not sym1:
                E.append((vsr1, matl1))

            for (jj2, matl2, matr2, vsl2, vsr2, sym2) in \
                    self._gen_dV_dPar(ex_r, solver, group_ix, jj1):

                re = sum([_multi_dot_three(matr2.T, x[0], x[1].T)
                          for x in E])
                vt = 2 * _dot(_multi_dot_three(vir[None, :], matl2, re),
                              vir[:, None])

                if not sym2:
                    le = sum([_multi_dot_three(matl2.T, x[0], x[1].T)
                              for x in E])
                    vt += 2 * _dot(_multi_dot_three(
                        vir[None, :], matr2, le), vir[:, None])

                D[jj1, jj2] += np.squeeze(vt)
                if jj1 != jj2:
                    D[jj2, jj1] += np.squeeze(vt)

                rt = _dotsum(vsl2, re.T) / 2
                if not sym2:
                    rt += _dotsum(vsr2, le.T) / 2

                hess_re[jj1, jj2] += rt
                if jj1 != jj2:
                    hess_re[jj2, jj1] += rt

                if self.reml:
                    ev = sum([_dot(x[0], _dot(x[1].T, viexog))
                              for x in E])
                    u1 = _dot(viexog.T, matl2)
                    u2 = _dot(matr2.T, ev)
                    um = np.dot(u1, u2)
                    F[jj1][jj2] += um + um.T
                    if not sym2:
                        u1 = np.dot(viexog.T, matr2)
                        u2 = np.dot(matl2.T, ev)
                        um = np.dot(u1, u2)
                        F[jj1][jj2] += um + um.T

    hess_fe -= fac * xtvix / rvir
    hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)
    hess_fere = -fac * hess_fere / rvir

    if self.reml:
        QL = [np.linalg.solve(xtvix, x) for x in xtax]
        for j1 in range(self.k_re2 + self.k_vc):
            for j2 in range(j1 + 1):
                a = _dotsum(QL[j1].T, QL[j2])
                a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))
                a *= 0.5
                hess_re[j1, j2] += a
                if j1 > j2:
                    hess_re[j2, j1] += a

    # Put the blocks together to get the Hessian.
    m = self.k_fe + self.k_re2 + self.k_vc
    hess = np.zeros((m, m))
    hess[0:self.k_fe, 0:self.k_fe] = hess_fe
    hess[0:self.k_fe, self.k_fe:] = hess_fere.T
    hess[self.k_fe:, 0:self.k_fe] = hess_fere
    hess[self.k_fe:, self.k_fe:] = hess_re

    return hess, sing
Returns the model's Hessian matrix. Calculates the Hessian matrix for the linear mixed effects model with respect to the parameterization in which the covariance matrix is represented directly (without square-root transformation). Parameters ---------- params : MixedLMParams or array_like The model parameters at which the Hessian is calculated. If array-like, must contain the packed parameters in a form that is compatible with this model instance. Returns ------- hess : 2d ndarray The Hessian matrix, evaluated at `params`. sing : boolean If True, the covariance matrix is singular and a pseudo-inverse is returned.
hessian
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
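A sketch, not from the source, continuing from the example above: reproducing the observed-information standard errors that `fit` derives from this Hessian, namely sqrt(diag(-H^{-1} / scale)).

hess, sing = model.hessian(result.params_object)
pcov = np.linalg.inv(-hess) / result.scale
print(np.sqrt(np.diag(pcov)))  # compare with the bse reported by result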
def get_scale(self, fe_params, cov_re, vcomp):
    """
    Returns the estimated error variance based on given estimates
    of the slopes and random effects covariance matrix.

    Parameters
    ----------
    fe_params : array_like
        The regression slope estimates
    cov_re : 2d array_like
        Estimate of the random effects covariance matrix
    vcomp : array_like
        Estimate of the variance components

    Returns
    -------
    scale : float
        The estimated error variance.
    """

    try:
        cov_re_inv = np.linalg.inv(cov_re)
    except np.linalg.LinAlgError:
        cov_re_inv = np.linalg.pinv(cov_re)
        warnings.warn(_warn_cov_sing)

    qf = 0.
    for group_ix, group in enumerate(self.group_labels):

        vc_var = self._expand_vcomp(vcomp, group_ix)

        exog = self.exog_li[group_ix]
        ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
        solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)

        # The residuals
        resid = self.endog_li[group_ix]
        if self.k_fe > 0:
            expval = np.dot(exog, fe_params)
            resid = resid - expval

        mat = solver(resid)
        qf += np.dot(resid, mat)

    if self.reml:
        qf /= (self.n_totobs - self.k_fe)
    else:
        qf /= self.n_totobs

    return qf
Returns the estimated error variance based on given estimates of the slopes and random effects covariance matrix. Parameters ---------- fe_params : array_like The regression slope estimates cov_re : 2d array_like Estimate of the random effects covariance matrix vcomp : array_like Estimate of the variance components Returns ------- scale : float The estimated error variance.
get_scale
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
def fit(self, start_params=None, reml=True, niter_sa=0,
        do_cg=True, fe_pen=None, cov_pen=None, free=None,
        full_output=False, method=None, **fit_kwargs):
    """
    Fit a linear mixed model to the data.

    Parameters
    ----------
    start_params : array_like or MixedLMParams
        Starting values for the profile log-likelihood.  If not a
        `MixedLMParams` instance, this should be an array
        containing the packed parameters for the profile
        log-likelihood, including the fixed effects parameters.
    reml : bool
        If true, fit according to the REML likelihood, else
        fit the standard likelihood using ML.
    niter_sa : int
        Currently this argument is ignored and has no effect
        on the results.
    cov_pen : CovariancePenalty object
        A penalty for the random effects covariance matrix
    do_cg : bool, defaults to True
        If False, the optimization is skipped and a results
        object at the given (or default) starting values is
        returned.
    fe_pen : Penalty object
        A penalty on the fixed effects
    free : MixedLMParams object
        If not `None`, this is a mask that allows parameters to be
        held fixed at specified values.  A 1 indicates that the
        corresponding parameter is estimated, a 0 indicates that
        it is fixed at its starting value.  Setting the `cov_re`
        component to the identity matrix fits a model with
        independent random effects.  Note that some optimization
        methods do not respect this constraint (bfgs and lbfgs
        both work).
    full_output : bool
        If true, attach iteration history to results
    method : str
        Optimization method.  Can be a scipy.optimize method name,
        or a list of such names to be tried in sequence.
    **fit_kwargs
        Additional keyword arguments passed to fit.

    Returns
    -------
    A MixedLMResults instance.
    """

    _allowed_kwargs = ['gtol', 'maxiter', 'eps', 'maxcor', 'ftol',
                       'tol', 'disp', 'maxls']
    for x in fit_kwargs.keys():
        if x not in _allowed_kwargs:
            warnings.warn("Argument %s not used by MixedLM.fit" % x)

    if method is None:
        method = ['bfgs', 'lbfgs', 'cg']
    elif isinstance(method, str):
        method = [method]

    for meth in method:
        if meth.lower() in ["newton", "ncg"]:
            raise ValueError(
                "method %s not available for MixedLM" % meth)

    self.reml = reml
    self.cov_pen = cov_pen
    self.fe_pen = fe_pen
    self._cov_sing = 0
    self._freepat = free

    if full_output:
        hist = []
    else:
        hist = None

    if start_params is None:
        params = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
        params.fe_params = np.zeros(self.k_fe)
        params.cov_re = np.eye(self.k_re)
        params.vcomp = np.ones(self.k_vc)
    else:
        if isinstance(start_params, MixedLMParams):
            params = start_params
        else:
            # It's a packed array
            if len(start_params) == self.k_fe + self.k_re2 + self.k_vc:
                params = MixedLMParams.from_packed(
                    start_params, self.k_fe, self.k_re, self.use_sqrt,
                    has_fe=True)
            elif len(start_params) == self.k_re2 + self.k_vc:
                params = MixedLMParams.from_packed(
                    start_params, self.k_fe, self.k_re, self.use_sqrt,
                    has_fe=False)
            else:
                raise ValueError("invalid start_params")

    if do_cg:
        fit_kwargs["retall"] = hist is not None
        if "disp" not in fit_kwargs:
            fit_kwargs["disp"] = False
        packed = params.get_packed(use_sqrt=self.use_sqrt, has_fe=False)

        if niter_sa > 0:
            warnings.warn("niter_sa is currently ignored")

        # Try optimizing one or more times
        for j in range(len(method)):
            rslt = super().fit(start_params=packed,
                               skip_hessian=True,
                               method=method[j],
                               **fit_kwargs)
            if rslt.mle_retvals['converged']:
                break
            packed = rslt.params
            if j + 1 < len(method):
                next_method = method[j + 1]
                warnings.warn(
                    "Retrying MixedLM optimization with %s" % next_method,
                    ConvergenceWarning)
            else:
                msg = ("MixedLM optimization failed, " +
                       "trying a different optimizer may help.")
                warnings.warn(msg, ConvergenceWarning)

        # The optimization succeeded
        params = np.atleast_1d(rslt.params)
        if hist is not None:
            hist.append(rslt.mle_retvals)

    converged = rslt.mle_retvals['converged']
    if not converged:
        gn = self.score(rslt.params)
        gn = np.sqrt(np.sum(gn**2))
        msg = "Gradient optimization failed, |grad| = %f" % gn
        warnings.warn(msg, ConvergenceWarning)

    # Convert to the final parameterization (i.e. undo the square
    # root transform of the covariance matrix, and the profiling
    # over the error variance).
    params = MixedLMParams.from_packed(
        params, self.k_fe, self.k_re, use_sqrt=self.use_sqrt,
        has_fe=False)
    cov_re_unscaled = params.cov_re
    vcomp_unscaled = params.vcomp
    fe_params, sing = self.get_fe_params(cov_re_unscaled, vcomp_unscaled)
    params.fe_params = fe_params
    scale = self.get_scale(fe_params, cov_re_unscaled, vcomp_unscaled)
    cov_re = scale * cov_re_unscaled
    vcomp = scale * vcomp_unscaled

    f1 = (self.k_re > 0) and (np.min(np.abs(np.diag(cov_re))) < 0.01)
    f2 = (self.k_vc > 0) and (np.min(np.abs(vcomp)) < 0.01)
    if f1 or f2:
        msg = "The MLE may be on the boundary of the parameter space."
        warnings.warn(msg, ConvergenceWarning)

    # Compute the Hessian at the MLE.  Note that this is the
    # Hessian with respect to the random effects covariance matrix
    # (not its square root).  It is used for obtaining standard
    # errors, not for optimization.
    hess, sing = self.hessian(params)
    if sing:
        warnings.warn(_warn_cov_sing)

    hess_diag = np.diag(hess)
    if free is not None:
        pcov = np.zeros_like(hess)
        pat = self._freepat.get_packed(use_sqrt=False, has_fe=True)
        ii = np.flatnonzero(pat)
        hess_diag = hess_diag[ii]
        if len(ii) > 0:
            hess1 = hess[np.ix_(ii, ii)]
            pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
    else:
        pcov = np.linalg.inv(-hess)
    if np.any(hess_diag >= 0):
        msg = ("The Hessian matrix at the estimated parameter values " +
               "is not positive definite.")
        warnings.warn(msg, ConvergenceWarning)

    # Prepare a results class instance
    params_packed = params.get_packed(use_sqrt=False, has_fe=True)
    results = MixedLMResults(self, params_packed, pcov / scale)
    results.params_object = params
    results.fe_params = fe_params
    results.cov_re = cov_re
    results.vcomp = vcomp
    results.scale = scale
    results.cov_re_unscaled = cov_re_unscaled
    results.method = "REML" if self.reml else "ML"
    results.converged = converged
    results.hist = hist
    results.reml = self.reml
    results.cov_pen = self.cov_pen
    results.k_fe = self.k_fe
    results.k_re = self.k_re
    results.k_re2 = self.k_re2
    results.k_vc = self.k_vc
    results.use_sqrt = self.use_sqrt
    results.freepat = self._freepat

    return MixedLMResultsWrapper(results)
Fit a linear mixed model to the data. Parameters ---------- start_params : array_like or MixedLMParams Starting values for the profile log-likelihood. If not a `MixedLMParams` instance, this should be an array containing the packed parameters for the profile log-likelihood, including the fixed effects parameters. reml : bool If true, fit according to the REML likelihood, else fit the standard likelihood using ML. niter_sa : int Currently this argument is ignored and has no effect on the results. cov_pen : CovariancePenalty object A penalty for the random effects covariance matrix do_cg : bool, defaults to True If False, the optimization is skipped and a results object at the given (or default) starting values is returned. fe_pen : Penalty object A penalty on the fixed effects free : MixedLMParams object If not `None`, this is a mask that allows parameters to be held fixed at specified values. A 1 indicates that the corresponding parameter is estimated, a 0 indicates that it is fixed at its starting value. Setting the `cov_re` component to the identity matrix fits a model with independent random effects. Note that some optimization methods do not respect this constraint (bfgs and lbfgs both work). full_output : bool If true, attach iteration history to results method : str Optimization method. Can be a scipy.optimize method name, or a list of such names to be tried in sequence. **fit_kwargs Additional keyword arguments passed to fit. Returns ------- A MixedLMResults instance.
fit
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
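A sketch, not from the source, continuing from the example above: passing a list to `method` makes `fit` fall back to the next optimizer if the previous one fails to converge, as described in the docstring.

result2 = model.fit(reml=True, method=["lbfgs", "cg"], maxiter=200)
print(result2.converged, result2.method)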
def rvs(self, n):
    """
    Return a vector of simulated values from a mixed linear
    model.

    The parameter n is ignored, but required by the interface
    """

    model = self.model

    # Fixed effects
    y = np.dot(self.exog, self.fe_params)

    # Random effects
    u = np.random.normal(size=(model.n_groups, model.k_re))
    u = np.dot(u, np.linalg.cholesky(self.cov_re).T)
    y += (u[self.group_idx, :] * model.exog_re).sum(1)

    # Variance components
    for j, _ in enumerate(model.exog_vc.names):
        ex = model.exog_vc.mats[j]
        v = self.vcomp[j]
        for i, g in enumerate(model.group_labels):
            exg = ex[i]
            ii = model.row_indices[g]
            u = np.random.normal(size=exg.shape[1])
            y[ii] += np.sqrt(v) * np.dot(exg, u)

    # Residual variance
    y += np.sqrt(self.scale) * np.random.normal(size=len(y))

    return y
Return a vector of simulated values from a mixed linear model. The parameter n is ignored, but required by the interface
rvs
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
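A sketch, not from the source: simulating new responses from fitted estimates with plain numpy, mirroring the logic of `rvs` above but for the random-intercept part only (the classroom component is omitted for brevity). Continues from the `from_formula` example.

groups = np.asarray(df["school"])
labels, idx = np.unique(groups, return_inverse=True)

y_sim = np.dot(model.exog, np.asarray(result.fe_params))    # fixed effects
b = rng.normal(scale=np.sqrt(np.asarray(result.cov_re)[0, 0]),
               size=len(labels))
y_sim += b[idx]                                             # random intercepts
y_sim += rng.normal(scale=np.sqrt(result.scale), size=len(y_sim))
print(y_sim[:5])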
def fittedvalues(self):
    """
    Returns the fitted values for the model.

    The fitted values reflect the mean structure specified by the
    fixed effects and the predicted random effects.
    """
    fit = np.dot(self.model.exog, self.fe_params)
    re = self.random_effects
    for group_ix, group in enumerate(self.model.group_labels):
        ix = self.model.row_indices[group]

        mat = []
        if self.model.exog_re_li is not None:
            mat.append(self.model.exog_re_li[group_ix])
        for j in range(self.k_vc):
            mat.append(self.model.exog_vc.mats[j][group_ix])
        mat = np.concatenate(mat, axis=1)

        fit[ix] += np.dot(mat, re[group])

    return fit
Returns the fitted values for the model. The fitted values reflect the mean structure specified by the fixed effects and the predicted random effects.
fittedvalues
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause
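A sketch, not from the source, continuing from the example above: `fittedvalues` and `resid` are complementary conditional quantities, both including the predicted random effects.

fitted = np.asarray(result.fittedvalues)
residuals = np.asarray(result.resid)
print(np.allclose(residuals, df["test_score"] - fitted))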
def resid(self):
    """
    Returns the residuals for the model.

    The residuals reflect the mean structure specified by the
    fixed effects and the predicted random effects.
    """
    return self.model.endog - self.fittedvalues
Returns the residuals for the model. The residuals reflect the mean structure specified by the fixed effects and the predicted random effects.
resid
python
statsmodels/statsmodels
statsmodels/regression/mixed_linear_model.py
https://github.com/statsmodels/statsmodels/blob/master/statsmodels/regression/mixed_linear_model.py
BSD-3-Clause